/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <plat/map-base.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing. It supports image scaling,
 * rotation and input/output DMA operations using hardware blocks such
 * as FIMC, GSC and Rotator. IPP is an integrated driver for hardware
 * blocks of the same class.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multi-open.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)
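
/*
 * Rough userspace flow for reference (a hedged sketch, not part of this
 * driver; the ioctl names come from the exynos UAPI header, the field
 * values are made up):
 *
 *	struct drm_exynos_ipp_property prop = {
 *		.cmd = IPP_CMD_M2M,		// memory-to-memory scaling
 *		// fill prop.config[EXYNOS_DRM_OPS_SRC/DST] with fmt/pos/sz
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id = prop.prop_id,
 *		.ops_id = EXYNOS_DRM_OPS_SRC,	// then again for ..._DST
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		// .handle[i] = GEM handles of the planes
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id = prop.prop_id,
 *		.ctrl = IPP_CTRL_PLAY,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */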
/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
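
/*
 * Hardware sub-drivers (FIMC, GSC, Rotator) register themselves with the
 * IPP core roughly as below (an illustrative sketch; the callback names
 * are placeholders, the real drivers live in exynos_drm_fimc.c etc.):
 *
 *	static struct exynos_drm_ippdrv fimc_ippdrv = {
 *		.dev		= ...,	// the FIMC device
 *		.ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops,
 *		.ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops,
 *		.check_property	= fimc_ippdrv_check_property,
 *		.reset		= fimc_ippdrv_reset,
 *		.start		= fimc_ippdrv_start,
 *		.stop		= fimc_ippdrv_stop,
 *	};
 *
 *	exynos_drm_ippdrv_register(&fimc_ippdrv);	// at probe time
 *	exynos_drm_ippdrv_unregister(&fimc_ippdrv);	// at remove time
 */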
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
		DRM_ERROR("failed to get idr.\n");
		return -ENOMEM;
	}

	/* do the allocation under our mutex lock */
	mutex_lock(lock);
	ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
	mutex_unlock(lock);
	if (ret == -EAGAIN)
		goto again;

	return ret;
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}

static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd cmd)
{
	/*
	 * check dedicated flag and WB, OUTPUT operation with
	 * power on state.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}
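
/*
 * The two helpers above wrap the pre-3.9 idr API: idr_pre_get() only
 * preallocates memory, so idr_get_new_above() can still return -EAGAIN
 * under concurrency and the caller must retry. A minimal usage sketch
 * (hypothetical caller, not in this file):
 *
 *	u32 id;
 *	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, &id);
 *	if (ret)
 *		return ret;
 *	...
 *	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, id);
 *	if (IS_ERR(c_node))	// never NULL, always ERR_PTR on failure
 *		return PTR_ERR(c_node);
 */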
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR_OR_NULL(ippdrv)) {
			DRM_ERROR("ipp%d driver not found.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so the set property ioctl marks the chosen driver as
		 * dedicated. The flag is cleared when the ipp driver has
		 * finished its operations.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("chosen device is already in use.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find the correct device, because
		 * the ipp drivers have different abilities: check whether
		 * this driver supports the requested property.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("property not supported.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set an ipp_id, so search
		 * the whole driver list for a device that can handle the
		 * requested property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("%s:used device.\n", __func__);
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("%s:not support property.\n",
					__func__);
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("no ipp driver supports the operations.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Search the ipp driver by prop_id handle. The ipp subsystem
	 * needs this lookup in several places, e.g. PAUSE state,
	 * queue buf and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
			count++, (int)ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
		}
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Report the number of registered ipp drivers.
		 * In the first step the user application reads this
		 * count; in the second step it queries each driver's
		 * capability using its ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Report the capability of the driver with this ipp_id.
		 * Some devices do not support the wb or output
		 * interfaces, so the user application can detect the
		 * correct ipp driver using this ioctl.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("ipp%d driver not found.\n",
					prop_list->ipp_id);
			return -EINVAL;
		}

		*prop_list = *ippdrv->prop_list;
	}

	return 0;
}
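
/*
 * The two-step capability query from userspace then looks roughly like
 * this (an illustrative sketch; error handling omitted, and the
 * assumption that ipp ids are the consecutive values 1..count follows
 * from how ipp_subdrv_probe() allocates them):
 *
 *	struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	// plist.count now holds the number of ipp drivers
 *
 *	for (__u32 id = 1; id <= plist.count; id++) {
 *		plist.ipp_id = id;
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *		// inspect the returned capability flags
 *	}
 */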
static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
		__func__, property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		__func__, pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node in the ippdrv command list using
	 * prop_id. When found, update it with the property
	 * information passed in by this ioctl.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
				__func__, property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work) {
		DRM_ERROR("failed to alloc cmd_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	DRM_DEBUG_KMS("%s\n", __func__);

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work) {
		DRM_ERROR("failed to alloc event_work.\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}
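
/*
 * The (struct work_struct *) casts above are only valid because the
 * work item is the first member of both wrapper structures, so the two
 * pointers are interchangeable. The layout (from exynos_drm_ipp.h,
 * abridged) is essentially:
 *
 *	struct drm_exynos_ipp_cmd_work {
 *		struct work_struct		work;	// must stay first
 *		struct exynos_drm_ippdrv	*ippdrv;
 *		struct drm_exynos_ipp_cmd_node	*c_node;
 *		enum drm_exynos_ipp_ctrl	ctrl;
 *	};
 *
 * The handlers cast back the same way; container_of() would express
 * the same relationship without the ordering constraint.
 */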
584 DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__, 585 i ? "dst" : "src"); 586 continue; 587 } 588 589 /* find memory node entry */ 590 list_for_each_entry(m_node, head, list) { 591 DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__, 592 i ? "dst" : "src", count[i], (int)m_node); 593 count[i]++; 594 } 595 } 596 597 DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__, 598 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]), 599 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST])); 600 601 /* 602 * M2M operations should be need paired memory address. 603 * so, need to check minimum count about src, dst. 604 * other case not use paired memory, so use maximum count 605 */ 606 if (ipp_is_m2m_cmd(property->cmd)) 607 ret = min(count[EXYNOS_DRM_OPS_SRC], 608 count[EXYNOS_DRM_OPS_DST]); 609 else 610 ret = max(count[EXYNOS_DRM_OPS_SRC], 611 count[EXYNOS_DRM_OPS_DST]); 612 613 mutex_unlock(&c_node->mem_lock); 614 615 return ret; 616 } 617 618 static struct drm_exynos_ipp_mem_node 619 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node, 620 struct drm_exynos_ipp_queue_buf *qbuf) 621 { 622 struct drm_exynos_ipp_mem_node *m_node; 623 struct list_head *head; 624 int count = 0; 625 626 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id); 627 628 /* source/destination memory list */ 629 head = &c_node->mem_list[qbuf->ops_id]; 630 631 /* find memory node from memory list */ 632 list_for_each_entry(m_node, head, list) { 633 DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n", 634 __func__, count++, (int)m_node); 635 636 /* compare buffer id */ 637 if (m_node->buf_id == qbuf->buf_id) 638 return m_node; 639 } 640 641 return NULL; 642 } 643 644 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, 645 struct drm_exynos_ipp_cmd_node *c_node, 646 struct drm_exynos_ipp_mem_node *m_node) 647 { 648 struct exynos_drm_ipp_ops *ops = NULL; 649 int ret = 0; 650 651 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node); 652 653 if (!m_node) { 654 DRM_ERROR("invalid queue node.\n"); 655 return -EFAULT; 656 } 657 658 mutex_lock(&c_node->mem_lock); 659 660 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id); 661 662 /* get operations callback */ 663 ops = ippdrv->ops[m_node->ops_id]; 664 if (!ops) { 665 DRM_ERROR("not support ops.\n"); 666 ret = -EFAULT; 667 goto err_unlock; 668 } 669 670 /* set address and enable irq */ 671 if (ops->set_addr) { 672 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info, 673 m_node->buf_id, IPP_BUF_ENQUEUE); 674 if (ret) { 675 DRM_ERROR("failed to set addr.\n"); 676 goto err_unlock; 677 } 678 } 679 680 err_unlock: 681 mutex_unlock(&c_node->mem_lock); 682 return ret; 683 } 684 685 static struct drm_exynos_ipp_mem_node 686 *ipp_get_mem_node(struct drm_device *drm_dev, 687 struct drm_file *file, 688 struct drm_exynos_ipp_cmd_node *c_node, 689 struct drm_exynos_ipp_queue_buf *qbuf) 690 { 691 struct drm_exynos_ipp_mem_node *m_node; 692 struct drm_exynos_ipp_buf_info buf_info; 693 void *addr; 694 int i; 695 696 DRM_DEBUG_KMS("%s\n", __func__); 697 698 mutex_lock(&c_node->mem_lock); 699 700 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); 701 if (!m_node) { 702 DRM_ERROR("failed to allocate queue node.\n"); 703 goto err_unlock; 704 } 705 706 /* clear base address for error handling */ 707 memset(&buf_info, 0x0, sizeof(buf_info)); 708 709 /* operations, buffer id */ 710 m_node->ops_id = qbuf->ops_id; 711 m_node->prop_id = qbuf->prop_id; 712 m_node->buf_id = qbuf->buf_id; 713 714 DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__, 715 (int)m_node, qbuf->ops_id); 716 
DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__, 717 qbuf->prop_id, m_node->buf_id); 718 719 for_each_ipp_planar(i) { 720 DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__, 721 i, qbuf->handle[i]); 722 723 /* get dma address by handle */ 724 if (qbuf->handle[i]) { 725 addr = exynos_drm_gem_get_dma_addr(drm_dev, 726 qbuf->handle[i], file); 727 if (IS_ERR(addr)) { 728 DRM_ERROR("failed to get addr.\n"); 729 goto err_clear; 730 } 731 732 buf_info.handles[i] = qbuf->handle[i]; 733 buf_info.base[i] = *(dma_addr_t *) addr; 734 DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n", 735 __func__, i, buf_info.base[i], 736 (int)buf_info.handles[i]); 737 } 738 } 739 740 m_node->filp = file; 741 m_node->buf_info = buf_info; 742 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); 743 744 mutex_unlock(&c_node->mem_lock); 745 return m_node; 746 747 err_clear: 748 kfree(m_node); 749 err_unlock: 750 mutex_unlock(&c_node->mem_lock); 751 return ERR_PTR(-EFAULT); 752 } 753 754 static int ipp_put_mem_node(struct drm_device *drm_dev, 755 struct drm_exynos_ipp_cmd_node *c_node, 756 struct drm_exynos_ipp_mem_node *m_node) 757 { 758 int i; 759 760 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node); 761 762 if (!m_node) { 763 DRM_ERROR("invalid dequeue node.\n"); 764 return -EFAULT; 765 } 766 767 if (list_empty(&m_node->list)) { 768 DRM_ERROR("empty memory node.\n"); 769 return -ENOMEM; 770 } 771 772 mutex_lock(&c_node->mem_lock); 773 774 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id); 775 776 /* put gem buffer */ 777 for_each_ipp_planar(i) { 778 unsigned long handle = m_node->buf_info.handles[i]; 779 if (handle) 780 exynos_drm_gem_put_dma_addr(drm_dev, handle, 781 m_node->filp); 782 } 783 784 /* delete list in queue */ 785 list_del(&m_node->list); 786 kfree(m_node); 787 788 mutex_unlock(&c_node->mem_lock); 789 790 return 0; 791 } 792 793 static void ipp_free_event(struct drm_pending_event *event) 794 { 795 kfree(event); 796 } 797 798 static int ipp_get_event(struct drm_device *drm_dev, 799 struct drm_file *file, 800 struct drm_exynos_ipp_cmd_node *c_node, 801 struct drm_exynos_ipp_queue_buf *qbuf) 802 { 803 struct drm_exynos_ipp_send_event *e; 804 unsigned long flags; 805 806 DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__, 807 qbuf->ops_id, qbuf->buf_id); 808 809 e = kzalloc(sizeof(*e), GFP_KERNEL); 810 811 if (!e) { 812 DRM_ERROR("failed to allocate event.\n"); 813 spin_lock_irqsave(&drm_dev->event_lock, flags); 814 file->event_space += sizeof(e->event); 815 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 816 return -ENOMEM; 817 } 818 819 /* make event */ 820 e->event.base.type = DRM_EXYNOS_IPP_EVENT; 821 e->event.base.length = sizeof(e->event); 822 e->event.user_data = qbuf->user_data; 823 e->event.prop_id = qbuf->prop_id; 824 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id; 825 e->base.event = &e->event.base; 826 e->base.file_priv = file; 827 e->base.destroy = ipp_free_event; 828 list_add_tail(&e->base.link, &c_node->event_list); 829 830 return 0; 831 } 832 833 static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, 834 struct drm_exynos_ipp_queue_buf *qbuf) 835 { 836 struct drm_exynos_ipp_send_event *e, *te; 837 int count = 0; 838 839 DRM_DEBUG_KMS("%s\n", __func__); 840 841 if (list_empty(&c_node->event_list)) { 842 DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__); 843 return; 844 } 845 846 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 847 DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n", 848 __func__, count++, (int)e); 849 850 /* 851 * quf == NULL 
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
		qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
			__func__, count++, (int)e);

		/*
		 * qbuf == NULL means delete all events: the stop
		 * operation wants to flush the whole event list.
		 * Otherwise delete only the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}
}

static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * an m2m operation needs to be started here at queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return -EFAULT;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * The first step is to get an event for the destination
		 * buffer; the second step, in the M2M case, runs the
		 * operation with the destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the PLAY control here for
			 * the streaming feature; the other cases just
			 * set the address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
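
/*
 * From userspace, every processed frame is a matching ENQUEUE/DEQUEUE
 * pair per side (a hedged sketch; only the dst side is shown and error
 * handling is omitted):
 *
 *	qbuf.ops_id   = EXYNOS_DRM_OPS_DST;
 *	qbuf.buf_type = IPP_BUF_ENQUEUE;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *	// ... wait for the DRM_EXYNOS_IPP_EVENT carrying this buf_id ...
 *	qbuf.buf_type = IPP_BUF_DEQUEUE;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 */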
static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	DRM_DEBUG_KMS("%s\n", __func__);

	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}
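
/*
 * exynos_drm_ipp_check_valid() above encodes this state machine for a
 * command node (STOP is accepted from any state except STOP itself):
 *
 *	IDLE --PLAY--> START --PAUSE--> STOP --RESUME--> START
 *	                 |                ^
 *	                 +------STOP------+
 */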
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return PTR_ERR(c_node);
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}
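
/*
 * Other exynos drm components can watch for ipp notifications through
 * the blocking notifier chain above. A minimal sketch (the callback
 * name and the consumer are hypothetical; only the chain helpers are
 * from this file):
 *
 *	static int hdmi_ippnb_callback(struct notifier_block *nb,
 *			unsigned long val, void *data)
 *	{
 *		// react to the ipp event described by val/data
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block hdmi_ippnb = {
 *		.notifier_call = hdmi_ippnb_callback,
 *	};
 *
 *	exynos_drm_ippnb_register(&hdmi_ippnb);
 */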
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source,destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not support format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not support transf.\n");
				return -EINVAL;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not support size.\n");
				return ret;
			}
		}
	}

	return 0;
}
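
/*
 * The swap flag connects set_transf() and set_size(): a 90 or 270
 * degree rotation exchanges width and height at the output, so the
 * driver's set_transf() sets *swap and set_size() is then expected to
 * apply the position and size with the axes exchanged. For example,
 * rotating a 1280x720 source by 90 degrees yields a 720x1280
 * destination.
 */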
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				ret = -EFAULT;
				return ret;
			}

			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
				__func__, (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("%s:mem_list is empty.\n",
					__func__);
				break;
			}

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}
"dst" : "src", tbuf_id[i]); 1593 1594 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1595 if (ret) 1596 DRM_ERROR("failed to put m_node.\n"); 1597 } 1598 break; 1599 case IPP_CMD_WB: 1600 /* clear buf for finding */ 1601 memset(&qbuf, 0x0, sizeof(qbuf)); 1602 qbuf.ops_id = EXYNOS_DRM_OPS_DST; 1603 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST]; 1604 1605 /* get memory node entry */ 1606 m_node = ipp_find_mem_node(c_node, &qbuf); 1607 if (!m_node) { 1608 DRM_ERROR("empty memory node.\n"); 1609 return -ENOMEM; 1610 } 1611 1612 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id; 1613 1614 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1615 if (ret) 1616 DRM_ERROR("failed to put m_node.\n"); 1617 break; 1618 case IPP_CMD_OUTPUT: 1619 /* source memory list */ 1620 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1621 1622 m_node = list_first_entry(head, 1623 struct drm_exynos_ipp_mem_node, list); 1624 if (!m_node) { 1625 DRM_ERROR("empty memory node.\n"); 1626 return -ENOMEM; 1627 } 1628 1629 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; 1630 1631 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1632 if (ret) 1633 DRM_ERROR("failed to put m_node.\n"); 1634 break; 1635 default: 1636 DRM_ERROR("invalid operations.\n"); 1637 return -EINVAL; 1638 } 1639 1640 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST]) 1641 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n", 1642 tbuf_id[1], buf_id[1], property->prop_id); 1643 1644 /* 1645 * command node have event list of destination buffer 1646 * If destination buffer enqueue to mem list, 1647 * then we make event and link to event list tail. 1648 * so, we get first event for first enqueued buffer. 1649 */ 1650 e = list_first_entry(&c_node->event_list, 1651 struct drm_exynos_ipp_send_event, base.link); 1652 1653 if (!e) { 1654 DRM_ERROR("empty event.\n"); 1655 return -EINVAL; 1656 } 1657 1658 do_gettimeofday(&now); 1659 DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n" 1660 , __func__, now.tv_sec, now.tv_usec); 1661 e->event.tv_sec = now.tv_sec; 1662 e->event.tv_usec = now.tv_usec; 1663 e->event.prop_id = property->prop_id; 1664 1665 /* set buffer id about source destination */ 1666 for_each_ipp_ops(i) 1667 e->event.buf_id[i] = tbuf_id[i]; 1668 1669 spin_lock_irqsave(&drm_dev->event_lock, flags); 1670 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1671 wake_up_interruptible(&e->base.file_priv->event_wait); 1672 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 1673 1674 DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__, 1675 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]); 1676 1677 return 0; 1678 } 1679 1680 void ipp_sched_event(struct work_struct *work) 1681 { 1682 struct drm_exynos_ipp_event_work *event_work = 1683 (struct drm_exynos_ipp_event_work *)work; 1684 struct exynos_drm_ippdrv *ippdrv; 1685 struct drm_exynos_ipp_cmd_node *c_node; 1686 int ret; 1687 1688 if (!event_work) { 1689 DRM_ERROR("failed to get event_work.\n"); 1690 return; 1691 } 1692 1693 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, 1694 event_work->buf_id[EXYNOS_DRM_OPS_DST]); 1695 1696 ippdrv = event_work->ippdrv; 1697 if (!ippdrv) { 1698 DRM_ERROR("failed to get ipp driver.\n"); 1699 return; 1700 } 1701 1702 c_node = ippdrv->c_node; 1703 if (!c_node) { 1704 DRM_ERROR("failed to get command node.\n"); 1705 return; 1706 } 1707 1708 /* 1709 * IPP supports command thread, event thread synchronization. 1710 * If IPP close immediately from user land, then IPP make 1711 * synchronization with command thread, so make complete event. 
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
			i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for the destination
	 * buffers. Whenever a destination buffer is enqueued to the
	 * mem list, an event is created and linked to the tail of the
	 * event list, so the first event always belongs to the first
	 * enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n",
		__func__, now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}

void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
		event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * The IPP core synchronizes the command thread and the event
	 * thread. If userland closes the device immediately, the event
	 * thread must still signal start_complete so the command
	 * thread is not left waiting; only the event delivery itself
	 * is bypassed.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
			__func__, c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	mutex_lock(&c_node->event_lock);

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret)
		DRM_ERROR("failed to send event.\n");

	mutex_unlock(&c_node->event_lock);

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}
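
/*
 * Userspace receives these completions by read()ing the DRM fd, like
 * any other DRM event (a hedged sketch; the buffer sizing and the lack
 * of a poll() loop are simplifications):
 *
 *	char buf[128];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	struct drm_event *ev = (struct drm_event *)buf;
 *	if (len > 0 && ev->type == DRM_EXYNOS_IPP_EVENT) {
 *		struct drm_exynos_ipp_event *ipp_ev = (void *)ev;
 *		// ipp_ev->prop_id and ipp_ev->buf_id[] identify the frame
 *	}
 */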
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
			count++, (int)ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err_iommu;
			}
		}
	}

	return 0;

err_iommu:
	/* get ipp driver entry */
	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);
	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	DRM_DEBUG_KMS("%s\n", __func__);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate priv.\n");
		return -ENOMEM;
	}
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
				__func__, count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * Userland went into an abnormal state:
				 * the process was killed and the file
				 * closed, so the stop cmd ctrl was never
				 * issued. Perform the stop operation
				 * here instead.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}
static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	DRM_DEBUG_KMS("%s\n", __func__);

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events.
	 * The IPP drivers queue their event_work on this thread,
	 * and the event thread then sends the event to the user
	 * process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands.
	 * A user process creates a command node with the set property
	 * ioctl and queues its start_work here; the command thread
	 * then starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(&pdev->dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	DRM_DEBUG_KMS("%s\n", __func__);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};