/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image scaler/rotator
 * and input/output DMA operations using FIMC, GSC, Rotator, and so on.
 * IPP is an umbrella driver that integrates hardware blocks sharing the
 * same set of capabilities.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multiple opens.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};
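
/*
 * A rough sketch of the expected userspace sequence, inferred from the
 * ioctl handlers below; the exact DRM_IOCTL_EXYNOS_IPP_* request macros
 * live in the exynos_drm uapi header and are assumed here:
 *
 *	1. GET_PROPERTY with ipp_id == 0 to read the driver count, then
 *	   once per ipp_id to read each driver's capabilities.
 *	2. SET_PROPERTY to create a command node and get back a prop_id.
 *	3. QUEUE_BUF with IPP_BUF_ENQUEUE for source/destination buffers.
 *	4. CMD_CTRL with IPP_CTRL_PLAY to start processing.
 *	5. Read DRM_EXYNOS_IPP_EVENT events from the drm fd to learn which
 *	   destination buf_id completed, then QUEUE_BUF/IPP_BUF_DEQUEUE it.
 *	6. CMD_CTRL with IPP_CTRL_STOP to tear the command node down.
 */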
static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	/* do the allocation under our mutex */
	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}

static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("id[%d]\n", id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}

static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd cmd)
{
	/*
	 * Check the dedicated flag, or, for WB/OUTPUT operations,
	 * whether the device is already powered on.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}
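
/*
 * Driver selection, summarized from the function below: a non-zero
 * ipp_id looks up exactly that driver in the idr and fails with -EBUSY
 * or -EINVAL if it is busy or rejects the property; ipp_id == 0 walks
 * the global driver list and returns the first driver that is neither
 * dedicated nor rejecting the property via check_property().
 */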
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so the set property ioctl marks the driver as dedicated.
		 * The flag is cleared when the ipp driver finishes its
		 * operations.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("already used choose device.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find the correct device, because
		 * the ipp drivers have different abilities; check the
		 * property against the chosen driver.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set an ipp_id, so search
		 * the whole driver list for a driver that can handle the
		 * property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("used device.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("not support property.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("not support ipp driver operations.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	/*
	 * Search for the ipp driver that owns the prop_id handle.
	 * The ipp subsystem needs this lookup for e.g. the PAUSE state,
	 * queue buf and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if (c_node->property.prop_id == prop_id) {
				mutex_unlock(&ippdrv->cmd_lock);
				return ippdrv;
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return ERR_PTR(-ENODEV);
}
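
/*
 * A hedged sketch of the two-step capability query implemented by the
 * handler below; the ioctl request macro name is assumed from the uapi
 * header:
 *
 *	struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	for (i = 1; i <= plist.count; i++) {
 *		plist.ipp_id = i;
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *		// inspect this driver's capabilities here
 *	}
 */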
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * Report the ippdrv list count to the user application.
		 * In the first step the application reads the ippdrv
		 * count; in the second step it reads each driver's
		 * capabilities using the ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Return the capabilities of the driver with this ipp_id.
		 * Some devices do not support the wb or output interface,
		 * so the user application uses this ioctl to pick the
		 * right ipp driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return PTR_ERR(ippdrv);
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node that owns prop_id in the driver's
	 * command list and store the new property information in it.
	 */
	mutex_lock(&ippdrv->cmd_lock);
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			mutex_unlock(&ippdrv->cmd_lock);
			DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
				property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}
	mutex_unlock(&ippdrv->cmd_lock);

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}
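
/*
 * Note on the casts above: INIT_WORK() is handed the containing
 * structure cast to struct work_struct *, which is only valid while
 * the work member is the first field of drm_exynos_ipp_cmd_work and
 * drm_exynos_ipp_event_work (ipp_sched_cmd()/ipp_sched_event() cast
 * back the same way). container_of() would express the same thing
 * without relying on the field layout.
 */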
int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/* log the property handed in by the user application */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set property ioctl normally generates a new prop_id, but a
	 * prop_id may already have been assigned by an earlier set
	 * property call (e.g. in the PAUSE state). In that case find the
	 * current prop_id and reuse it instead of allocating a new one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node)
		return -ENOMEM;

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		property->prop_id, property->cmd, (int)ippdrv);

	/* store property information and ippdrv in private data */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_remove_id;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	mutex_lock(&ippdrv->cmd_lock);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);
	mutex_unlock(&ippdrv->cmd_lock);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_remove_id:
	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
err_clear:
	kfree(c_node);
	return ret;
}

static void ipp_clean_cmd_node(struct ipp_context *ctx,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}
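
/*
 * Buffer-count check, illustrated: with 3 source and 2 destination
 * buffers queued, an M2M command can run min(3, 2) = 2 times, since
 * each run consumes one src/dst pair; WB and OUTPUT use only one side
 * of the queue, so max() reports the usable count there. A return of 0
 * therefore always means "nothing to process yet".
 */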
"dst" : "src", count[i], (int)m_node); 595 count[i]++; 596 } 597 } 598 599 DRM_DEBUG_KMS("min[%d]max[%d]\n", 600 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]), 601 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST])); 602 603 /* 604 * M2M operations should be need paired memory address. 605 * so, need to check minimum count about src, dst. 606 * other case not use paired memory, so use maximum count 607 */ 608 if (ipp_is_m2m_cmd(property->cmd)) 609 ret = min(count[EXYNOS_DRM_OPS_SRC], 610 count[EXYNOS_DRM_OPS_DST]); 611 else 612 ret = max(count[EXYNOS_DRM_OPS_SRC], 613 count[EXYNOS_DRM_OPS_DST]); 614 615 return ret; 616 } 617 618 static struct drm_exynos_ipp_mem_node 619 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node, 620 struct drm_exynos_ipp_queue_buf *qbuf) 621 { 622 struct drm_exynos_ipp_mem_node *m_node; 623 struct list_head *head; 624 int count = 0; 625 626 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id); 627 628 /* source/destination memory list */ 629 head = &c_node->mem_list[qbuf->ops_id]; 630 631 /* find memory node from memory list */ 632 list_for_each_entry(m_node, head, list) { 633 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); 634 635 /* compare buffer id */ 636 if (m_node->buf_id == qbuf->buf_id) 637 return m_node; 638 } 639 640 return NULL; 641 } 642 643 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, 644 struct drm_exynos_ipp_cmd_node *c_node, 645 struct drm_exynos_ipp_mem_node *m_node) 646 { 647 struct exynos_drm_ipp_ops *ops = NULL; 648 int ret = 0; 649 650 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 651 652 if (!m_node) { 653 DRM_ERROR("invalid queue node.\n"); 654 return -EFAULT; 655 } 656 657 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 658 659 /* get operations callback */ 660 ops = ippdrv->ops[m_node->ops_id]; 661 if (!ops) { 662 DRM_ERROR("not support ops.\n"); 663 return -EFAULT; 664 } 665 666 /* set address and enable irq */ 667 if (ops->set_addr) { 668 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info, 669 m_node->buf_id, IPP_BUF_ENQUEUE); 670 if (ret) { 671 DRM_ERROR("failed to set addr.\n"); 672 return ret; 673 } 674 } 675 676 return ret; 677 } 678 679 static struct drm_exynos_ipp_mem_node 680 *ipp_get_mem_node(struct drm_device *drm_dev, 681 struct drm_file *file, 682 struct drm_exynos_ipp_cmd_node *c_node, 683 struct drm_exynos_ipp_queue_buf *qbuf) 684 { 685 struct drm_exynos_ipp_mem_node *m_node; 686 struct drm_exynos_ipp_buf_info buf_info; 687 void *addr; 688 int i; 689 690 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); 691 if (!m_node) 692 return ERR_PTR(-ENOMEM); 693 694 /* clear base address for error handling */ 695 memset(&buf_info, 0x0, sizeof(buf_info)); 696 697 /* operations, buffer id */ 698 m_node->ops_id = qbuf->ops_id; 699 m_node->prop_id = qbuf->prop_id; 700 m_node->buf_id = qbuf->buf_id; 701 702 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); 703 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 704 705 for_each_ipp_planar(i) { 706 DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]); 707 708 /* get dma address by handle */ 709 if (qbuf->handle[i]) { 710 addr = exynos_drm_gem_get_dma_addr(drm_dev, 711 qbuf->handle[i], file); 712 if (IS_ERR(addr)) { 713 DRM_ERROR("failed to get addr.\n"); 714 goto err_clear; 715 } 716 717 buf_info.handles[i] = qbuf->handle[i]; 718 buf_info.base[i] = *(dma_addr_t *) addr; 719 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n", 720 i, buf_info.base[i], (int)buf_info.handles[i]); 721 } 722 } 723 724 m_node->filp = 
static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	int i;

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		return ERR_PTR(-ENOMEM);

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info.handles[i] = qbuf->handle[i];
			buf_info.base[i] = *(dma_addr_t *) addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
				i, buf_info.base[i], (int)buf_info.handles[i]);
		}
	}

	m_node->filp = file;
	m_node->buf_info = buf_info;
	mutex_lock(&c_node->mem_lock);
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
	mutex_unlock(&c_node->mem_lock);

	return m_node;

err_clear:
	/* unpin any planes that were already translated before the failure */
	for_each_ipp_planar(i)
		if (buf_info.handles[i])
			exynos_drm_gem_put_dma_addr(drm_dev,
					buf_info.handles[i], file);
	kfree(m_node);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	return 0;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
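
/*
 * Event bookkeeping: ipp_get_event() below allocates one pending event
 * per enqueued destination buffer and parks it on the command node's
 * event_list; ipp_send_event() later fills in the timestamp and buffer
 * ids and moves it to the drm_file's event_list for userspace to read.
 * Note that the -ENOMEM path credits file->event_space back without an
 * earlier debit in this file, which appears to assume the reservation
 * was taken elsewhere.
 */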
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	mutex_lock(&c_node->event_lock);
	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);

		/*
		 * A NULL qbuf means "delete every event"; the stop
		 * operation uses this to flush the whole event list.
		 * Otherwise delete only the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			goto out_unlock;
		}
	}

out_unlock:
	mutex_unlock(&c_node->event_lock);
	return;
}

static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * m2m operations need to be started here at queue_buf time.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	/* delete list */
	mutex_lock(&c_node->mem_lock);
	list_for_each_entry_safe(m_node, tm_node,
		&c_node->mem_list[qbuf->ops_id], list) {
		if (m_node->buf_id == qbuf->buf_id &&
		    m_node->ops_id == qbuf->ops_id)
			ipp_put_mem_node(drm_dev, c_node, m_node);
	}
	mutex_unlock(&c_node->mem_lock);
}
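
/*
 * A hedged enqueue example for the handler below; the ioctl request
 * macro name is assumed from the uapi header, and the GEM handles are
 * whatever userspace created for its frames:
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id   = prop_id,		// from SET_PROPERTY
 *		.ops_id    = EXYNOS_DRM_OPS_DST,
 *		.buf_type  = IPP_BUF_ENQUEUE,
 *		.buf_id    = 0,
 *		.handle[0] = gem_handle,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 * Enqueueing a destination buffer also allocates its completion event
 * and, for M2M commands, kicks the start work once a src/dst pair is
 * available.
 */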
DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n", 952 qbuf->prop_id, qbuf->ops_id ? "dst" : "src", 953 qbuf->buf_id, qbuf->buf_type); 954 955 /* find command node */ 956 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 957 qbuf->prop_id); 958 if (IS_ERR(c_node)) { 959 DRM_ERROR("failed to get command node.\n"); 960 return PTR_ERR(c_node); 961 } 962 963 /* buffer control */ 964 switch (qbuf->buf_type) { 965 case IPP_BUF_ENQUEUE: 966 /* get memory node */ 967 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf); 968 if (IS_ERR(m_node)) { 969 DRM_ERROR("failed to get m_node.\n"); 970 return PTR_ERR(m_node); 971 } 972 973 /* 974 * first step get event for destination buffer. 975 * and second step when M2M case run with destination buffer 976 * if needed. 977 */ 978 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) { 979 /* get event for destination buffer */ 980 ret = ipp_get_event(drm_dev, file, c_node, qbuf); 981 if (ret) { 982 DRM_ERROR("failed to get event.\n"); 983 goto err_clean_node; 984 } 985 986 /* 987 * M2M case run play control for streaming feature. 988 * other case set address and waiting. 989 */ 990 ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf); 991 if (ret) { 992 DRM_ERROR("failed to run command.\n"); 993 goto err_clean_node; 994 } 995 } 996 break; 997 case IPP_BUF_DEQUEUE: 998 mutex_lock(&c_node->lock); 999 1000 /* put event for destination buffer */ 1001 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) 1002 ipp_put_event(c_node, qbuf); 1003 1004 ipp_clean_queue_buf(drm_dev, c_node, qbuf); 1005 1006 mutex_unlock(&c_node->lock); 1007 break; 1008 default: 1009 DRM_ERROR("invalid buffer control.\n"); 1010 return -EINVAL; 1011 } 1012 1013 return 0; 1014 1015 err_clean_node: 1016 DRM_ERROR("clean memory nodes.\n"); 1017 1018 ipp_clean_queue_buf(drm_dev, c_node, qbuf); 1019 return ret; 1020 } 1021 1022 static bool exynos_drm_ipp_check_valid(struct device *dev, 1023 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state) 1024 { 1025 if (ctrl != IPP_CTRL_PLAY) { 1026 if (pm_runtime_suspended(dev)) { 1027 DRM_ERROR("pm:runtime_suspended.\n"); 1028 goto err_status; 1029 } 1030 } 1031 1032 switch (ctrl) { 1033 case IPP_CTRL_PLAY: 1034 if (state != IPP_STATE_IDLE) 1035 goto err_status; 1036 break; 1037 case IPP_CTRL_STOP: 1038 if (state == IPP_STATE_STOP) 1039 goto err_status; 1040 break; 1041 case IPP_CTRL_PAUSE: 1042 if (state != IPP_STATE_START) 1043 goto err_status; 1044 break; 1045 case IPP_CTRL_RESUME: 1046 if (state != IPP_STATE_STOP) 1047 goto err_status; 1048 break; 1049 default: 1050 DRM_ERROR("invalid state.\n"); 1051 goto err_status; 1052 } 1053 1054 return true; 1055 1056 err_status: 1057 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state); 1058 return false; 1059 } 1060 1061 int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data, 1062 struct drm_file *file) 1063 { 1064 struct drm_exynos_file_private *file_priv = file->driver_priv; 1065 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; 1066 struct exynos_drm_ippdrv *ippdrv = NULL; 1067 struct device *dev = priv->dev; 1068 struct ipp_context *ctx = get_ipp_context(dev); 1069 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data; 1070 struct drm_exynos_ipp_cmd_work *cmd_work; 1071 struct drm_exynos_ipp_cmd_node *c_node; 1072 1073 if (!ctx) { 1074 DRM_ERROR("invalid context.\n"); 1075 return -EINVAL; 1076 } 1077 1078 if (!cmd_ctrl) { 1079 DRM_ERROR("invalid control parameter.\n"); 1080 return -EINVAL; 1081 } 1082 1083 DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n", 1084 cmd_ctrl->ctrl, 
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return PTR_ERR(c_node);
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}
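
/*
 * A minimal subscriber sketch for the notifier chain above; the
 * callback name and the interpretation of val/v are illustrative only,
 * since this file does not define the event codes itself:
 *
 *	static int my_ipp_notify(struct notifier_block *nb,
 *				 unsigned long val, void *v)
 *	{
 *		// react to the ipp event here
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_ipp_notify,
 *	};
 *
 *	exynos_drm_ippnb_register(&my_nb);
 */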
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source,destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not support format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not support transform.\n");
				return -EINVAL;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not support size.\n");
				return ret;
			}
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				ret = -EFAULT;
				goto err_unlock;
			}

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}
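
/*
 * Per-command buffer usage, as the switch statements above and below
 * encode it: IPP_CMD_M2M consumes one node from both the src and dst
 * lists per run, IPP_CMD_WB writes back into dst nodes only, and
 * IPP_CMD_OUTPUT scans out from src nodes only.
 */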
static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	mutex_lock(&c_node->mem_lock);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	mutex_unlock(&c_node->mem_lock);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}
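
/*
 * Command worker: ipp_sched_cmd() below runs on the single-threaded
 * "ipp_cmd" workqueue. PLAY/RESUME start the property and, for M2M,
 * block on start_complete until the event worker signals that the
 * transfer finished; STOP/PAUSE tear the property down and signal
 * stop_complete back to the waiting ioctl in exynos_drm_ipp_cmd_ctrl().
 */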
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case is a single-shot operation over multiple
		 * queued buffers, so wait here for the data transfer to
		 * complete before releasing the command node lock.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}
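
/*
 * Completion reporting: ipp_send_event() below consumes the finished
 * memory nodes for the current command, stamps the oldest pending
 * event with the time and buffer ids, and moves it to the drm_file's
 * event list. Userspace then read()s the drm fd and matches the event
 * by base.type == DRM_EXYNOS_IPP_EVENT, using prop_id and
 * buf_id[EXYNOS_DRM_OPS_DST] to decide which buffer to dequeue.
 */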
"dst" : "src", tbuf_id[i]); 1557 1558 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1559 if (ret) 1560 DRM_ERROR("failed to put m_node.\n"); 1561 } 1562 break; 1563 case IPP_CMD_WB: 1564 /* clear buf for finding */ 1565 memset(&qbuf, 0x0, sizeof(qbuf)); 1566 qbuf.ops_id = EXYNOS_DRM_OPS_DST; 1567 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST]; 1568 1569 /* get memory node entry */ 1570 m_node = ipp_find_mem_node(c_node, &qbuf); 1571 if (!m_node) { 1572 DRM_ERROR("empty memory node.\n"); 1573 ret = -ENOMEM; 1574 goto err_mem_unlock; 1575 } 1576 1577 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id; 1578 1579 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1580 if (ret) 1581 DRM_ERROR("failed to put m_node.\n"); 1582 break; 1583 case IPP_CMD_OUTPUT: 1584 /* source memory list */ 1585 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1586 1587 m_node = list_first_entry(head, 1588 struct drm_exynos_ipp_mem_node, list); 1589 if (!m_node) { 1590 DRM_ERROR("empty memory node.\n"); 1591 ret = -ENOMEM; 1592 goto err_mem_unlock; 1593 } 1594 1595 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; 1596 1597 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1598 if (ret) 1599 DRM_ERROR("failed to put m_node.\n"); 1600 break; 1601 default: 1602 DRM_ERROR("invalid operations.\n"); 1603 ret = -EINVAL; 1604 goto err_mem_unlock; 1605 } 1606 mutex_unlock(&c_node->mem_lock); 1607 1608 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST]) 1609 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n", 1610 tbuf_id[1], buf_id[1], property->prop_id); 1611 1612 /* 1613 * command node have event list of destination buffer 1614 * If destination buffer enqueue to mem list, 1615 * then we make event and link to event list tail. 1616 * so, we get first event for first enqueued buffer. 1617 */ 1618 e = list_first_entry(&c_node->event_list, 1619 struct drm_exynos_ipp_send_event, base.link); 1620 1621 do_gettimeofday(&now); 1622 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec); 1623 e->event.tv_sec = now.tv_sec; 1624 e->event.tv_usec = now.tv_usec; 1625 e->event.prop_id = property->prop_id; 1626 1627 /* set buffer id about source destination */ 1628 for_each_ipp_ops(i) 1629 e->event.buf_id[i] = tbuf_id[i]; 1630 1631 spin_lock_irqsave(&drm_dev->event_lock, flags); 1632 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1633 wake_up_interruptible(&e->base.file_priv->event_wait); 1634 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 1635 mutex_unlock(&c_node->event_lock); 1636 1637 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n", 1638 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]); 1639 1640 return 0; 1641 1642 err_mem_unlock: 1643 mutex_unlock(&c_node->mem_lock); 1644 err_event_unlock: 1645 mutex_unlock(&c_node->event_lock); 1646 return ret; 1647 } 1648 1649 void ipp_sched_event(struct work_struct *work) 1650 { 1651 struct drm_exynos_ipp_event_work *event_work = 1652 (struct drm_exynos_ipp_event_work *)work; 1653 struct exynos_drm_ippdrv *ippdrv; 1654 struct drm_exynos_ipp_cmd_node *c_node; 1655 int ret; 1656 1657 if (!event_work) { 1658 DRM_ERROR("failed to get event_work.\n"); 1659 return; 1660 } 1661 1662 DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]); 1663 1664 ippdrv = event_work->ippdrv; 1665 if (!ippdrv) { 1666 DRM_ERROR("failed to get ipp driver.\n"); 1667 return; 1668 } 1669 1670 c_node = ippdrv->c_node; 1671 if (!c_node) { 1672 DRM_ERROR("failed to get command node.\n"); 1673 return; 1674 } 1675 1676 /* 1677 * IPP supports command thread, event 
void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP synchronizes the command and event threads. If userspace
	 * closes the device immediately, IPP still has to synchronize
	 * with the command thread, so signal the completion (and skip
	 * sending the event) even when the command is no longer running.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		u32 ipp_id;

		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ipp_id);
		if (ret || ipp_id == 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ipp_id);

		ippdrv->prop_list.ipp_id = ipp_id;

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* roll back the ipp drivers initialized so far */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
		drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct ipp_context *ctx = get_ipp_context(dev);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);

	return 0;
}
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * Userspace may have entered an abnormal
				 * state (e.g. the process was killed) and
				 * closed the file without issuing the stop
				 * cmd ctrl, so perform the stop operation
				 * here on its behalf.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	kfree(priv);
	return;
}
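
/*
 * This platform driver binds to the "exynos-drm-ipp" platform device
 * created programmatically by exynos_platform_device_ipp_register()
 * above, so the ipp core comes up without a device tree node of its
 * own.
 */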
static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events.
	 * The IPP drivers send their event_work to this thread,
	 * which forwards the events to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands.
	 * The user process creates a command node with the set property
	 * ioctl and queues its start_work on this thread, which then
	 * starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};