/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image scaling,
 * rotation and input/output DMA operations using hardware blocks such
 * as FIMC, GSC and Rotator. IPP is an umbrella driver integrating these
 * devices, which all share the same set of attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multiple opens.
 * 7. implement power and sysmmu control in power_on.
 */
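
/*
 * For orientation, a rough sketch of the usual user-space call sequence
 * against this driver. This is illustrative only and not part of the
 * driver; the uapi structs and ioctl numbers live in <drm/exynos_drm.h>,
 * and error handling is omitted:
 *
 *	struct drm_exynos_ipp_property prop = { ... };
 *	struct drm_exynos_ipp_queue_buf qbuf = { ... };
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = { ... };
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *	ctrl.prop_id = prop.prop_id;
 *	ctrl.ctrl = IPP_CTRL_PLAY;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */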

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure for an ipp event.
 *
 * @base: base of the pending drm event.
 * @event: ipp event payload.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
{
	int ret;

	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);

	return ret;
}

static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	mutex_lock(lock);
	obj = idr_find(id_idr, id);
	mutex_unlock(lock);

	return obj;
}

static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
			    struct drm_exynos_ipp_property *property)
{
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
				  !pm_runtime_suspended(ippdrv->dev)))
		return -EBUSY;

	if (ippdrv->check_property &&
	    ippdrv->check_property(ippdrv->dev, property))
		return -EINVAL;

	return 0;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;
	int ret;

	if (ipp_id) {
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
		if (!ippdrv) {
			DRM_DEBUG("ipp%d driver not found\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		ret = ipp_check_driver(ippdrv, property);
		if (ret < 0) {
			DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
			return ERR_PTR(ret);
		}

		return ippdrv;
	} else {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			ret = ipp_check_driver(ippdrv, property);
			if (ret == 0)
				return ippdrv;
		}

		DRM_DEBUG("cannot find driver suitable for given property.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	/*
	 * Search the ipp driver list by the prop_id handle.
	 * Sometimes the ipp subsystem needs to find a driver by prop_id,
	 * e.g. in PAUSE state, on queue buf or on command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if (c_node->property.prop_id == prop_id) {
				mutex_unlock(&ippdrv->cmd_lock);
				return ippdrv;
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * Report the number of registered ippdrvs to the user
		 * application. In the first step the application queries
		 * the ippdrv count; in the second step it queries each
		 * driver's capabilities using the ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Report the capabilities of the ippdrv given by ipp_id.
		 * Some devices do not support the writeback or output
		 * interfaces, so the user application uses this ioctl
		 * to detect the correct ipp driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -ENODEV;
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}
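
/*
 * A minimal user-space sketch of the two-step query described above
 * (illustrative only; it assumes the uapi structs from
 * <drm/exynos_drm.h> and that ipp ids are allocated contiguously from
 * 1, as ipp_subdrv_probe() does at init time):
 *
 *	struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
 *	int i;
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	for (i = 1; i <= plist.count; i++) {
 *		plist.ipp_id = i;
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *		... inspect the returned capabilities ...
 *	}
 */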
"dst" : "src", config->fmt); 315 316 DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n", 317 pos->x, pos->y, pos->w, pos->h, 318 sz->hsize, sz->vsize, config->flip, config->degree); 319 } 320 321 static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) 322 { 323 struct drm_exynos_ipp_cmd_work *cmd_work; 324 325 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); 326 if (!cmd_work) 327 return ERR_PTR(-ENOMEM); 328 329 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd); 330 331 return cmd_work; 332 } 333 334 static struct drm_exynos_ipp_event_work *ipp_create_event_work(void) 335 { 336 struct drm_exynos_ipp_event_work *event_work; 337 338 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); 339 if (!event_work) 340 return ERR_PTR(-ENOMEM); 341 342 INIT_WORK(&event_work->work, ipp_sched_event); 343 344 return event_work; 345 } 346 347 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, 348 struct drm_file *file) 349 { 350 struct drm_exynos_file_private *file_priv = file->driver_priv; 351 struct device *dev = file_priv->ipp_dev; 352 struct ipp_context *ctx = get_ipp_context(dev); 353 struct drm_exynos_ipp_property *property = data; 354 struct exynos_drm_ippdrv *ippdrv; 355 struct drm_exynos_ipp_cmd_node *c_node; 356 u32 prop_id; 357 int ret, i; 358 359 if (!ctx) { 360 DRM_ERROR("invalid context.\n"); 361 return -EINVAL; 362 } 363 364 if (!property) { 365 DRM_ERROR("invalid property parameter.\n"); 366 return -EINVAL; 367 } 368 369 prop_id = property->prop_id; 370 371 /* 372 * This is log print for user application property. 373 * user application set various property. 374 */ 375 for_each_ipp_ops(i) 376 ipp_print_property(property, i); 377 378 /* 379 * In case prop_id is not zero try to set existing property. 380 */ 381 if (prop_id) { 382 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id); 383 384 if (!c_node || c_node->filp != file) { 385 DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id); 386 return -EINVAL; 387 } 388 389 if (c_node->state != IPP_STATE_STOP) { 390 DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id); 391 return -EINVAL; 392 } 393 394 c_node->property = *property; 395 396 return 0; 397 } 398 399 /* find ipp driver using ipp id */ 400 ippdrv = ipp_find_driver(ctx, property); 401 if (IS_ERR(ippdrv)) { 402 DRM_ERROR("failed to get ipp driver.\n"); 403 return -EINVAL; 404 } 405 406 /* allocate command node */ 407 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); 408 if (!c_node) 409 return -ENOMEM; 410 411 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node); 412 if (ret < 0) { 413 DRM_ERROR("failed to create id.\n"); 414 goto err_clear; 415 } 416 property->prop_id = ret; 417 418 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", 419 property->prop_id, property->cmd, (int)ippdrv); 420 421 /* stored property information and ippdrv in private data */ 422 c_node->property = *property; 423 c_node->state = IPP_STATE_IDLE; 424 c_node->filp = file; 425 426 c_node->start_work = ipp_create_cmd_work(); 427 if (IS_ERR(c_node->start_work)) { 428 DRM_ERROR("failed to create start work.\n"); 429 goto err_remove_id; 430 } 431 432 c_node->stop_work = ipp_create_cmd_work(); 433 if (IS_ERR(c_node->stop_work)) { 434 DRM_ERROR("failed to create stop work.\n"); 435 goto err_free_start; 436 } 437 438 c_node->event_work = ipp_create_event_work(); 439 if (IS_ERR(c_node->event_work)) { 440 DRM_ERROR("failed to create event work.\n"); 441 goto err_free_stop; 442 } 443 444 mutex_init(&c_node->lock); 445 mutex_init(&c_node->mem_lock); 446 

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							c_node->filp);
	}

	list_del(&m_node->list);
	kfree(m_node);

	return 0;
}

static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info *buf_info;
	int i;

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		return ERR_PTR(-ENOMEM);

	buf_info = &m_node->buf_info;

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;
	INIT_LIST_HEAD(&m_node->list);

	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			dma_addr_t *addr;

			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], c_node->filp);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				ipp_put_mem_node(drm_dev, c_node, m_node);
				return ERR_PTR(-EFAULT);
			}

			buf_info->handles[i] = qbuf->handle[i];
			buf_info->base[i] = *addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
				      buf_info->base[i], buf_info->handles[i]);
		}
	}

	mutex_lock(&c_node->mem_lock);
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
	mutex_unlock(&c_node->mem_lock);

	return m_node;
}

static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node, int ops)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct list_head *head = &c_node->mem_list[ops];

	mutex_lock(&c_node->mem_lock);

	list_for_each_entry_safe(m_node, tm_node, head, list) {
		int ret;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
	}

	mutex_unlock(&c_node->mem_lock);
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		c_node->filp->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = c_node->filp;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	mutex_lock(&c_node->event_lock);
	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);

		/*
		 * A NULL qbuf means "delete all events": the stop
		 * operation wants to flush the whole event list.
		 * Otherwise delete only the event with the same buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			goto out_unlock;
		}
	}

out_unlock:
	mutex_unlock(&c_node->event_lock);
	return;
}

static void ipp_clean_cmd_node(struct ipp_context *ctx,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	int i;

	/* cancel works */
	cancel_work_sync(&c_node->start_work->work);
	cancel_work_sync(&c_node->stop_work->work);
	cancel_work_sync(&c_node->event_work->work);

	/* put event */
	ipp_put_event(c_node, NULL);

	for_each_ipp_ops(i)
		ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);

	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	switch (c_node->property.cmd) {
	case IPP_CMD_WB:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	case IPP_CMD_OUTPUT:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
	case IPP_CMD_M2M:
	default:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
		       !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	}
}
DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id); 708 709 /* source/destination memory list */ 710 head = &c_node->mem_list[qbuf->ops_id]; 711 712 /* find memory node from memory list */ 713 list_for_each_entry(m_node, head, list) { 714 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); 715 716 /* compare buffer id */ 717 if (m_node->buf_id == qbuf->buf_id) 718 return m_node; 719 } 720 721 return NULL; 722 } 723 724 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, 725 struct drm_exynos_ipp_cmd_node *c_node, 726 struct drm_exynos_ipp_mem_node *m_node) 727 { 728 struct exynos_drm_ipp_ops *ops = NULL; 729 int ret = 0; 730 731 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 732 733 if (!m_node) { 734 DRM_ERROR("invalid queue node.\n"); 735 return -EFAULT; 736 } 737 738 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 739 740 /* get operations callback */ 741 ops = ippdrv->ops[m_node->ops_id]; 742 if (!ops) { 743 DRM_ERROR("not support ops.\n"); 744 return -EFAULT; 745 } 746 747 /* set address and enable irq */ 748 if (ops->set_addr) { 749 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info, 750 m_node->buf_id, IPP_BUF_ENQUEUE); 751 if (ret) { 752 DRM_ERROR("failed to set addr.\n"); 753 return ret; 754 } 755 } 756 757 return ret; 758 } 759 760 static void ipp_handle_cmd_work(struct device *dev, 761 struct exynos_drm_ippdrv *ippdrv, 762 struct drm_exynos_ipp_cmd_work *cmd_work, 763 struct drm_exynos_ipp_cmd_node *c_node) 764 { 765 struct ipp_context *ctx = get_ipp_context(dev); 766 767 cmd_work->ippdrv = ippdrv; 768 cmd_work->c_node = c_node; 769 queue_work(ctx->cmd_workq, &cmd_work->work); 770 } 771 772 static int ipp_queue_buf_with_run(struct device *dev, 773 struct drm_exynos_ipp_cmd_node *c_node, 774 struct drm_exynos_ipp_mem_node *m_node, 775 struct drm_exynos_ipp_queue_buf *qbuf) 776 { 777 struct exynos_drm_ippdrv *ippdrv; 778 struct drm_exynos_ipp_property *property; 779 struct exynos_drm_ipp_ops *ops; 780 int ret; 781 782 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id); 783 if (IS_ERR(ippdrv)) { 784 DRM_ERROR("failed to get ipp driver.\n"); 785 return -EFAULT; 786 } 787 788 ops = ippdrv->ops[qbuf->ops_id]; 789 if (!ops) { 790 DRM_ERROR("failed to get ops.\n"); 791 return -EFAULT; 792 } 793 794 property = &c_node->property; 795 796 if (c_node->state != IPP_STATE_START) { 797 DRM_DEBUG_KMS("bypass for invalid state.\n"); 798 return 0; 799 } 800 801 mutex_lock(&c_node->mem_lock); 802 if (!ipp_check_mem_list(c_node)) { 803 mutex_unlock(&c_node->mem_lock); 804 DRM_DEBUG_KMS("empty memory.\n"); 805 return 0; 806 } 807 808 /* 809 * If set destination buffer and enabled clock, 810 * then m2m operations need start operations at queue_buf 811 */ 812 if (ipp_is_m2m_cmd(property->cmd)) { 813 struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work; 814 815 cmd_work->ctrl = IPP_CTRL_PLAY; 816 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); 817 } else { 818 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 819 if (ret) { 820 mutex_unlock(&c_node->mem_lock); 821 DRM_ERROR("failed to set m node.\n"); 822 return ret; 823 } 824 } 825 mutex_unlock(&c_node->mem_lock); 826 827 return 0; 828 } 829 830 static void ipp_clean_queue_buf(struct drm_device *drm_dev, 831 struct drm_exynos_ipp_cmd_node *c_node, 832 struct drm_exynos_ipp_queue_buf *qbuf) 833 { 834 struct drm_exynos_ipp_mem_node *m_node, *tm_node; 835 836 /* delete list */ 837 mutex_lock(&c_node->mem_lock); 838 list_for_each_entry_safe(m_node, tm_node, 839 &c_node->mem_list[qbuf->ops_id], list) { 840 if (m_node->buf_id == 

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * The first step is to get an event for the destination
		 * buffer; the second step, in the M2M case, is to run
		 * with the destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the play control for the
			 * streaming feature; the other cases set the
			 * address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
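
/*
 * A sketch of how an M2M buffer pair is typically queued from user
 * space (illustrative only; src_gem_handle and dst_gem_handle are
 * placeholders for GEM handles previously created on this fd, and a
 * single-planar format is assumed):
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id = prop.prop_id,
 *		.ops_id = EXYNOS_DRM_OPS_SRC,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		.buf_id = 0,
 *		.handle[EXYNOS_DRM_PLANAR_Y] = src_gem_handle,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *	qbuf.ops_id = EXYNOS_DRM_OPS_DST;
 *	qbuf.handle[EXYNOS_DRM_PLANAR_Y] = dst_gem_handle;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 * Dequeueing uses the same struct with IPP_BUF_DEQUEUE once the
 * completion event for this buf_id has been received.
 */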

static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
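
/*
 * For reference, the state transitions driven by this ioctl, as
 * enforced by exynos_drm_ipp_check_valid() above:
 *
 *	IDLE  --PLAY--->  START
 *	START --PAUSE-->  STOP  --RESUME-->  START
 *	START --STOP--->  (command node destroyed)
 *
 * and a minimal user-space stop sequence (sketch only; prop is the
 * property set up earlier):
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id = prop.prop_id,
 *		.ctrl = IPP_CTRL_STOP,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */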

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}

static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		return -EINVAL;
	}

	/* set source, destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret)
				return ret;
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret)
				return ret;
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	int i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i)
			ipp_clean_mem_nodes(drm_dev, c_node, i);
		break;
	case IPP_CMD_WB:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
		break;
	case IPP_CMD_OUTPUT:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	return 0;
}

void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		container_of(work, struct drm_exynos_ipp_cmd_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer,
		 * because M2M runs a single unit of operation over
		 * multiple queued buffers and needs to wait until the
		 * data transfer is done.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}

static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node holds the event list for the destination
	 * buffer. When a destination buffer is enqueued to the mem list,
	 * an event is created and linked to the tail of the event list,
	 * so the first event always belongs to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id for source and destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}
"dst" : "src", tbuf_id[i]); 1415 1416 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1417 if (ret) 1418 DRM_ERROR("failed to put m_node.\n"); 1419 } 1420 break; 1421 case IPP_CMD_WB: 1422 /* clear buf for finding */ 1423 memset(&qbuf, 0x0, sizeof(qbuf)); 1424 qbuf.ops_id = EXYNOS_DRM_OPS_DST; 1425 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST]; 1426 1427 /* get memory node entry */ 1428 m_node = ipp_find_mem_node(c_node, &qbuf); 1429 if (!m_node) { 1430 DRM_ERROR("empty memory node.\n"); 1431 ret = -ENOMEM; 1432 goto err_mem_unlock; 1433 } 1434 1435 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id; 1436 1437 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1438 if (ret) 1439 DRM_ERROR("failed to put m_node.\n"); 1440 break; 1441 case IPP_CMD_OUTPUT: 1442 /* source memory list */ 1443 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1444 1445 m_node = list_first_entry(head, 1446 struct drm_exynos_ipp_mem_node, list); 1447 1448 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; 1449 1450 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1451 if (ret) 1452 DRM_ERROR("failed to put m_node.\n"); 1453 break; 1454 default: 1455 DRM_ERROR("invalid operations.\n"); 1456 ret = -EINVAL; 1457 goto err_mem_unlock; 1458 } 1459 mutex_unlock(&c_node->mem_lock); 1460 1461 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST]) 1462 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n", 1463 tbuf_id[1], buf_id[1], property->prop_id); 1464 1465 /* 1466 * command node have event list of destination buffer 1467 * If destination buffer enqueue to mem list, 1468 * then we make event and link to event list tail. 1469 * so, we get first event for first enqueued buffer. 1470 */ 1471 e = list_first_entry(&c_node->event_list, 1472 struct drm_exynos_ipp_send_event, base.link); 1473 1474 do_gettimeofday(&now); 1475 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec); 1476 e->event.tv_sec = now.tv_sec; 1477 e->event.tv_usec = now.tv_usec; 1478 e->event.prop_id = property->prop_id; 1479 1480 /* set buffer id about source destination */ 1481 for_each_ipp_ops(i) 1482 e->event.buf_id[i] = tbuf_id[i]; 1483 1484 spin_lock_irqsave(&drm_dev->event_lock, flags); 1485 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1486 wake_up_interruptible(&e->base.file_priv->event_wait); 1487 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 1488 mutex_unlock(&c_node->event_lock); 1489 1490 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n", 1491 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]); 1492 1493 return 0; 1494 1495 err_mem_unlock: 1496 mutex_unlock(&c_node->mem_lock); 1497 err_event_unlock: 1498 mutex_unlock(&c_node->event_lock); 1499 return ret; 1500 } 1501 1502 void ipp_sched_event(struct work_struct *work) 1503 { 1504 struct drm_exynos_ipp_event_work *event_work = 1505 container_of(work, struct drm_exynos_ipp_event_work, work); 1506 struct exynos_drm_ippdrv *ippdrv; 1507 struct drm_exynos_ipp_cmd_node *c_node; 1508 int ret; 1509 1510 if (!event_work) { 1511 DRM_ERROR("failed to get event_work.\n"); 1512 return; 1513 } 1514 1515 DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]); 1516 1517 ippdrv = event_work->ippdrv; 1518 if (!ippdrv) { 1519 DRM_ERROR("failed to get ipp driver.\n"); 1520 return; 1521 } 1522 1523 c_node = ippdrv->c_node; 1524 if (!c_node) { 1525 DRM_ERROR("failed to get command node.\n"); 1526 return; 1527 } 1528 1529 /* 1530 * IPP supports command thread, event thread synchronization. 

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
		if (ret < 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}
		ippdrv->prop_list.ipp_id = ret;

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ret);

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* get ipp driver entry */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
						drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct ipp_context *ctx = get_ipp_context(dev);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	file_priv->ipp_dev = dev;

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->filp == file) {
				/*
				 * User space went into an abnormal state:
				 * the process was killed and the file was
				 * closed, so the stop cmd ctrl was never
				 * called. Perform the stop operation here
				 * in that case.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events.
	 * IPP provides an event thread for the IPP drivers: a driver
	 * sends its event_work to this thread, and the event thread
	 * delivers the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands.
	 * IPP provides a command thread for the user process: the user
	 * process creates a command node with the set property ioctl,
	 * builds a start_work and sends it to the command thread, which
	 * then starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove, destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
	},
};