/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing; it supports image scaler/rotator
 * and input/output DMA operations using hardware blocks such as FIMC, GSC
 * and Rotator. IPP is an integrating driver for hardware blocks that share
 * these attributes.
 */
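/*
 * A minimal sketch of the expected userspace flow, assuming the
 * DRM_IOCTL_EXYNOS_IPP_* definitions from the exynos_drm.h uapi header of
 * this era (illustrative only; error handling omitted):
 *
 *	struct drm_exynos_ipp_property prop = { ... };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id = prop.prop_id, .buf_type = IPP_BUF_ENQUEUE, ...
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id = prop.prop_id, .ctrl = IPP_CTRL_PLAY,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */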

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multi-open.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
{
	int ret;

	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);

	return ret;
}

static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	mutex_lock(lock);
	obj = idr_find(id_idr, id);
	mutex_unlock(lock);

	return obj;
}

static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
			    struct drm_exynos_ipp_property *property)
{
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
				  !pm_runtime_suspended(ippdrv->dev)))
		return -EBUSY;

	if (ippdrv->check_property &&
	    ippdrv->check_property(ippdrv->dev, property))
		return -EINVAL;

	return 0;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;
	int ret;

	if (ipp_id) {
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
		if (!ippdrv) {
			DRM_DEBUG("ipp%d driver not found\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		ret = ipp_check_driver(ippdrv, property);
		if (ret < 0) {
			DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
			return ERR_PTR(ret);
		}

		return ippdrv;
	} else {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			ret = ipp_check_driver(ippdrv, property);
			if (ret == 0)
				return ippdrv;
		}

		DRM_DEBUG("cannot find driver suitable for given property.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	/*
	 * Search for the ipp driver by the prop_id handle.
	 * The ipp subsystem looks the driver up by prop_id in several
	 * cases, e.g. PAUSE state, queue buf and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if (c_node->property.prop_id == prop_id) {
				mutex_unlock(&ippdrv->cmd_lock);
				return ippdrv;
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * Report the ippdrv count to the user application.
		 * In the first step the application queries the number of
		 * ipp drivers; in the second step it queries each driver's
		 * capabilities using its ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Report a driver's capabilities by ipp_id.
		 * Some devices do not support the writeback or output
		 * interfaces, so the user application uses this ioctl to
		 * pick a suitable ipp driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -ENODEV;
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}
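/*
 * Illustrative two-step discovery from userspace, assuming the
 * DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY uapi ioctl (a sketch, not part of the
 * driver; ids are assumed to be allocated sequentially from 1):
 *
 *	struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	for (id = 1; id <= plist.count; id++) {
 *		struct drm_exynos_ipp_prop_list cap = { .ipp_id = id };
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &cap);
 *		// inspect cap to choose a suitable driver
 *	}
 */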

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node in the ippdrv command list using prop_id.
	 * When a matching, stopped command node is found, store the new
	 * property information in it.
	 */
	mutex_lock(&ippdrv->cmd_lock);
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			mutex_unlock(&ippdrv->cmd_lock);
			DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
				property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}
	mutex_unlock(&ippdrv->cmd_lock);

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&event_work->work, ipp_sched_event);

	return event_work;
}
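/*
 * Note (added for clarity): the cast in ipp_create_cmd_work() relies on
 * struct drm_exynos_ipp_cmd_work embedding its struct work_struct as the
 * first member, so the work pointer and the containing structure share an
 * address; ipp_sched_cmd() casts back the same way. container_of() would
 * express the same relationship without the layout assumption.
 */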

int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/* log the property supplied by the user application */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set-property ioctl normally allocates a new prop_id, but a
	 * prop_id may already have been assigned by an earlier set-property
	 * call, e.g. in the PAUSE state. In that case find the current
	 * prop_id and reuse it instead of allocating a new one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node)
		return -ENOMEM;

	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
	if (ret < 0) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}
	property->prop_id = ret;

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		property->prop_id, property->cmd, (int)ippdrv);

	/* store property information and ippdrv in private data */
	c_node->dev = dev;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_remove_id;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	mutex_lock(&ippdrv->cmd_lock);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);
	mutex_unlock(&ippdrv->cmd_lock);

	/* mark the driver dedicated for commands other than m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_remove_id:
	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
err_clear:
	kfree(c_node);
	return ret;
}

static void ipp_clean_cmd_node(struct ipp_context *ctx,
				struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	switch (c_node->property.cmd) {
	case IPP_CMD_WB:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	case IPP_CMD_OUTPUT:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
	case IPP_CMD_M2M:
	default:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
		       !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	}
}

static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		return -EFAULT;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			return ret;
		}
	}

	return ret;
}

static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info *buf_info;
	int i;

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		return ERR_PTR(-ENOMEM);

	buf_info = &m_node->buf_info;

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			dma_addr_t *addr;

			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info->handles[i] = qbuf->handle[i];
			buf_info->base[i] = *addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
				buf_info->base[i], buf_info->handles[i]);
		}
	}

	m_node->filp = file;
	mutex_lock(&c_node->mem_lock);
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
	mutex_unlock(&c_node->mem_lock);

	return m_node;

err_clear:
	kfree(m_node);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];

		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
					m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	return 0;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}
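/*
 * A sketch of how userspace would consume these events, assuming the
 * struct drm_exynos_ipp_event layout from the uapi header of this era
 * (illustrative only; a real client would poll() the fd first):
 *
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	for (char *p = buf; p < buf + len;) {
 *		struct drm_event *ev = (struct drm_event *)p;
 *		if (ev->type == DRM_EXYNOS_IPP_EVENT) {
 *			struct drm_exynos_ipp_event *ipp_ev = (void *)ev;
 *			// ipp_ev->prop_id and ipp_ev->buf_id[] identify
 *			// the completed buffer
 *		}
 *		p += ev->length;
 *	}
 */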

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	mutex_lock(&c_node->event_lock);
	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);

		/*
		 * qbuf == NULL means delete all events: the stop
		 * operation wants to flush the whole event list.
		 * Otherwise delete only the event with the same buf_id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			goto out_unlock;
		}
	}

out_unlock:
	mutex_unlock(&c_node->event_lock);
	return;
}

static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * the m2m operation has to be started here at queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	/* delete list */
	mutex_lock(&c_node->mem_lock);
	list_for_each_entry_safe(m_node, tm_node,
		&c_node->mem_list[qbuf->ops_id], list) {
		if (m_node->buf_id == qbuf->buf_id &&
		    m_node->ops_id == qbuf->ops_id)
			ipp_put_mem_node(drm_dev, c_node, m_node);
	}
	mutex_unlock(&c_node->mem_lock);
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * First get an event for the destination buffer;
		 * then, in the M2M case, run with the destination
		 * buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs play control for the
			 * streaming feature; other cases just set the
			 * address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}

static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}
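/*
 * Summary of the transitions accepted above (derived from the checks in
 * exynos_drm_ipp_check_valid()):
 *
 *	PLAY:   only from IDLE
 *	STOP:   from any state except STOP
 *	PAUSE:  only from START
 *	RESUME: only from STOP
 *
 * All controls except PLAY additionally require that the device is not
 * runtime-suspended.
 */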

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}
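/*
 * A sketch of a client of the notifier chain above, using the standard
 * notifier_block API (illustrative only; the event value and payload
 * passed to exynos_drm_ippnb_send_event() are defined by the sender):
 *
 *	static int my_ipp_notifier(struct notifier_block *nb,
 *				   unsigned long val, void *data)
 *	{
 *		// react to the ipp event
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_ipp_notifier,
 *	};
 *
 *	exynos_drm_ippnb_register(&my_nb);
 */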
DRM_ERROR("failed to set property.\n"); 1182 ippdrv->c_node = NULL; 1183 goto err_unlock; 1184 } 1185 1186 /* check command */ 1187 switch (property->cmd) { 1188 case IPP_CMD_M2M: 1189 for_each_ipp_ops(i) { 1190 /* source/destination memory list */ 1191 head = &c_node->mem_list[i]; 1192 1193 m_node = list_first_entry(head, 1194 struct drm_exynos_ipp_mem_node, list); 1195 1196 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); 1197 1198 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1199 if (ret) { 1200 DRM_ERROR("failed to set m node.\n"); 1201 goto err_unlock; 1202 } 1203 } 1204 break; 1205 case IPP_CMD_WB: 1206 /* destination memory list */ 1207 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST]; 1208 1209 list_for_each_entry(m_node, head, list) { 1210 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1211 if (ret) { 1212 DRM_ERROR("failed to set m node.\n"); 1213 goto err_unlock; 1214 } 1215 } 1216 break; 1217 case IPP_CMD_OUTPUT: 1218 /* source memory list */ 1219 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1220 1221 list_for_each_entry(m_node, head, list) { 1222 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1223 if (ret) { 1224 DRM_ERROR("failed to set m node.\n"); 1225 goto err_unlock; 1226 } 1227 } 1228 break; 1229 default: 1230 DRM_ERROR("invalid operations.\n"); 1231 ret = -EINVAL; 1232 goto err_unlock; 1233 } 1234 mutex_unlock(&c_node->mem_lock); 1235 1236 DRM_DEBUG_KMS("cmd[%d]\n", property->cmd); 1237 1238 /* start operations */ 1239 if (ippdrv->start) { 1240 ret = ippdrv->start(ippdrv->dev, property->cmd); 1241 if (ret) { 1242 DRM_ERROR("failed to start ops.\n"); 1243 ippdrv->c_node = NULL; 1244 return ret; 1245 } 1246 } 1247 1248 return 0; 1249 1250 err_unlock: 1251 mutex_unlock(&c_node->mem_lock); 1252 ippdrv->c_node = NULL; 1253 return ret; 1254 } 1255 1256 static int ipp_stop_property(struct drm_device *drm_dev, 1257 struct exynos_drm_ippdrv *ippdrv, 1258 struct drm_exynos_ipp_cmd_node *c_node) 1259 { 1260 struct drm_exynos_ipp_mem_node *m_node, *tm_node; 1261 struct drm_exynos_ipp_property *property = &c_node->property; 1262 struct list_head *head; 1263 int ret = 0, i; 1264 1265 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); 1266 1267 /* put event */ 1268 ipp_put_event(c_node, NULL); 1269 1270 mutex_lock(&c_node->mem_lock); 1271 1272 /* check command */ 1273 switch (property->cmd) { 1274 case IPP_CMD_M2M: 1275 for_each_ipp_ops(i) { 1276 /* source/destination memory list */ 1277 head = &c_node->mem_list[i]; 1278 1279 list_for_each_entry_safe(m_node, tm_node, 1280 head, list) { 1281 ret = ipp_put_mem_node(drm_dev, c_node, 1282 m_node); 1283 if (ret) { 1284 DRM_ERROR("failed to put m_node.\n"); 1285 goto err_clear; 1286 } 1287 } 1288 } 1289 break; 1290 case IPP_CMD_WB: 1291 /* destination memory list */ 1292 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST]; 1293 1294 list_for_each_entry_safe(m_node, tm_node, head, list) { 1295 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1296 if (ret) { 1297 DRM_ERROR("failed to put m_node.\n"); 1298 goto err_clear; 1299 } 1300 } 1301 break; 1302 case IPP_CMD_OUTPUT: 1303 /* source memory list */ 1304 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1305 1306 list_for_each_entry_safe(m_node, tm_node, head, list) { 1307 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1308 if (ret) { 1309 DRM_ERROR("failed to put m_node.\n"); 1310 goto err_clear; 1311 } 1312 } 1313 break; 1314 default: 1315 DRM_ERROR("invalid operations.\n"); 1316 ret = -EINVAL; 1317 goto err_clear; 1318 } 1319 1320 err_clear: 1321 mutex_unlock(&c_node->mem_lock); 1322 1323 /* stop 

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	mutex_lock(&c_node->mem_lock);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	mutex_unlock(&c_node->mem_lock);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}

void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer
		 * here, because M2M performs a single-unit operation
		 * over multiple queued buffers and must not return
		 * before the data transfer is done.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}
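/*
 * Command work lifecycle (summary of the code above): the cmd_ctrl ioctl
 * queues start_work/stop_work on the single-threaded cmd workqueue via
 * ipp_handle_cmd_work(). ipp_sched_cmd() then starts or stops the
 * property; for STOP/PAUSE it signals stop_complete, on which
 * exynos_drm_ipp_cmd_ctrl() waits with a 300/200 ms timeout. For M2M,
 * start_complete is signalled from the event thread (ipp_sched_event())
 * once the transfer finishes.
 */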
"dst" : "src", tbuf_id[i]); 1456 1457 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1458 if (ret) 1459 DRM_ERROR("failed to put m_node.\n"); 1460 } 1461 break; 1462 case IPP_CMD_WB: 1463 /* clear buf for finding */ 1464 memset(&qbuf, 0x0, sizeof(qbuf)); 1465 qbuf.ops_id = EXYNOS_DRM_OPS_DST; 1466 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST]; 1467 1468 /* get memory node entry */ 1469 m_node = ipp_find_mem_node(c_node, &qbuf); 1470 if (!m_node) { 1471 DRM_ERROR("empty memory node.\n"); 1472 ret = -ENOMEM; 1473 goto err_mem_unlock; 1474 } 1475 1476 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id; 1477 1478 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1479 if (ret) 1480 DRM_ERROR("failed to put m_node.\n"); 1481 break; 1482 case IPP_CMD_OUTPUT: 1483 /* source memory list */ 1484 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1485 1486 m_node = list_first_entry(head, 1487 struct drm_exynos_ipp_mem_node, list); 1488 1489 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; 1490 1491 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1492 if (ret) 1493 DRM_ERROR("failed to put m_node.\n"); 1494 break; 1495 default: 1496 DRM_ERROR("invalid operations.\n"); 1497 ret = -EINVAL; 1498 goto err_mem_unlock; 1499 } 1500 mutex_unlock(&c_node->mem_lock); 1501 1502 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST]) 1503 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n", 1504 tbuf_id[1], buf_id[1], property->prop_id); 1505 1506 /* 1507 * command node have event list of destination buffer 1508 * If destination buffer enqueue to mem list, 1509 * then we make event and link to event list tail. 1510 * so, we get first event for first enqueued buffer. 1511 */ 1512 e = list_first_entry(&c_node->event_list, 1513 struct drm_exynos_ipp_send_event, base.link); 1514 1515 do_gettimeofday(&now); 1516 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec); 1517 e->event.tv_sec = now.tv_sec; 1518 e->event.tv_usec = now.tv_usec; 1519 e->event.prop_id = property->prop_id; 1520 1521 /* set buffer id about source destination */ 1522 for_each_ipp_ops(i) 1523 e->event.buf_id[i] = tbuf_id[i]; 1524 1525 spin_lock_irqsave(&drm_dev->event_lock, flags); 1526 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1527 wake_up_interruptible(&e->base.file_priv->event_wait); 1528 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 1529 mutex_unlock(&c_node->event_lock); 1530 1531 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n", 1532 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]); 1533 1534 return 0; 1535 1536 err_mem_unlock: 1537 mutex_unlock(&c_node->mem_lock); 1538 err_event_unlock: 1539 mutex_unlock(&c_node->event_lock); 1540 return ret; 1541 } 1542 1543 void ipp_sched_event(struct work_struct *work) 1544 { 1545 struct drm_exynos_ipp_event_work *event_work = 1546 (struct drm_exynos_ipp_event_work *)work; 1547 struct exynos_drm_ippdrv *ippdrv; 1548 struct drm_exynos_ipp_cmd_node *c_node; 1549 int ret; 1550 1551 if (!event_work) { 1552 DRM_ERROR("failed to get event_work.\n"); 1553 return; 1554 } 1555 1556 DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]); 1557 1558 ippdrv = event_work->ippdrv; 1559 if (!ippdrv) { 1560 DRM_ERROR("failed to get ipp driver.\n"); 1561 return; 1562 } 1563 1564 c_node = ippdrv->c_node; 1565 if (!c_node) { 1566 DRM_ERROR("failed to get command node.\n"); 1567 return; 1568 } 1569 1570 /* 1571 * IPP supports command thread, event thread synchronization. 
	 * If userland closes the device immediately, synchronize with
	 * the command thread by signalling the start completion instead
	 * of carrying on with the operation.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
		if (ret < 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}
		ippdrv->prop_list.ipp_id = ret;

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ret);

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* roll back the ipp driver entries set up so far */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
		drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct ipp_context *ctx = get_ipp_context(dev);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	file_priv->ipp_dev = dev;

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->dev == file_priv->ipp_dev) {
				/*
				 * Userland entered an abnormal state:
				 * the process was killed and the file
				 * closed without the stop cmd ctrl ever
				 * being issued, so perform the stop
				 * operation here.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single-threaded workqueue for ipp events.
	 * IPP drivers queue event_work on this thread, and the event
	 * thread delivers the events to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single-threaded workqueue for ipp commands.
	 * A user process creates a command node with the set-property
	 * ioctl and queues its start_work on this thread, which then
	 * starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove and destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};