1 /* 2 * Copyright (C) 2012 Samsung Electronics Co.Ltd 3 * Authors: 4 * Eunchul Kim <chulspro.kim@samsung.com> 5 * Jinyoung Jeon <jy0.jeon@samsung.com> 6 * Sangmin Lee <lsmin.lee@samsung.com> 7 * 8 * This program is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License as published by the 10 * Free Software Foundation; either version 2 of the License, or (at your 11 * option) any later version. 12 * 13 */ 14 #include <linux/kernel.h> 15 #include <linux/platform_device.h> 16 #include <linux/types.h> 17 #include <linux/clk.h> 18 #include <linux/pm_runtime.h> 19 20 #include <drm/drmP.h> 21 #include <drm/exynos_drm.h> 22 #include "exynos_drm_drv.h" 23 #include "exynos_drm_gem.h" 24 #include "exynos_drm_ipp.h" 25 #include "exynos_drm_iommu.h" 26 27 /* 28 * IPP stands for Image Post Processing and 29 * supports image scaler/rotator and input/output DMA operations. 30 * using FIMC, GSC, Rotator, so on. 31 * IPP is integration device driver of same attribute h/w 32 */ 33 34 /* 35 * TODO 36 * 1. expand command control id. 37 * 2. integrate property and config. 38 * 3. removed send_event id check routine. 39 * 4. compare send_event id if needed. 40 * 5. free subdrv_remove notifier callback list if needed. 41 * 6. need to check subdrv_open about multi-open. 42 * 7. need to power_on implement power and sysmmu ctrl. 43 */ 44 45 #define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev)) 46 #define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M) 47 48 /* 49 * A structure of event. 50 * 51 * @base: base of event. 52 * @event: ipp event. 53 */ 54 struct drm_exynos_ipp_send_event { 55 struct drm_pending_event base; 56 struct drm_exynos_ipp_event event; 57 }; 58 59 /* 60 * A structure of memory node. 61 * 62 * @list: list head to memory queue information. 63 * @ops_id: id of operations. 64 * @prop_id: id of property. 65 * @buf_id: id of buffer. 66 * @buf_info: gem objects and dma address, size. 67 * @filp: a pointer to drm_file. 68 */ 69 struct drm_exynos_ipp_mem_node { 70 struct list_head list; 71 enum drm_exynos_ops_id ops_id; 72 u32 prop_id; 73 u32 buf_id; 74 struct drm_exynos_ipp_buf_info buf_info; 75 }; 76 77 /* 78 * A structure of ipp context. 79 * 80 * @subdrv: prepare initialization using subdrv. 81 * @ipp_lock: lock for synchronization of access to ipp_idr. 82 * @prop_lock: lock for synchronization of access to prop_idr. 83 * @ipp_idr: ipp driver idr. 84 * @prop_idr: property idr. 85 * @event_workq: event work queue. 86 * @cmd_workq: command work queue. 87 */ 88 struct ipp_context { 89 struct exynos_drm_subdrv subdrv; 90 struct mutex ipp_lock; 91 struct mutex prop_lock; 92 struct idr ipp_idr; 93 struct idr prop_idr; 94 struct workqueue_struct *event_workq; 95 struct workqueue_struct *cmd_workq; 96 }; 97 98 static LIST_HEAD(exynos_drm_ippdrv_list); 99 static DEFINE_MUTEX(exynos_drm_ippdrv_lock); 100 static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list); 101 102 int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) 103 { 104 mutex_lock(&exynos_drm_ippdrv_lock); 105 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list); 106 mutex_unlock(&exynos_drm_ippdrv_lock); 107 108 return 0; 109 } 110 111 int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv) 112 { 113 mutex_lock(&exynos_drm_ippdrv_lock); 114 list_del(&ippdrv->drv_list); 115 mutex_unlock(&exynos_drm_ippdrv_lock); 116 117 return 0; 118 } 119 120 static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj) 121 { 122 int ret; 123 124 mutex_lock(lock); 125 ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL); 126 mutex_unlock(lock); 127 128 return ret; 129 } 130 131 static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id) 132 { 133 mutex_lock(lock); 134 idr_remove(id_idr, id); 135 mutex_unlock(lock); 136 } 137 138 static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id) 139 { 140 void *obj; 141 142 mutex_lock(lock); 143 obj = idr_find(id_idr, id); 144 mutex_unlock(lock); 145 146 return obj; 147 } 148 149 static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv, 150 struct drm_exynos_ipp_property *property) 151 { 152 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) && 153 !pm_runtime_suspended(ippdrv->dev))) 154 return -EBUSY; 155 156 if (ippdrv->check_property && 157 ippdrv->check_property(ippdrv->dev, property)) 158 return -EINVAL; 159 160 return 0; 161 } 162 163 static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx, 164 struct drm_exynos_ipp_property *property) 165 { 166 struct exynos_drm_ippdrv *ippdrv; 167 u32 ipp_id = property->ipp_id; 168 int ret; 169 170 if (ipp_id) { 171 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id); 172 if (!ippdrv) { 173 DRM_DEBUG("ipp%d driver not found\n", ipp_id); 174 return ERR_PTR(-ENODEV); 175 } 176 177 ret = ipp_check_driver(ippdrv, property); 178 if (ret < 0) { 179 DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret); 180 return ERR_PTR(ret); 181 } 182 183 return ippdrv; 184 } else { 185 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 186 ret = ipp_check_driver(ippdrv, property); 187 if (ret == 0) 188 return ippdrv; 189 } 190 191 DRM_DEBUG("cannot find driver suitable for given property.\n"); 192 } 193 194 return ERR_PTR(-ENODEV); 195 } 196 197 static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id) 198 { 199 struct exynos_drm_ippdrv *ippdrv; 200 struct drm_exynos_ipp_cmd_node *c_node; 201 int count = 0; 202 203 DRM_DEBUG_KMS("prop_id[%d]\n", prop_id); 204 205 /* 206 * This case is search ipp driver by prop_id handle. 207 * sometimes, ipp subsystem find driver by prop_id. 208 * e.g PAUSE state, queue buf, command control. 209 */ 210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 211 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); 212 213 mutex_lock(&ippdrv->cmd_lock); 214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 215 if (c_node->property.prop_id == prop_id) { 216 mutex_unlock(&ippdrv->cmd_lock); 217 return ippdrv; 218 } 219 } 220 mutex_unlock(&ippdrv->cmd_lock); 221 } 222 223 return ERR_PTR(-ENODEV); 224 } 225 226 int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data, 227 struct drm_file *file) 228 { 229 struct drm_exynos_file_private *file_priv = file->driver_priv; 230 struct device *dev = file_priv->ipp_dev; 231 struct ipp_context *ctx = get_ipp_context(dev); 232 struct drm_exynos_ipp_prop_list *prop_list = data; 233 struct exynos_drm_ippdrv *ippdrv; 234 int count = 0; 235 236 if (!ctx) { 237 DRM_ERROR("invalid context.\n"); 238 return -EINVAL; 239 } 240 241 if (!prop_list) { 242 DRM_ERROR("invalid property parameter.\n"); 243 return -EINVAL; 244 } 245 246 DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id); 247 248 if (!prop_list->ipp_id) { 249 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) 250 count++; 251 252 /* 253 * Supports ippdrv list count for user application. 254 * First step user application getting ippdrv count. 255 * and second step getting ippdrv capability using ipp_id. 256 */ 257 prop_list->count = count; 258 } else { 259 /* 260 * Getting ippdrv capability by ipp_id. 261 * some device not supported wb, output interface. 262 * so, user application detect correct ipp driver 263 * using this ioctl. 264 */ 265 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, 266 prop_list->ipp_id); 267 if (!ippdrv) { 268 DRM_ERROR("not found ipp%d driver.\n", 269 prop_list->ipp_id); 270 return -ENODEV; 271 } 272 273 *prop_list = ippdrv->prop_list; 274 } 275 276 return 0; 277 } 278 279 static void ipp_print_property(struct drm_exynos_ipp_property *property, 280 int idx) 281 { 282 struct drm_exynos_ipp_config *config = &property->config[idx]; 283 struct drm_exynos_pos *pos = &config->pos; 284 struct drm_exynos_sz *sz = &config->sz; 285 286 DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n", 287 property->prop_id, idx ? "dst" : "src", config->fmt); 288 289 DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n", 290 pos->x, pos->y, pos->w, pos->h, 291 sz->hsize, sz->vsize, config->flip, config->degree); 292 } 293 294 static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) 295 { 296 struct drm_exynos_ipp_cmd_work *cmd_work; 297 298 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); 299 if (!cmd_work) 300 return ERR_PTR(-ENOMEM); 301 302 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd); 303 304 return cmd_work; 305 } 306 307 static struct drm_exynos_ipp_event_work *ipp_create_event_work(void) 308 { 309 struct drm_exynos_ipp_event_work *event_work; 310 311 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); 312 if (!event_work) 313 return ERR_PTR(-ENOMEM); 314 315 INIT_WORK(&event_work->work, ipp_sched_event); 316 317 return event_work; 318 } 319 320 int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, 321 struct drm_file *file) 322 { 323 struct drm_exynos_file_private *file_priv = file->driver_priv; 324 struct device *dev = file_priv->ipp_dev; 325 struct ipp_context *ctx = get_ipp_context(dev); 326 struct drm_exynos_ipp_property *property = data; 327 struct exynos_drm_ippdrv *ippdrv; 328 struct drm_exynos_ipp_cmd_node *c_node; 329 u32 prop_id; 330 int ret, i; 331 332 if (!ctx) { 333 DRM_ERROR("invalid context.\n"); 334 return -EINVAL; 335 } 336 337 if (!property) { 338 DRM_ERROR("invalid property parameter.\n"); 339 return -EINVAL; 340 } 341 342 prop_id = property->prop_id; 343 344 /* 345 * This is log print for user application property. 346 * user application set various property. 347 */ 348 for_each_ipp_ops(i) 349 ipp_print_property(property, i); 350 351 /* 352 * In case prop_id is not zero try to set existing property. 353 */ 354 if (prop_id) { 355 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id); 356 357 if (!c_node || c_node->filp != file) { 358 DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id); 359 return -EINVAL; 360 } 361 362 if (c_node->state != IPP_STATE_STOP) { 363 DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id); 364 return -EINVAL; 365 } 366 367 c_node->property = *property; 368 369 return 0; 370 } 371 372 /* find ipp driver using ipp id */ 373 ippdrv = ipp_find_driver(ctx, property); 374 if (IS_ERR(ippdrv)) { 375 DRM_ERROR("failed to get ipp driver.\n"); 376 return -EINVAL; 377 } 378 379 /* allocate command node */ 380 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); 381 if (!c_node) 382 return -ENOMEM; 383 384 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node); 385 if (ret < 0) { 386 DRM_ERROR("failed to create id.\n"); 387 goto err_clear; 388 } 389 property->prop_id = ret; 390 391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", 392 property->prop_id, property->cmd, (int)ippdrv); 393 394 /* stored property information and ippdrv in private data */ 395 c_node->property = *property; 396 c_node->state = IPP_STATE_IDLE; 397 c_node->filp = file; 398 399 c_node->start_work = ipp_create_cmd_work(); 400 if (IS_ERR(c_node->start_work)) { 401 DRM_ERROR("failed to create start work.\n"); 402 ret = PTR_ERR(c_node->start_work); 403 goto err_remove_id; 404 } 405 406 c_node->stop_work = ipp_create_cmd_work(); 407 if (IS_ERR(c_node->stop_work)) { 408 DRM_ERROR("failed to create stop work.\n"); 409 ret = PTR_ERR(c_node->stop_work); 410 goto err_free_start; 411 } 412 413 c_node->event_work = ipp_create_event_work(); 414 if (IS_ERR(c_node->event_work)) { 415 DRM_ERROR("failed to create event work.\n"); 416 ret = PTR_ERR(c_node->event_work); 417 goto err_free_stop; 418 } 419 420 mutex_init(&c_node->lock); 421 mutex_init(&c_node->mem_lock); 422 mutex_init(&c_node->event_lock); 423 424 init_completion(&c_node->start_complete); 425 init_completion(&c_node->stop_complete); 426 427 for_each_ipp_ops(i) 428 INIT_LIST_HEAD(&c_node->mem_list[i]); 429 430 INIT_LIST_HEAD(&c_node->event_list); 431 mutex_lock(&ippdrv->cmd_lock); 432 list_add_tail(&c_node->list, &ippdrv->cmd_list); 433 mutex_unlock(&ippdrv->cmd_lock); 434 435 /* make dedicated state without m2m */ 436 if (!ipp_is_m2m_cmd(property->cmd)) 437 ippdrv->dedicated = true; 438 439 return 0; 440 441 err_free_stop: 442 kfree(c_node->stop_work); 443 err_free_start: 444 kfree(c_node->start_work); 445 err_remove_id: 446 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id); 447 err_clear: 448 kfree(c_node); 449 return ret; 450 } 451 452 static int ipp_validate_mem_node(struct drm_device *drm_dev, 453 struct drm_exynos_ipp_mem_node *m_node, 454 struct drm_exynos_ipp_cmd_node *c_node) 455 { 456 struct drm_exynos_ipp_config *ipp_cfg; 457 unsigned int num_plane; 458 unsigned long min_size, size; 459 unsigned int bpp; 460 int i; 461 462 ipp_cfg = &c_node->property.config[m_node->ops_id]; 463 num_plane = drm_format_num_planes(ipp_cfg->fmt); 464 465 /** 466 * This is a rather simplified validation of a memory node. 467 * It basically verifies provided gem object handles 468 * and the buffer sizes with respect to current configuration. 469 * This is not the best that can be done 470 * but it seems more than enough 471 */ 472 for (i = 0; i < num_plane; ++i) { 473 if (!m_node->buf_info.handles[i]) { 474 DRM_ERROR("invalid handle for plane %d\n", i); 475 return -EINVAL; 476 } 477 bpp = drm_format_plane_cpp(ipp_cfg->fmt, i); 478 min_size = (ipp_cfg->sz.hsize * ipp_cfg->sz.vsize * bpp) >> 3; 479 size = exynos_drm_gem_get_size(drm_dev, 480 m_node->buf_info.handles[i], 481 c_node->filp); 482 if (min_size > size) { 483 DRM_ERROR("invalid size for plane %d\n", i); 484 return -EINVAL; 485 } 486 } 487 return 0; 488 } 489 490 static int ipp_put_mem_node(struct drm_device *drm_dev, 491 struct drm_exynos_ipp_cmd_node *c_node, 492 struct drm_exynos_ipp_mem_node *m_node) 493 { 494 int i; 495 496 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 497 498 if (!m_node) { 499 DRM_ERROR("invalid dequeue node.\n"); 500 return -EFAULT; 501 } 502 503 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 504 505 /* put gem buffer */ 506 for_each_ipp_planar(i) { 507 unsigned long handle = m_node->buf_info.handles[i]; 508 if (handle) 509 exynos_drm_gem_put_dma_addr(drm_dev, handle, 510 c_node->filp); 511 } 512 513 list_del(&m_node->list); 514 kfree(m_node); 515 516 return 0; 517 } 518 519 static struct drm_exynos_ipp_mem_node 520 *ipp_get_mem_node(struct drm_device *drm_dev, 521 struct drm_exynos_ipp_cmd_node *c_node, 522 struct drm_exynos_ipp_queue_buf *qbuf) 523 { 524 struct drm_exynos_ipp_mem_node *m_node; 525 struct drm_exynos_ipp_buf_info *buf_info; 526 int i; 527 528 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); 529 if (!m_node) 530 return ERR_PTR(-ENOMEM); 531 532 buf_info = &m_node->buf_info; 533 534 /* operations, buffer id */ 535 m_node->ops_id = qbuf->ops_id; 536 m_node->prop_id = qbuf->prop_id; 537 m_node->buf_id = qbuf->buf_id; 538 INIT_LIST_HEAD(&m_node->list); 539 540 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); 541 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 542 543 for_each_ipp_planar(i) { 544 DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]); 545 546 /* get dma address by handle */ 547 if (qbuf->handle[i]) { 548 dma_addr_t *addr; 549 550 addr = exynos_drm_gem_get_dma_addr(drm_dev, 551 qbuf->handle[i], c_node->filp); 552 if (IS_ERR(addr)) { 553 DRM_ERROR("failed to get addr.\n"); 554 ipp_put_mem_node(drm_dev, c_node, m_node); 555 return ERR_PTR(-EFAULT); 556 } 557 558 buf_info->handles[i] = qbuf->handle[i]; 559 buf_info->base[i] = *addr; 560 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i, 561 buf_info->base[i], buf_info->handles[i]); 562 } 563 } 564 565 mutex_lock(&c_node->mem_lock); 566 if (ipp_validate_mem_node(drm_dev, m_node, c_node)) { 567 ipp_put_mem_node(drm_dev, c_node, m_node); 568 mutex_unlock(&c_node->mem_lock); 569 return ERR_PTR(-EFAULT); 570 } 571 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); 572 mutex_unlock(&c_node->mem_lock); 573 574 return m_node; 575 } 576 577 static void ipp_clean_mem_nodes(struct drm_device *drm_dev, 578 struct drm_exynos_ipp_cmd_node *c_node, int ops) 579 { 580 struct drm_exynos_ipp_mem_node *m_node, *tm_node; 581 struct list_head *head = &c_node->mem_list[ops]; 582 583 mutex_lock(&c_node->mem_lock); 584 585 list_for_each_entry_safe(m_node, tm_node, head, list) { 586 int ret; 587 588 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 589 if (ret) 590 DRM_ERROR("failed to put m_node.\n"); 591 } 592 593 mutex_unlock(&c_node->mem_lock); 594 } 595 596 static void ipp_free_event(struct drm_pending_event *event) 597 { 598 kfree(event); 599 } 600 601 static int ipp_get_event(struct drm_device *drm_dev, 602 struct drm_exynos_ipp_cmd_node *c_node, 603 struct drm_exynos_ipp_queue_buf *qbuf) 604 { 605 struct drm_exynos_ipp_send_event *e; 606 unsigned long flags; 607 608 DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id); 609 610 e = kzalloc(sizeof(*e), GFP_KERNEL); 611 if (!e) { 612 spin_lock_irqsave(&drm_dev->event_lock, flags); 613 c_node->filp->event_space += sizeof(e->event); 614 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 615 return -ENOMEM; 616 } 617 618 /* make event */ 619 e->event.base.type = DRM_EXYNOS_IPP_EVENT; 620 e->event.base.length = sizeof(e->event); 621 e->event.user_data = qbuf->user_data; 622 e->event.prop_id = qbuf->prop_id; 623 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id; 624 e->base.event = &e->event.base; 625 e->base.file_priv = c_node->filp; 626 e->base.destroy = ipp_free_event; 627 mutex_lock(&c_node->event_lock); 628 list_add_tail(&e->base.link, &c_node->event_list); 629 mutex_unlock(&c_node->event_lock); 630 631 return 0; 632 } 633 634 static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, 635 struct drm_exynos_ipp_queue_buf *qbuf) 636 { 637 struct drm_exynos_ipp_send_event *e, *te; 638 int count = 0; 639 640 mutex_lock(&c_node->event_lock); 641 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 642 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); 643 644 /* 645 * qbuf == NULL condition means all event deletion. 646 * stop operations want to delete all event list. 647 * another case delete only same buf id. 648 */ 649 if (!qbuf) { 650 /* delete list */ 651 list_del(&e->base.link); 652 kfree(e); 653 } 654 655 /* compare buffer id */ 656 if (qbuf && (qbuf->buf_id == 657 e->event.buf_id[EXYNOS_DRM_OPS_DST])) { 658 /* delete list */ 659 list_del(&e->base.link); 660 kfree(e); 661 goto out_unlock; 662 } 663 } 664 665 out_unlock: 666 mutex_unlock(&c_node->event_lock); 667 return; 668 } 669 670 static void ipp_clean_cmd_node(struct ipp_context *ctx, 671 struct drm_exynos_ipp_cmd_node *c_node) 672 { 673 int i; 674 675 /* cancel works */ 676 cancel_work_sync(&c_node->start_work->work); 677 cancel_work_sync(&c_node->stop_work->work); 678 cancel_work_sync(&c_node->event_work->work); 679 680 /* put event */ 681 ipp_put_event(c_node, NULL); 682 683 for_each_ipp_ops(i) 684 ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i); 685 686 /* delete list */ 687 list_del(&c_node->list); 688 689 ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, 690 c_node->property.prop_id); 691 692 /* destroy mutex */ 693 mutex_destroy(&c_node->lock); 694 mutex_destroy(&c_node->mem_lock); 695 mutex_destroy(&c_node->event_lock); 696 697 /* free command node */ 698 kfree(c_node->start_work); 699 kfree(c_node->stop_work); 700 kfree(c_node->event_work); 701 kfree(c_node); 702 } 703 704 static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node) 705 { 706 switch (c_node->property.cmd) { 707 case IPP_CMD_WB: 708 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]); 709 case IPP_CMD_OUTPUT: 710 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]); 711 case IPP_CMD_M2M: 712 default: 713 return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) && 714 !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]); 715 } 716 } 717 718 static struct drm_exynos_ipp_mem_node 719 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node, 720 struct drm_exynos_ipp_queue_buf *qbuf) 721 { 722 struct drm_exynos_ipp_mem_node *m_node; 723 struct list_head *head; 724 int count = 0; 725 726 DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id); 727 728 /* source/destination memory list */ 729 head = &c_node->mem_list[qbuf->ops_id]; 730 731 /* find memory node from memory list */ 732 list_for_each_entry(m_node, head, list) { 733 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); 734 735 /* compare buffer id */ 736 if (m_node->buf_id == qbuf->buf_id) 737 return m_node; 738 } 739 740 return NULL; 741 } 742 743 static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, 744 struct drm_exynos_ipp_cmd_node *c_node, 745 struct drm_exynos_ipp_mem_node *m_node) 746 { 747 struct exynos_drm_ipp_ops *ops = NULL; 748 int ret = 0; 749 750 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 751 752 if (!m_node) { 753 DRM_ERROR("invalid queue node.\n"); 754 return -EFAULT; 755 } 756 757 DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); 758 759 /* get operations callback */ 760 ops = ippdrv->ops[m_node->ops_id]; 761 if (!ops) { 762 DRM_ERROR("not support ops.\n"); 763 return -EFAULT; 764 } 765 766 /* set address and enable irq */ 767 if (ops->set_addr) { 768 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info, 769 m_node->buf_id, IPP_BUF_ENQUEUE); 770 if (ret) { 771 DRM_ERROR("failed to set addr.\n"); 772 return ret; 773 } 774 } 775 776 return ret; 777 } 778 779 static void ipp_handle_cmd_work(struct device *dev, 780 struct exynos_drm_ippdrv *ippdrv, 781 struct drm_exynos_ipp_cmd_work *cmd_work, 782 struct drm_exynos_ipp_cmd_node *c_node) 783 { 784 struct ipp_context *ctx = get_ipp_context(dev); 785 786 cmd_work->ippdrv = ippdrv; 787 cmd_work->c_node = c_node; 788 queue_work(ctx->cmd_workq, &cmd_work->work); 789 } 790 791 static int ipp_queue_buf_with_run(struct device *dev, 792 struct drm_exynos_ipp_cmd_node *c_node, 793 struct drm_exynos_ipp_mem_node *m_node, 794 struct drm_exynos_ipp_queue_buf *qbuf) 795 { 796 struct exynos_drm_ippdrv *ippdrv; 797 struct drm_exynos_ipp_property *property; 798 struct exynos_drm_ipp_ops *ops; 799 int ret; 800 801 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id); 802 if (IS_ERR(ippdrv)) { 803 DRM_ERROR("failed to get ipp driver.\n"); 804 return -EFAULT; 805 } 806 807 ops = ippdrv->ops[qbuf->ops_id]; 808 if (!ops) { 809 DRM_ERROR("failed to get ops.\n"); 810 return -EFAULT; 811 } 812 813 property = &c_node->property; 814 815 if (c_node->state != IPP_STATE_START) { 816 DRM_DEBUG_KMS("bypass for invalid state.\n"); 817 return 0; 818 } 819 820 mutex_lock(&c_node->mem_lock); 821 if (!ipp_check_mem_list(c_node)) { 822 mutex_unlock(&c_node->mem_lock); 823 DRM_DEBUG_KMS("empty memory.\n"); 824 return 0; 825 } 826 827 /* 828 * If set destination buffer and enabled clock, 829 * then m2m operations need start operations at queue_buf 830 */ 831 if (ipp_is_m2m_cmd(property->cmd)) { 832 struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work; 833 834 cmd_work->ctrl = IPP_CTRL_PLAY; 835 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); 836 } else { 837 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 838 if (ret) { 839 mutex_unlock(&c_node->mem_lock); 840 DRM_ERROR("failed to set m node.\n"); 841 return ret; 842 } 843 } 844 mutex_unlock(&c_node->mem_lock); 845 846 return 0; 847 } 848 849 static void ipp_clean_queue_buf(struct drm_device *drm_dev, 850 struct drm_exynos_ipp_cmd_node *c_node, 851 struct drm_exynos_ipp_queue_buf *qbuf) 852 { 853 struct drm_exynos_ipp_mem_node *m_node, *tm_node; 854 855 /* delete list */ 856 mutex_lock(&c_node->mem_lock); 857 list_for_each_entry_safe(m_node, tm_node, 858 &c_node->mem_list[qbuf->ops_id], list) { 859 if (m_node->buf_id == qbuf->buf_id && 860 m_node->ops_id == qbuf->ops_id) 861 ipp_put_mem_node(drm_dev, c_node, m_node); 862 } 863 mutex_unlock(&c_node->mem_lock); 864 } 865 866 int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data, 867 struct drm_file *file) 868 { 869 struct drm_exynos_file_private *file_priv = file->driver_priv; 870 struct device *dev = file_priv->ipp_dev; 871 struct ipp_context *ctx = get_ipp_context(dev); 872 struct drm_exynos_ipp_queue_buf *qbuf = data; 873 struct drm_exynos_ipp_cmd_node *c_node; 874 struct drm_exynos_ipp_mem_node *m_node; 875 int ret; 876 877 if (!qbuf) { 878 DRM_ERROR("invalid buf parameter.\n"); 879 return -EINVAL; 880 } 881 882 if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) { 883 DRM_ERROR("invalid ops parameter.\n"); 884 return -EINVAL; 885 } 886 887 DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n", 888 qbuf->prop_id, qbuf->ops_id ? "dst" : "src", 889 qbuf->buf_id, qbuf->buf_type); 890 891 /* find command node */ 892 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 893 qbuf->prop_id); 894 if (!c_node || c_node->filp != file) { 895 DRM_ERROR("failed to get command node.\n"); 896 return -ENODEV; 897 } 898 899 /* buffer control */ 900 switch (qbuf->buf_type) { 901 case IPP_BUF_ENQUEUE: 902 /* get memory node */ 903 m_node = ipp_get_mem_node(drm_dev, c_node, qbuf); 904 if (IS_ERR(m_node)) { 905 DRM_ERROR("failed to get m_node.\n"); 906 return PTR_ERR(m_node); 907 } 908 909 /* 910 * first step get event for destination buffer. 911 * and second step when M2M case run with destination buffer 912 * if needed. 913 */ 914 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) { 915 /* get event for destination buffer */ 916 ret = ipp_get_event(drm_dev, c_node, qbuf); 917 if (ret) { 918 DRM_ERROR("failed to get event.\n"); 919 goto err_clean_node; 920 } 921 922 /* 923 * M2M case run play control for streaming feature. 924 * other case set address and waiting. 925 */ 926 ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf); 927 if (ret) { 928 DRM_ERROR("failed to run command.\n"); 929 goto err_clean_node; 930 } 931 } 932 break; 933 case IPP_BUF_DEQUEUE: 934 mutex_lock(&c_node->lock); 935 936 /* put event for destination buffer */ 937 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) 938 ipp_put_event(c_node, qbuf); 939 940 ipp_clean_queue_buf(drm_dev, c_node, qbuf); 941 942 mutex_unlock(&c_node->lock); 943 break; 944 default: 945 DRM_ERROR("invalid buffer control.\n"); 946 return -EINVAL; 947 } 948 949 return 0; 950 951 err_clean_node: 952 DRM_ERROR("clean memory nodes.\n"); 953 954 ipp_clean_queue_buf(drm_dev, c_node, qbuf); 955 return ret; 956 } 957 958 static bool exynos_drm_ipp_check_valid(struct device *dev, 959 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state) 960 { 961 if (ctrl != IPP_CTRL_PLAY) { 962 if (pm_runtime_suspended(dev)) { 963 DRM_ERROR("pm:runtime_suspended.\n"); 964 goto err_status; 965 } 966 } 967 968 switch (ctrl) { 969 case IPP_CTRL_PLAY: 970 if (state != IPP_STATE_IDLE) 971 goto err_status; 972 break; 973 case IPP_CTRL_STOP: 974 if (state == IPP_STATE_STOP) 975 goto err_status; 976 break; 977 case IPP_CTRL_PAUSE: 978 if (state != IPP_STATE_START) 979 goto err_status; 980 break; 981 case IPP_CTRL_RESUME: 982 if (state != IPP_STATE_STOP) 983 goto err_status; 984 break; 985 default: 986 DRM_ERROR("invalid state.\n"); 987 goto err_status; 988 } 989 990 return true; 991 992 err_status: 993 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state); 994 return false; 995 } 996 997 int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data, 998 struct drm_file *file) 999 { 1000 struct drm_exynos_file_private *file_priv = file->driver_priv; 1001 struct exynos_drm_ippdrv *ippdrv = NULL; 1002 struct device *dev = file_priv->ipp_dev; 1003 struct ipp_context *ctx = get_ipp_context(dev); 1004 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data; 1005 struct drm_exynos_ipp_cmd_work *cmd_work; 1006 struct drm_exynos_ipp_cmd_node *c_node; 1007 1008 if (!ctx) { 1009 DRM_ERROR("invalid context.\n"); 1010 return -EINVAL; 1011 } 1012 1013 if (!cmd_ctrl) { 1014 DRM_ERROR("invalid control parameter.\n"); 1015 return -EINVAL; 1016 } 1017 1018 DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n", 1019 cmd_ctrl->ctrl, cmd_ctrl->prop_id); 1020 1021 ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id); 1022 if (IS_ERR(ippdrv)) { 1023 DRM_ERROR("failed to get ipp driver.\n"); 1024 return PTR_ERR(ippdrv); 1025 } 1026 1027 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, 1028 cmd_ctrl->prop_id); 1029 if (!c_node || c_node->filp != file) { 1030 DRM_ERROR("invalid command node list.\n"); 1031 return -ENODEV; 1032 } 1033 1034 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, 1035 c_node->state)) { 1036 DRM_ERROR("invalid state.\n"); 1037 return -EINVAL; 1038 } 1039 1040 switch (cmd_ctrl->ctrl) { 1041 case IPP_CTRL_PLAY: 1042 if (pm_runtime_suspended(ippdrv->dev)) 1043 pm_runtime_get_sync(ippdrv->dev); 1044 1045 c_node->state = IPP_STATE_START; 1046 1047 cmd_work = c_node->start_work; 1048 cmd_work->ctrl = cmd_ctrl->ctrl; 1049 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); 1050 break; 1051 case IPP_CTRL_STOP: 1052 cmd_work = c_node->stop_work; 1053 cmd_work->ctrl = cmd_ctrl->ctrl; 1054 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); 1055 1056 if (!wait_for_completion_timeout(&c_node->stop_complete, 1057 msecs_to_jiffies(300))) { 1058 DRM_ERROR("timeout stop:prop_id[%d]\n", 1059 c_node->property.prop_id); 1060 } 1061 1062 c_node->state = IPP_STATE_STOP; 1063 ippdrv->dedicated = false; 1064 mutex_lock(&ippdrv->cmd_lock); 1065 ipp_clean_cmd_node(ctx, c_node); 1066 1067 if (list_empty(&ippdrv->cmd_list)) 1068 pm_runtime_put_sync(ippdrv->dev); 1069 mutex_unlock(&ippdrv->cmd_lock); 1070 break; 1071 case IPP_CTRL_PAUSE: 1072 cmd_work = c_node->stop_work; 1073 cmd_work->ctrl = cmd_ctrl->ctrl; 1074 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); 1075 1076 if (!wait_for_completion_timeout(&c_node->stop_complete, 1077 msecs_to_jiffies(200))) { 1078 DRM_ERROR("timeout stop:prop_id[%d]\n", 1079 c_node->property.prop_id); 1080 } 1081 1082 c_node->state = IPP_STATE_STOP; 1083 break; 1084 case IPP_CTRL_RESUME: 1085 c_node->state = IPP_STATE_START; 1086 cmd_work = c_node->start_work; 1087 cmd_work->ctrl = cmd_ctrl->ctrl; 1088 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); 1089 break; 1090 default: 1091 DRM_ERROR("could not support this state currently.\n"); 1092 return -EINVAL; 1093 } 1094 1095 DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n", 1096 cmd_ctrl->ctrl, cmd_ctrl->prop_id); 1097 1098 return 0; 1099 } 1100 1101 int exynos_drm_ippnb_register(struct notifier_block *nb) 1102 { 1103 return blocking_notifier_chain_register( 1104 &exynos_drm_ippnb_list, nb); 1105 } 1106 1107 int exynos_drm_ippnb_unregister(struct notifier_block *nb) 1108 { 1109 return blocking_notifier_chain_unregister( 1110 &exynos_drm_ippnb_list, nb); 1111 } 1112 1113 int exynos_drm_ippnb_send_event(unsigned long val, void *v) 1114 { 1115 return blocking_notifier_call_chain( 1116 &exynos_drm_ippnb_list, val, v); 1117 } 1118 1119 static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv, 1120 struct drm_exynos_ipp_property *property) 1121 { 1122 struct exynos_drm_ipp_ops *ops = NULL; 1123 bool swap = false; 1124 int ret, i; 1125 1126 if (!property) { 1127 DRM_ERROR("invalid property parameter.\n"); 1128 return -EINVAL; 1129 } 1130 1131 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); 1132 1133 /* reset h/w block */ 1134 if (ippdrv->reset && 1135 ippdrv->reset(ippdrv->dev)) { 1136 return -EINVAL; 1137 } 1138 1139 /* set source,destination operations */ 1140 for_each_ipp_ops(i) { 1141 struct drm_exynos_ipp_config *config = 1142 &property->config[i]; 1143 1144 ops = ippdrv->ops[i]; 1145 if (!ops || !config) { 1146 DRM_ERROR("not support ops and config.\n"); 1147 return -EINVAL; 1148 } 1149 1150 /* set format */ 1151 if (ops->set_fmt) { 1152 ret = ops->set_fmt(ippdrv->dev, config->fmt); 1153 if (ret) 1154 return ret; 1155 } 1156 1157 /* set transform for rotation, flip */ 1158 if (ops->set_transf) { 1159 ret = ops->set_transf(ippdrv->dev, config->degree, 1160 config->flip, &swap); 1161 if (ret) 1162 return ret; 1163 } 1164 1165 /* set size */ 1166 if (ops->set_size) { 1167 ret = ops->set_size(ippdrv->dev, swap, &config->pos, 1168 &config->sz); 1169 if (ret) 1170 return ret; 1171 } 1172 } 1173 1174 return 0; 1175 } 1176 1177 static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv, 1178 struct drm_exynos_ipp_cmd_node *c_node) 1179 { 1180 struct drm_exynos_ipp_mem_node *m_node; 1181 struct drm_exynos_ipp_property *property = &c_node->property; 1182 struct list_head *head; 1183 int ret, i; 1184 1185 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); 1186 1187 /* store command info in ippdrv */ 1188 ippdrv->c_node = c_node; 1189 1190 mutex_lock(&c_node->mem_lock); 1191 if (!ipp_check_mem_list(c_node)) { 1192 DRM_DEBUG_KMS("empty memory.\n"); 1193 ret = -ENOMEM; 1194 goto err_unlock; 1195 } 1196 1197 /* set current property in ippdrv */ 1198 ret = ipp_set_property(ippdrv, property); 1199 if (ret) { 1200 DRM_ERROR("failed to set property.\n"); 1201 ippdrv->c_node = NULL; 1202 goto err_unlock; 1203 } 1204 1205 /* check command */ 1206 switch (property->cmd) { 1207 case IPP_CMD_M2M: 1208 for_each_ipp_ops(i) { 1209 /* source/destination memory list */ 1210 head = &c_node->mem_list[i]; 1211 1212 m_node = list_first_entry(head, 1213 struct drm_exynos_ipp_mem_node, list); 1214 1215 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); 1216 1217 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1218 if (ret) { 1219 DRM_ERROR("failed to set m node.\n"); 1220 goto err_unlock; 1221 } 1222 } 1223 break; 1224 case IPP_CMD_WB: 1225 /* destination memory list */ 1226 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST]; 1227 1228 list_for_each_entry(m_node, head, list) { 1229 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1230 if (ret) { 1231 DRM_ERROR("failed to set m node.\n"); 1232 goto err_unlock; 1233 } 1234 } 1235 break; 1236 case IPP_CMD_OUTPUT: 1237 /* source memory list */ 1238 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1239 1240 list_for_each_entry(m_node, head, list) { 1241 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1242 if (ret) { 1243 DRM_ERROR("failed to set m node.\n"); 1244 goto err_unlock; 1245 } 1246 } 1247 break; 1248 default: 1249 DRM_ERROR("invalid operations.\n"); 1250 ret = -EINVAL; 1251 goto err_unlock; 1252 } 1253 mutex_unlock(&c_node->mem_lock); 1254 1255 DRM_DEBUG_KMS("cmd[%d]\n", property->cmd); 1256 1257 /* start operations */ 1258 if (ippdrv->start) { 1259 ret = ippdrv->start(ippdrv->dev, property->cmd); 1260 if (ret) { 1261 DRM_ERROR("failed to start ops.\n"); 1262 ippdrv->c_node = NULL; 1263 return ret; 1264 } 1265 } 1266 1267 return 0; 1268 1269 err_unlock: 1270 mutex_unlock(&c_node->mem_lock); 1271 ippdrv->c_node = NULL; 1272 return ret; 1273 } 1274 1275 static int ipp_stop_property(struct drm_device *drm_dev, 1276 struct exynos_drm_ippdrv *ippdrv, 1277 struct drm_exynos_ipp_cmd_node *c_node) 1278 { 1279 struct drm_exynos_ipp_property *property = &c_node->property; 1280 int i; 1281 1282 DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); 1283 1284 /* stop operations */ 1285 if (ippdrv->stop) 1286 ippdrv->stop(ippdrv->dev, property->cmd); 1287 1288 /* check command */ 1289 switch (property->cmd) { 1290 case IPP_CMD_M2M: 1291 for_each_ipp_ops(i) 1292 ipp_clean_mem_nodes(drm_dev, c_node, i); 1293 break; 1294 case IPP_CMD_WB: 1295 ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST); 1296 break; 1297 case IPP_CMD_OUTPUT: 1298 ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC); 1299 break; 1300 default: 1301 DRM_ERROR("invalid operations.\n"); 1302 return -EINVAL; 1303 } 1304 1305 return 0; 1306 } 1307 1308 void ipp_sched_cmd(struct work_struct *work) 1309 { 1310 struct drm_exynos_ipp_cmd_work *cmd_work = 1311 container_of(work, struct drm_exynos_ipp_cmd_work, work); 1312 struct exynos_drm_ippdrv *ippdrv; 1313 struct drm_exynos_ipp_cmd_node *c_node; 1314 struct drm_exynos_ipp_property *property; 1315 int ret; 1316 1317 ippdrv = cmd_work->ippdrv; 1318 if (!ippdrv) { 1319 DRM_ERROR("invalid ippdrv list.\n"); 1320 return; 1321 } 1322 1323 c_node = cmd_work->c_node; 1324 if (!c_node) { 1325 DRM_ERROR("invalid command node list.\n"); 1326 return; 1327 } 1328 1329 mutex_lock(&c_node->lock); 1330 1331 property = &c_node->property; 1332 1333 switch (cmd_work->ctrl) { 1334 case IPP_CTRL_PLAY: 1335 case IPP_CTRL_RESUME: 1336 ret = ipp_start_property(ippdrv, c_node); 1337 if (ret) { 1338 DRM_ERROR("failed to start property:prop_id[%d]\n", 1339 c_node->property.prop_id); 1340 goto err_unlock; 1341 } 1342 1343 /* 1344 * M2M case supports wait_completion of transfer. 1345 * because M2M case supports single unit operation 1346 * with multiple queue. 1347 * M2M need to wait completion of data transfer. 1348 */ 1349 if (ipp_is_m2m_cmd(property->cmd)) { 1350 if (!wait_for_completion_timeout 1351 (&c_node->start_complete, msecs_to_jiffies(200))) { 1352 DRM_ERROR("timeout event:prop_id[%d]\n", 1353 c_node->property.prop_id); 1354 goto err_unlock; 1355 } 1356 } 1357 break; 1358 case IPP_CTRL_STOP: 1359 case IPP_CTRL_PAUSE: 1360 ret = ipp_stop_property(ippdrv->drm_dev, ippdrv, 1361 c_node); 1362 if (ret) { 1363 DRM_ERROR("failed to stop property.\n"); 1364 goto err_unlock; 1365 } 1366 1367 complete(&c_node->stop_complete); 1368 break; 1369 default: 1370 DRM_ERROR("unknown control type\n"); 1371 break; 1372 } 1373 1374 DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl); 1375 1376 err_unlock: 1377 mutex_unlock(&c_node->lock); 1378 } 1379 1380 static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv, 1381 struct drm_exynos_ipp_cmd_node *c_node, int *buf_id) 1382 { 1383 struct drm_device *drm_dev = ippdrv->drm_dev; 1384 struct drm_exynos_ipp_property *property = &c_node->property; 1385 struct drm_exynos_ipp_mem_node *m_node; 1386 struct drm_exynos_ipp_queue_buf qbuf; 1387 struct drm_exynos_ipp_send_event *e; 1388 struct list_head *head; 1389 struct timeval now; 1390 unsigned long flags; 1391 u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, }; 1392 int ret, i; 1393 1394 for_each_ipp_ops(i) 1395 DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]); 1396 1397 if (!drm_dev) { 1398 DRM_ERROR("failed to get drm_dev.\n"); 1399 return -EINVAL; 1400 } 1401 1402 if (!property) { 1403 DRM_ERROR("failed to get property.\n"); 1404 return -EINVAL; 1405 } 1406 1407 mutex_lock(&c_node->event_lock); 1408 if (list_empty(&c_node->event_list)) { 1409 DRM_DEBUG_KMS("event list is empty.\n"); 1410 ret = 0; 1411 goto err_event_unlock; 1412 } 1413 1414 mutex_lock(&c_node->mem_lock); 1415 if (!ipp_check_mem_list(c_node)) { 1416 DRM_DEBUG_KMS("empty memory.\n"); 1417 ret = 0; 1418 goto err_mem_unlock; 1419 } 1420 1421 /* check command */ 1422 switch (property->cmd) { 1423 case IPP_CMD_M2M: 1424 for_each_ipp_ops(i) { 1425 /* source/destination memory list */ 1426 head = &c_node->mem_list[i]; 1427 1428 m_node = list_first_entry(head, 1429 struct drm_exynos_ipp_mem_node, list); 1430 1431 tbuf_id[i] = m_node->buf_id; 1432 DRM_DEBUG_KMS("%s buf_id[%d]\n", 1433 i ? "dst" : "src", tbuf_id[i]); 1434 1435 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1436 if (ret) 1437 DRM_ERROR("failed to put m_node.\n"); 1438 } 1439 break; 1440 case IPP_CMD_WB: 1441 /* clear buf for finding */ 1442 memset(&qbuf, 0x0, sizeof(qbuf)); 1443 qbuf.ops_id = EXYNOS_DRM_OPS_DST; 1444 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST]; 1445 1446 /* get memory node entry */ 1447 m_node = ipp_find_mem_node(c_node, &qbuf); 1448 if (!m_node) { 1449 DRM_ERROR("empty memory node.\n"); 1450 ret = -ENOMEM; 1451 goto err_mem_unlock; 1452 } 1453 1454 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id; 1455 1456 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1457 if (ret) 1458 DRM_ERROR("failed to put m_node.\n"); 1459 break; 1460 case IPP_CMD_OUTPUT: 1461 /* source memory list */ 1462 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; 1463 1464 m_node = list_first_entry(head, 1465 struct drm_exynos_ipp_mem_node, list); 1466 1467 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; 1468 1469 ret = ipp_put_mem_node(drm_dev, c_node, m_node); 1470 if (ret) 1471 DRM_ERROR("failed to put m_node.\n"); 1472 break; 1473 default: 1474 DRM_ERROR("invalid operations.\n"); 1475 ret = -EINVAL; 1476 goto err_mem_unlock; 1477 } 1478 mutex_unlock(&c_node->mem_lock); 1479 1480 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST]) 1481 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n", 1482 tbuf_id[1], buf_id[1], property->prop_id); 1483 1484 /* 1485 * command node have event list of destination buffer 1486 * If destination buffer enqueue to mem list, 1487 * then we make event and link to event list tail. 1488 * so, we get first event for first enqueued buffer. 1489 */ 1490 e = list_first_entry(&c_node->event_list, 1491 struct drm_exynos_ipp_send_event, base.link); 1492 1493 do_gettimeofday(&now); 1494 DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec); 1495 e->event.tv_sec = now.tv_sec; 1496 e->event.tv_usec = now.tv_usec; 1497 e->event.prop_id = property->prop_id; 1498 1499 /* set buffer id about source destination */ 1500 for_each_ipp_ops(i) 1501 e->event.buf_id[i] = tbuf_id[i]; 1502 1503 spin_lock_irqsave(&drm_dev->event_lock, flags); 1504 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1505 wake_up_interruptible(&e->base.file_priv->event_wait); 1506 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 1507 mutex_unlock(&c_node->event_lock); 1508 1509 DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n", 1510 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]); 1511 1512 return 0; 1513 1514 err_mem_unlock: 1515 mutex_unlock(&c_node->mem_lock); 1516 err_event_unlock: 1517 mutex_unlock(&c_node->event_lock); 1518 return ret; 1519 } 1520 1521 void ipp_sched_event(struct work_struct *work) 1522 { 1523 struct drm_exynos_ipp_event_work *event_work = 1524 container_of(work, struct drm_exynos_ipp_event_work, work); 1525 struct exynos_drm_ippdrv *ippdrv; 1526 struct drm_exynos_ipp_cmd_node *c_node; 1527 int ret; 1528 1529 if (!event_work) { 1530 DRM_ERROR("failed to get event_work.\n"); 1531 return; 1532 } 1533 1534 DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]); 1535 1536 ippdrv = event_work->ippdrv; 1537 if (!ippdrv) { 1538 DRM_ERROR("failed to get ipp driver.\n"); 1539 return; 1540 } 1541 1542 c_node = ippdrv->c_node; 1543 if (!c_node) { 1544 DRM_ERROR("failed to get command node.\n"); 1545 return; 1546 } 1547 1548 /* 1549 * IPP supports command thread, event thread synchronization. 1550 * If IPP close immediately from user land, then IPP make 1551 * synchronization with command thread, so make complete event. 1552 * or going out operations. 1553 */ 1554 if (c_node->state != IPP_STATE_START) { 1555 DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n", 1556 c_node->state, c_node->property.prop_id); 1557 goto err_completion; 1558 } 1559 1560 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id); 1561 if (ret) { 1562 DRM_ERROR("failed to send event.\n"); 1563 goto err_completion; 1564 } 1565 1566 err_completion: 1567 if (ipp_is_m2m_cmd(c_node->property.cmd)) 1568 complete(&c_node->start_complete); 1569 } 1570 1571 static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) 1572 { 1573 struct ipp_context *ctx = get_ipp_context(dev); 1574 struct exynos_drm_ippdrv *ippdrv; 1575 int ret, count = 0; 1576 1577 /* get ipp driver entry */ 1578 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1579 ippdrv->drm_dev = drm_dev; 1580 1581 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv); 1582 if (ret < 0) { 1583 DRM_ERROR("failed to create id.\n"); 1584 goto err; 1585 } 1586 ippdrv->prop_list.ipp_id = ret; 1587 1588 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", 1589 count++, (int)ippdrv, ret); 1590 1591 /* store parent device for node */ 1592 ippdrv->parent_dev = dev; 1593 1594 /* store event work queue and handler */ 1595 ippdrv->event_workq = ctx->event_workq; 1596 ippdrv->sched_event = ipp_sched_event; 1597 INIT_LIST_HEAD(&ippdrv->cmd_list); 1598 mutex_init(&ippdrv->cmd_lock); 1599 1600 if (is_drm_iommu_supported(drm_dev)) { 1601 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev); 1602 if (ret) { 1603 DRM_ERROR("failed to activate iommu\n"); 1604 goto err; 1605 } 1606 } 1607 } 1608 1609 return 0; 1610 1611 err: 1612 /* get ipp driver entry */ 1613 list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list, 1614 drv_list) { 1615 if (is_drm_iommu_supported(drm_dev)) 1616 drm_iommu_detach_device(drm_dev, ippdrv->dev); 1617 1618 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, 1619 ippdrv->prop_list.ipp_id); 1620 } 1621 1622 return ret; 1623 } 1624 1625 static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev) 1626 { 1627 struct exynos_drm_ippdrv *ippdrv, *t; 1628 struct ipp_context *ctx = get_ipp_context(dev); 1629 1630 /* get ipp driver entry */ 1631 list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) { 1632 if (is_drm_iommu_supported(drm_dev)) 1633 drm_iommu_detach_device(drm_dev, ippdrv->dev); 1634 1635 ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, 1636 ippdrv->prop_list.ipp_id); 1637 1638 ippdrv->drm_dev = NULL; 1639 exynos_drm_ippdrv_unregister(ippdrv); 1640 } 1641 } 1642 1643 static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev, 1644 struct drm_file *file) 1645 { 1646 struct drm_exynos_file_private *file_priv = file->driver_priv; 1647 1648 file_priv->ipp_dev = dev; 1649 1650 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev); 1651 1652 return 0; 1653 } 1654 1655 static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, 1656 struct drm_file *file) 1657 { 1658 struct exynos_drm_ippdrv *ippdrv = NULL; 1659 struct ipp_context *ctx = get_ipp_context(dev); 1660 struct drm_exynos_ipp_cmd_node *c_node, *tc_node; 1661 int count = 0; 1662 1663 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 1664 mutex_lock(&ippdrv->cmd_lock); 1665 list_for_each_entry_safe(c_node, tc_node, 1666 &ippdrv->cmd_list, list) { 1667 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", 1668 count++, (int)ippdrv); 1669 1670 if (c_node->filp == file) { 1671 /* 1672 * userland goto unnormal state. process killed. 1673 * and close the file. 1674 * so, IPP didn't called stop cmd ctrl. 1675 * so, we are make stop operation in this state. 1676 */ 1677 if (c_node->state == IPP_STATE_START) { 1678 ipp_stop_property(drm_dev, ippdrv, 1679 c_node); 1680 c_node->state = IPP_STATE_STOP; 1681 } 1682 1683 ippdrv->dedicated = false; 1684 ipp_clean_cmd_node(ctx, c_node); 1685 if (list_empty(&ippdrv->cmd_list)) 1686 pm_runtime_put_sync(ippdrv->dev); 1687 } 1688 } 1689 mutex_unlock(&ippdrv->cmd_lock); 1690 } 1691 1692 return; 1693 } 1694 1695 static int ipp_probe(struct platform_device *pdev) 1696 { 1697 struct device *dev = &pdev->dev; 1698 struct ipp_context *ctx; 1699 struct exynos_drm_subdrv *subdrv; 1700 int ret; 1701 1702 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1703 if (!ctx) 1704 return -ENOMEM; 1705 1706 mutex_init(&ctx->ipp_lock); 1707 mutex_init(&ctx->prop_lock); 1708 1709 idr_init(&ctx->ipp_idr); 1710 idr_init(&ctx->prop_idr); 1711 1712 /* 1713 * create single thread for ipp event 1714 * IPP supports event thread for IPP drivers. 1715 * IPP driver send event_work to this thread. 1716 * and IPP event thread send event to user process. 1717 */ 1718 ctx->event_workq = create_singlethread_workqueue("ipp_event"); 1719 if (!ctx->event_workq) { 1720 dev_err(dev, "failed to create event workqueue\n"); 1721 return -EINVAL; 1722 } 1723 1724 /* 1725 * create single thread for ipp command 1726 * IPP supports command thread for user process. 1727 * user process make command node using set property ioctl. 1728 * and make start_work and send this work to command thread. 1729 * and then this command thread start property. 1730 */ 1731 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd"); 1732 if (!ctx->cmd_workq) { 1733 dev_err(dev, "failed to create cmd workqueue\n"); 1734 ret = -EINVAL; 1735 goto err_event_workq; 1736 } 1737 1738 /* set sub driver informations */ 1739 subdrv = &ctx->subdrv; 1740 subdrv->dev = dev; 1741 subdrv->probe = ipp_subdrv_probe; 1742 subdrv->remove = ipp_subdrv_remove; 1743 subdrv->open = ipp_subdrv_open; 1744 subdrv->close = ipp_subdrv_close; 1745 1746 platform_set_drvdata(pdev, ctx); 1747 1748 ret = exynos_drm_subdrv_register(subdrv); 1749 if (ret < 0) { 1750 DRM_ERROR("failed to register drm ipp device.\n"); 1751 goto err_cmd_workq; 1752 } 1753 1754 dev_info(dev, "drm ipp registered successfully.\n"); 1755 1756 return 0; 1757 1758 err_cmd_workq: 1759 destroy_workqueue(ctx->cmd_workq); 1760 err_event_workq: 1761 destroy_workqueue(ctx->event_workq); 1762 return ret; 1763 } 1764 1765 static int ipp_remove(struct platform_device *pdev) 1766 { 1767 struct ipp_context *ctx = platform_get_drvdata(pdev); 1768 1769 /* unregister sub driver */ 1770 exynos_drm_subdrv_unregister(&ctx->subdrv); 1771 1772 /* remove,destroy ipp idr */ 1773 idr_destroy(&ctx->ipp_idr); 1774 idr_destroy(&ctx->prop_idr); 1775 1776 mutex_destroy(&ctx->ipp_lock); 1777 mutex_destroy(&ctx->prop_lock); 1778 1779 /* destroy command, event work queue */ 1780 destroy_workqueue(ctx->cmd_workq); 1781 destroy_workqueue(ctx->event_workq); 1782 1783 return 0; 1784 } 1785 1786 struct platform_driver ipp_driver = { 1787 .probe = ipp_probe, 1788 .remove = ipp_remove, 1789 .driver = { 1790 .name = "exynos-drm-ipp", 1791 .owner = THIS_MODULE, 1792 }, 1793 }; 1794 1795