/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/of.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

#define G2D_HW_MAJOR_VER		4
#define G2D_HW_MINOR_VER		1

/* valid register range set from user: 0x0104 ~ 0x0880 */
#define G2D_VALID_START			0x0104
#define G2D_VALID_END			0x0880

/* general registers */
#define G2D_SOFT_RESET			0x0000
#define G2D_INTEN			0x0004
#define G2D_INTC_PEND			0x000C
#define G2D_DMA_SFR_BASE_ADDR		0x0080
#define G2D_DMA_COMMAND			0x0084
#define G2D_DMA_STATUS			0x008C
#define G2D_DMA_HOLD_CMD		0x0090

/* command registers */
#define G2D_BITBLT_START		0x0100

/* registers for base address */
#define G2D_SRC_BASE_ADDR		0x0304
#define G2D_SRC_PLANE2_BASE_ADDR	0x0318
#define G2D_DST_BASE_ADDR		0x0404
#define G2D_DST_PLANE2_BASE_ADDR	0x0418
#define G2D_PAT_BASE_ADDR		0x0500
#define G2D_MSK_BASE_ADDR		0x0520

/* G2D_SOFT_RESET */
#define G2D_SFRCLEAR			(1 << 1)
#define G2D_R				(1 << 0)

/* G2D_INTEN */
#define G2D_INTEN_ACF			(1 << 3)
#define G2D_INTEN_UCF			(1 << 2)
#define G2D_INTEN_GCF			(1 << 1)
#define G2D_INTEN_SCF			(1 << 0)

/* G2D_INTC_PEND */
#define G2D_INTP_ACMD_FIN		(1 << 3)
#define G2D_INTP_UCMD_FIN		(1 << 2)
#define G2D_INTP_GCMD_FIN		(1 << 1)
#define G2D_INTP_SCMD_FIN		(1 << 0)

/* G2D_DMA_COMMAND */
#define G2D_DMA_HALT			(1 << 2)
#define G2D_DMA_CONTINUE		(1 << 1)
#define G2D_DMA_START			(1 << 0)

/* G2D_DMA_STATUS */
#define G2D_DMA_LIST_DONE_COUNT		(0xFF << 17)
#define G2D_DMA_BITBLT_DONE_COUNT	(0xFFFF << 1)
#define G2D_DMA_DONE			(1 << 0)
#define G2D_DMA_LIST_DONE_COUNT_OFFSET	17

/* G2D_DMA_HOLD_CMD */
#define G2D_USET_HOLD			(1 << 2)
#define G2D_LIST_HOLD			(1 << 1)
#define G2D_BITBLT_HOLD			(1 << 0)

/* G2D_BITBLT_START */
#define G2D_START_CASESEL		(1 << 2)
#define G2D_START_NHOLT			(1 << 1)
#define G2D_START_BITBLT		(1 << 0)

#define G2D_CMDLIST_SIZE		(PAGE_SIZE / 4)
#define G2D_CMDLIST_NUM			64
#define G2D_CMDLIST_POOL_SIZE		(G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM		(G2D_CMDLIST_SIZE / sizeof(u32) - 2)

#define MAX_BUF_ADDR_NR			6

/* maximum buffer pool size of userptr is 64MB as default */
#define MAX_POOL			(64 * 1024 * 1024)

enum {
	BUF_TYPE_GEM = 1,
	BUF_TYPE_USERPTR,
};

/* cmdlist data structure */
struct g2d_cmdlist {
	u32		head;
	unsigned long	data[G2D_CMDLIST_DATA_NUM];
	u32		last;	/* last data offset */
};
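
/*
 * Sizing note (assuming the usual 4 KiB PAGE_SIZE): each cmdlist takes
 * 1 KiB (G2D_CMDLIST_SIZE = PAGE_SIZE / 4), so the pool of 64 lists is
 * 64 KiB and each list carries 254 data words (1024 / sizeof(u32) - 2,
 * the two remaining words being 'head' and 'last'). 'data' holds
 * register-offset/value pairs consumed by the G2D DMA engine, and the
 * word at index 'last' stays free so the base address of a following
 * cmdlist can be chained into it (see g2d_add_cmdlist_to_inuse()).
 */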
struct drm_exynos_pending_g2d_event {
	struct drm_pending_event	base;
	struct drm_exynos_g2d_event	event;
};

struct g2d_cmdlist_userptr {
	struct list_head	list;
	dma_addr_t		dma_addr;
	unsigned long		userptr;
	unsigned long		size;
	struct page		**pages;
	unsigned int		npages;
	struct sg_table		*sgt;
	struct vm_area_struct	*vma;
	atomic_t		refcount;
	bool			in_pool;
	bool			out_of_list;
};

struct g2d_cmdlist_node {
	struct list_head	list;
	struct g2d_cmdlist	*cmdlist;
	unsigned int		map_nr;
	unsigned long		handles[MAX_BUF_ADDR_NR];
	unsigned int		obj_type[MAX_BUF_ADDR_NR];
	dma_addr_t		dma_addr;

	struct drm_exynos_pending_g2d_event	*event;
};

struct g2d_runqueue_node {
	struct list_head	list;
	struct list_head	run_cmdlist;
	struct list_head	event_list;
	struct drm_file		*filp;
	pid_t			pid;
	struct completion	complete;
	int			async;
};

struct g2d_data {
	struct device			*dev;
	struct clk			*gate_clk;
	void __iomem			*regs;
	int				irq;
	struct workqueue_struct		*g2d_workq;
	struct work_struct		runqueue_work;
	struct exynos_drm_subdrv	subdrv;
	bool				suspended;

	/* cmdlist */
	struct g2d_cmdlist_node		*cmdlist_node;
	struct list_head		free_cmdlist;
	struct mutex			cmdlist_mutex;
	dma_addr_t			cmdlist_pool;
	void				*cmdlist_pool_virt;
	struct dma_attrs		cmdlist_dma_attrs;

	/* runqueue */
	struct g2d_runqueue_node	*runqueue_node;
	struct list_head		runqueue;
	struct mutex			runqueue_mutex;
	struct kmem_cache		*runqueue_slab;

	unsigned long			current_pool;
	unsigned long			max_pool;
};

static int g2d_init_cmdlist(struct g2d_data *g2d)
{
	struct device *dev = g2d->dev;
	struct g2d_cmdlist_node *node;
	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
	int nr;
	int ret;

	init_dma_attrs(&g2d->cmdlist_dma_attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);

	g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
						G2D_CMDLIST_POOL_SIZE,
						&g2d->cmdlist_pool, GFP_KERNEL,
						&g2d->cmdlist_dma_attrs);
	if (!g2d->cmdlist_pool_virt) {
		dev_err(dev, "failed to allocate dma memory\n");
		return -ENOMEM;
	}

	node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
	if (!node) {
		dev_err(dev, "failed to allocate memory\n");
		ret = -ENOMEM;
		goto err;
	}

	for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
		node[nr].cmdlist =
			g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
		node[nr].dma_addr =
			g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;

		list_add_tail(&node[nr].list, &g2d->free_cmdlist);
	}

	/* keep the node array so g2d_fini_cmdlist() can free it */
	g2d->cmdlist_node = node;

	return 0;

err:
	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
			g2d->cmdlist_pool_virt,
			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
	return ret;
}

static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;

	kfree(g2d->cmdlist_node);
	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
			g2d->cmdlist_pool_virt,
			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
}

static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
{
	struct device *dev = g2d->dev;
	struct g2d_cmdlist_node *node;

	mutex_lock(&g2d->cmdlist_mutex);
	if (list_empty(&g2d->free_cmdlist)) {
		dev_err(dev, "there is no free cmdlist\n");
		mutex_unlock(&g2d->cmdlist_mutex);
		return NULL;
	}

	node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
				list);
	list_del_init(&node->list);
	mutex_unlock(&g2d->cmdlist_mutex);

	return node;
}
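
/*
 * Node lifecycle, informally: a cmdlist node moves from free_cmdlist
 * (g2d_get_cmdlist) to the file's inuse_cmdlist (SET_CMDLIST ioctl),
 * is spliced onto a runqueue node's run_cmdlist (EXEC ioctl) and ends
 * up back on free_cmdlist once g2d_free_runqueue_node() has unmapped
 * its buffers. free_cmdlist is guarded by cmdlist_mutex and the
 * runqueue by runqueue_mutex.
 */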
static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
{
	mutex_lock(&g2d->cmdlist_mutex);
	list_move_tail(&node->list, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);
}

static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
				     struct g2d_cmdlist_node *node)
{
	struct g2d_cmdlist_node *lnode;

	if (list_empty(&g2d_priv->inuse_cmdlist))
		goto add_to_list;

	/* this links to base address of new cmdlist */
	lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
				struct g2d_cmdlist_node, list);
	lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;

add_to_list:
	list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);

	if (node->event)
		list_add_tail(&node->event->base.link, &g2d_priv->event_list);
}
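
/*
 * Chaining, informally: the engine walks one physically linked chain,
 * so queueing another cmdlist only requires patching its DMA base
 * address into the spare tail word of the previous list:
 *
 *	list A: [SFR clear | cmds | BITBLT_START] data[last] = &list B
 *	list B: [SFR clear | cmds | BITBLT_START] data[last] = 0 (end)
 */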
static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
					unsigned long obj,
					bool force)
{
	struct g2d_cmdlist_userptr *g2d_userptr =
					(struct g2d_cmdlist_userptr *)obj;

	if (!obj)
		return;

	if (force)
		goto out;

	atomic_dec(&g2d_userptr->refcount);

	if (atomic_read(&g2d_userptr->refcount) > 0)
		return;

	if (g2d_userptr->in_pool)
		return;

out:
	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
					DMA_BIDIRECTIONAL);

	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
					g2d_userptr->npages,
					g2d_userptr->vma);

	if (!g2d_userptr->out_of_list)
		list_del_init(&g2d_userptr->list);

	sg_free_table(g2d_userptr->sgt);
	kfree(g2d_userptr->sgt);
	g2d_userptr->sgt = NULL;

	kfree(g2d_userptr->pages);
	g2d_userptr->pages = NULL;
	kfree(g2d_userptr);
	g2d_userptr = NULL;
}

static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
					unsigned long userptr,
					unsigned long size,
					struct drm_file *filp,
					unsigned long *obj)
{
	struct drm_exynos_file_private *file_priv = filp->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct g2d_cmdlist_userptr *g2d_userptr;
	struct g2d_data *g2d;
	struct page **pages;
	struct sg_table *sgt;
	struct vm_area_struct *vma;
	unsigned long start, end;
	unsigned int npages, offset;
	int ret;

	if (!size) {
		DRM_ERROR("invalid userptr size.\n");
		return ERR_PTR(-EINVAL);
	}

	g2d = dev_get_drvdata(g2d_priv->dev);

	/* check if userptr already exists in userptr_list. */
	list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
		if (g2d_userptr->userptr == userptr) {
			/*
			 * also check size because there could be same address
			 * and different size.
			 */
			if (g2d_userptr->size == size) {
				atomic_inc(&g2d_userptr->refcount);
				*obj = (unsigned long)g2d_userptr;

				return &g2d_userptr->dma_addr;
			}

			/*
			 * G2D DMA may still be accessing this memory region,
			 * so just drop the object from userptr_list so it
			 * cannot be looked up again, and exclude it from the
			 * userptr pool so it is released once the DMA access
			 * has completed.
			 */
			g2d_userptr->out_of_list = true;
			g2d_userptr->in_pool = false;
			list_del_init(&g2d_userptr->list);

			break;
		}
	}

	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
	if (!g2d_userptr) {
		DRM_ERROR("failed to allocate g2d_userptr.\n");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&g2d_userptr->refcount, 1);

	start = userptr & PAGE_MASK;
	offset = userptr & ~PAGE_MASK;
	end = PAGE_ALIGN(userptr + size);
	npages = (end - start) >> PAGE_SHIFT;
	g2d_userptr->npages = npages;

	pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		DRM_ERROR("failed to allocate pages.\n");
		kfree(g2d_userptr);
		return ERR_PTR(-ENOMEM);
	}

	vma = find_vma(current->mm, userptr);
	if (!vma) {
		DRM_ERROR("failed to get vm region.\n");
		ret = -EFAULT;
		goto err_free_pages;
	}

	if (vma->vm_end < userptr + size) {
		DRM_ERROR("vma is too small.\n");
		ret = -EFAULT;
		goto err_free_pages;
	}

	g2d_userptr->vma = exynos_gem_get_vma(vma);
	if (!g2d_userptr->vma) {
		DRM_ERROR("failed to copy vma.\n");
		ret = -ENOMEM;
		goto err_free_pages;
	}

	g2d_userptr->size = size;

	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
						npages, pages, vma);
	if (ret < 0) {
		DRM_ERROR("failed to get user pages from userptr.\n");
		goto err_put_vma;
	}

	g2d_userptr->pages = pages;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err_free_userptr;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
					size, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to get sgt from pages.\n");
		goto err_free_sgt;
	}

	g2d_userptr->sgt = sgt;

	ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
						DMA_BIDIRECTIONAL);
	if (ret < 0) {
		DRM_ERROR("failed to map sgt with dma region.\n");
		goto err_sg_free_table;
	}

	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
	g2d_userptr->userptr = userptr;

	list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);

	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
		g2d->current_pool += npages << PAGE_SHIFT;
		g2d_userptr->in_pool = true;
	}

	*obj = (unsigned long)g2d_userptr;

	return &g2d_userptr->dma_addr;

err_sg_free_table:
	sg_free_table(sgt);

err_free_sgt:
	kfree(sgt);
	sgt = NULL;

err_free_userptr:
	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
					g2d_userptr->npages,
					g2d_userptr->vma);

err_put_vma:
	exynos_gem_put_vma(g2d_userptr->vma);

err_free_pages:
	kfree(pages);
	kfree(g2d_userptr);
	pages = NULL;
	g2d_userptr = NULL;

	return ERR_PTR(ret);
}

static void g2d_userptr_free_all(struct drm_device *drm_dev,
					struct g2d_data *g2d,
					struct drm_file *filp)
{
	struct drm_exynos_file_private *file_priv = filp->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct g2d_cmdlist_userptr *g2d_userptr, *n;

	list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
		if (g2d_userptr->in_pool)
			g2d_userptr_put_dma_addr(drm_dev,
						(unsigned long)g2d_userptr,
						true);

	g2d->current_pool = 0;
}
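
/*
 * Pool policy, informally: up to max_pool bytes (64 MiB by default) of
 * pinned userptr pages stay cached with in_pool set, so a buffer that
 * is reused across cmdlists is found in userptr_list instead of being
 * pinned and DMA-mapped again. A cached entry survives its refcount
 * reaching zero and is only torn down by the force path of
 * g2d_userptr_put_dma_addr(), i.e. from g2d_userptr_free_all() when
 * the file closes.
 */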
static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
				struct g2d_cmdlist_node *node,
				struct drm_device *drm_dev,
				struct drm_file *file)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	int offset;
	int i;

	for (i = 0; i < node->map_nr; i++) {
		unsigned long handle;
		dma_addr_t *addr;

		offset = cmdlist->last - (i * 2 + 1);
		handle = cmdlist->data[offset];

		if (node->obj_type[i] == BUF_TYPE_GEM) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
								file);
			if (IS_ERR(addr)) {
				node->map_nr = i;
				return -EFAULT;
			}
		} else {
			struct drm_exynos_g2d_userptr g2d_userptr;

			if (copy_from_user(&g2d_userptr, (void __user *)handle,
				sizeof(struct drm_exynos_g2d_userptr))) {
				node->map_nr = i;
				return -EFAULT;
			}

			addr = g2d_userptr_get_dma_addr(drm_dev,
							g2d_userptr.userptr,
							g2d_userptr.size,
							file,
							&handle);
			if (IS_ERR(addr)) {
				node->map_nr = i;
				return -EFAULT;
			}
		}

		cmdlist->data[offset] = *addr;
		node->handles[i] = handle;
	}

	return 0;
}
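
/*
 * Patching layout, informally: buffer commands sit at the tail of
 * data[] as (reg, handle) pairs copied from userspace, so for entry i
 * the handle is at data[last - (i * 2 + 1)] with its register offset
 * in the word before it. Mapping rewrites each handle in place with
 * the DMA address the engine will actually fetch from:
 *
 *	before: ..., G2D_SRC_BASE_ADDR, <gem handle or userptr desc>
 *	after:  ..., G2D_SRC_BASE_ADDR, <dma_addr_t>
 */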
static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
				  struct g2d_cmdlist_node *node,
				  struct drm_file *filp)
{
	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
	int i;

	for (i = 0; i < node->map_nr; i++) {
		unsigned long handle = node->handles[i];

		if (node->obj_type[i] == BUF_TYPE_GEM)
			exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
							filp);
		else
			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
							false);

		node->handles[i] = 0;
		node->obj_type[i] = 0;
	}

	node->map_nr = 0;
}

static void g2d_dma_start(struct g2d_data *g2d,
			  struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node =
				list_first_entry(&runqueue_node->run_cmdlist,
						struct g2d_cmdlist_node, list);

	pm_runtime_get_sync(g2d->dev);
	clk_enable(g2d->gate_clk);

	/* interrupt enable */
	writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
			g2d->regs + G2D_INTEN);

	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}

static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
{
	struct g2d_runqueue_node *runqueue_node;

	if (list_empty(&g2d->runqueue))
		return NULL;

	runqueue_node = list_first_entry(&g2d->runqueue,
					struct g2d_runqueue_node, list);
	list_del_init(&runqueue_node->list);
	return runqueue_node;
}

static void g2d_free_runqueue_node(struct g2d_data *g2d,
				   struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node;

	if (!runqueue_node)
		return;

	mutex_lock(&g2d->cmdlist_mutex);
	/*
	 * All commands in run_cmdlist have completed, so unmap every gem
	 * object in each command node to drop its references.
	 */
	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);

	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}

static void g2d_exec_runqueue(struct g2d_data *g2d)
{
	g2d->runqueue_node = g2d_get_runqueue_node(g2d);
	if (g2d->runqueue_node)
		g2d_dma_start(g2d, g2d->runqueue_node);
}

static void g2d_runqueue_worker(struct work_struct *work)
{
	struct g2d_data *g2d = container_of(work, struct g2d_data,
					    runqueue_work);

	mutex_lock(&g2d->runqueue_mutex);
	clk_disable(g2d->gate_clk);
	pm_runtime_put_sync(g2d->dev);

	complete(&g2d->runqueue_node->complete);
	if (g2d->runqueue_node->async)
		g2d_free_runqueue_node(g2d, g2d->runqueue_node);

	if (g2d->suspended)
		g2d->runqueue_node = NULL;
	else
		g2d_exec_runqueue(g2d);
	mutex_unlock(&g2d->runqueue_mutex);
}

static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
	struct drm_device *drm_dev = g2d->subdrv.drm_dev;
	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
	struct drm_exynos_pending_g2d_event *e;
	struct timeval now;
	unsigned long flags;

	if (list_empty(&runqueue_node->event_list))
		return;

	e = list_first_entry(&runqueue_node->event_list,
			     struct drm_exynos_pending_g2d_event, base.link);

	do_gettimeofday(&now);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.cmdlist_no = cmdlist_no;

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}

static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
{
	struct g2d_data *g2d = dev_id;
	u32 pending;

	pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
	if (pending)
		writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);

	if (pending & G2D_INTP_GCMD_FIN) {
		u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);

		cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
						G2D_DMA_LIST_DONE_COUNT_OFFSET;

		g2d_finish_event(g2d, cmdlist_no);

		writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
		if (!(pending & G2D_INTP_ACMD_FIN)) {
			writel_relaxed(G2D_DMA_CONTINUE,
					g2d->regs + G2D_DMA_COMMAND);
		}
	}

	if (pending & G2D_INTP_ACMD_FIN)
		queue_work(g2d->g2d_workq, &g2d->runqueue_work);

	return IRQ_HANDLED;
}
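
/*
 * Event flow, informally: a cmdlist that carries an event ends with a
 * G2D_DMA_HOLD_CMD write of G2D_LIST_HOLD, so the engine raises
 * G2D_INTP_GCMD_FIN and pauses after that list. The handler reads the
 * finished-list count from G2D_DMA_STATUS, posts the DRM event, clears
 * the hold and, unless the whole chain is already done (ACMD_FIN),
 * resumes the engine with G2D_DMA_CONTINUE. The final ACMD_FIN
 * interrupt defers completion handling to the runqueue worker.
 */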
static int g2d_check_reg_offset(struct device *dev,
				struct g2d_cmdlist_node *node,
				int nr, bool for_addr)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	int reg_offset;
	int index;
	int i;

	for (i = 0; i < nr; i++) {
		index = cmdlist->last - 2 * (i + 1);

		if (for_addr) {
			/* check userptr buffer type. */
			reg_offset = (cmdlist->data[index] &
					~0x7fffffff) >> 31;
			if (reg_offset) {
				node->obj_type[i] = BUF_TYPE_USERPTR;
				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
			}
		}

		reg_offset = cmdlist->data[index] & ~0xfffff000;

		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
			goto err;
		if (reg_offset % 4)
			goto err;

		switch (reg_offset) {
		case G2D_SRC_BASE_ADDR:
		case G2D_SRC_PLANE2_BASE_ADDR:
		case G2D_DST_BASE_ADDR:
		case G2D_DST_PLANE2_BASE_ADDR:
		case G2D_PAT_BASE_ADDR:
		case G2D_MSK_BASE_ADDR:
			if (!for_addr)
				goto err;

			if (node->obj_type[i] != BUF_TYPE_USERPTR)
				node->obj_type[i] = BUF_TYPE_GEM;
			break;
		default:
			if (for_addr)
				goto err;
			break;
		}
	}

	return 0;

err:
	dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
	return -EINVAL;
}
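
/*
 * Validation, informally: every command pair must target a register in
 * the user-writable window 0x0104..0x0880 and be word aligned. Base
 * address registers are only accepted from the cmd_buf array
 * (for_addr == true), where bit 31 of the offset word (G2D_BUF_USERPTR
 * in the uapi header) selects BUF_TYPE_USERPTR over BUF_TYPE_GEM:
 *
 *	{ G2D_DST_BASE_ADDR, gem_handle }		    -> GEM
 *	{ G2D_DST_BASE_ADDR | G2D_BUF_USERPTR, desc_ptr }   -> userptr
 */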
/* ioctl functions */
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
			     struct drm_file *file)
{
	struct drm_exynos_g2d_get_ver *ver = data;

	ver->major = G2D_HW_MAJOR_VER;
	ver->minor = G2D_HW_MINOR_VER;

	return 0;
}
EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);

int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
				 struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct device *dev = g2d_priv->dev;
	struct g2d_data *g2d;
	struct drm_exynos_g2d_set_cmdlist *req = data;
	struct drm_exynos_g2d_cmd *cmd;
	struct drm_exynos_pending_g2d_event *e;
	struct g2d_cmdlist_node *node;
	struct g2d_cmdlist *cmdlist;
	unsigned long flags;
	int size;
	int ret;

	if (!dev)
		return -ENODEV;

	g2d = dev_get_drvdata(dev);
	if (!g2d)
		return -EFAULT;

	node = g2d_get_cmdlist(g2d);
	if (!node)
		return -ENOMEM;

	node->event = NULL;

	if (req->event_type != G2D_EVENT_NOT) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		if (file->event_space < sizeof(e->event)) {
			spin_unlock_irqrestore(&drm_dev->event_lock, flags);
			ret = -ENOMEM;
			goto err;
		}
		file->event_space -= sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);

		e = kzalloc(sizeof(*node->event), GFP_KERNEL);
		if (!e) {
			dev_err(dev, "failed to allocate event\n");

			spin_lock_irqsave(&drm_dev->event_lock, flags);
			file->event_space += sizeof(e->event);
			spin_unlock_irqrestore(&drm_dev->event_lock, flags);

			ret = -ENOMEM;
			goto err;
		}

		e->event.base.type = DRM_EXYNOS_G2D_EVENT;
		e->event.base.length = sizeof(e->event);
		e->event.user_data = req->user_data;
		e->base.event = &e->event.base;
		e->base.file_priv = file;
		e->base.destroy = (void (*)(struct drm_pending_event *))kfree;

		node->event = e;
	}

	cmdlist = node->cmdlist;

	cmdlist->last = 0;

	/*
	 * If the SFR registers are not cleared, this cmdlist inherits the
	 * register values of the previous cmdlist. The hardware executes
	 * the SFR clear command and the command following it at the same
	 * time; that following command would be ignored and execution would
	 * resume only at the command after it, so a dummy command is
	 * inserted right after the SFR clear command.
	 */
	cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
	cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
	cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
	cmdlist->data[cmdlist->last++] = 0;

	if (node->event) {
		cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
		cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
	}

	/* check size of cmdlist: the final two entries hold G2D_BITBLT_START */
	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
	if (size > G2D_CMDLIST_DATA_NUM) {
		dev_err(dev, "cmdlist size is too big\n");
		ret = -EINVAL;
		goto err_free_event;
	}

	cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd;

	if (copy_from_user(cmdlist->data + cmdlist->last,
				(void __user *)cmd,
				sizeof(*cmd) * req->cmd_nr)) {
		ret = -EFAULT;
		goto err_free_event;
	}
	cmdlist->last += req->cmd_nr * 2;

	ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
	if (ret < 0)
		goto err_free_event;

	node->map_nr = req->cmd_buf_nr;
	if (req->cmd_buf_nr) {
		struct drm_exynos_g2d_cmd *cmd_buf;

		cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;

		if (copy_from_user(cmdlist->data + cmdlist->last,
					(void __user *)cmd_buf,
					sizeof(*cmd_buf) * req->cmd_buf_nr)) {
			ret = -EFAULT;
			goto err_free_event;
		}
		cmdlist->last += req->cmd_buf_nr * 2;

		ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
		if (ret < 0)
			goto err_free_event;

		ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
		if (ret < 0)
			goto err_unmap;
	}

	cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
	cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;

	/* head */
	cmdlist->head = cmdlist->last / 2;

	/* tail */
	cmdlist->data[cmdlist->last] = 0;

	g2d_add_cmdlist_to_inuse(g2d_priv, node);

	return 0;

err_unmap:
	g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
	if (node->event) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		kfree(node->event);
	}
err:
	g2d_put_cmdlist(g2d, node);
	return ret;
}
EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
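
/*
 * Userspace sketch (illustrative only; field names follow the uapi
 * structs in drm/exynos_drm.h, and 0x0110 merely stands in for any
 * register in the valid window):
 *
 *	struct drm_exynos_g2d_cmd cmd[] = {
 *		{ 0x0110, 0xcc },			(plain SFR write)
 *	};
 *	struct drm_exynos_g2d_cmd cmd_buf[] = {
 *		{ G2D_DST_BASE_ADDR, dst_gem_handle },	(patched on map)
 *	};
 *	struct drm_exynos_g2d_set_cmdlist req = {
 *		.cmd = (uint64_t)(uintptr_t)cmd, .cmd_nr = 1,
 *		.cmd_buf = (uint64_t)(uintptr_t)cmd_buf, .cmd_buf_nr = 1,
 *		.event_type = G2D_EVENT_NOT,
 *	};
 *	struct drm_exynos_g2d_exec exec = { .async = 0 };
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &req);
 *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
 */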
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
			  struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct device *dev = g2d_priv->dev;
	struct g2d_data *g2d;
	struct drm_exynos_g2d_exec *req = data;
	struct g2d_runqueue_node *runqueue_node;
	struct list_head *run_cmdlist;
	struct list_head *event_list;

	if (!dev)
		return -ENODEV;

	g2d = dev_get_drvdata(dev);
	if (!g2d)
		return -EFAULT;

	runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
	if (!runqueue_node) {
		dev_err(dev, "failed to allocate memory\n");
		return -ENOMEM;
	}
	run_cmdlist = &runqueue_node->run_cmdlist;
	event_list = &runqueue_node->event_list;
	INIT_LIST_HEAD(run_cmdlist);
	INIT_LIST_HEAD(event_list);
	init_completion(&runqueue_node->complete);
	runqueue_node->async = req->async;

	list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
	list_splice_init(&g2d_priv->event_list, event_list);

	if (list_empty(run_cmdlist)) {
		dev_err(dev, "there is no inuse cmdlist\n");
		kmem_cache_free(g2d->runqueue_slab, runqueue_node);
		return -EPERM;
	}

	mutex_lock(&g2d->runqueue_mutex);
	runqueue_node->pid = current->pid;
	runqueue_node->filp = file;
	list_add_tail(&runqueue_node->list, &g2d->runqueue);
	if (!g2d->runqueue_node)
		g2d_exec_runqueue(g2d);
	mutex_unlock(&g2d->runqueue_mutex);

	if (runqueue_node->async)
		goto out;

	wait_for_completion(&runqueue_node->complete);
	g2d_free_runqueue_node(g2d, runqueue_node);

out:
	return 0;
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
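
/*
 * Completion, informally: a synchronous EXEC sleeps on the runqueue
 * node's completion and frees the node itself; an asynchronous EXEC
 * returns immediately and leaves freeing to g2d_runqueue_worker(),
 * which always signals the completion first.
 */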
static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct g2d_data *g2d;
	int ret;

	g2d = dev_get_drvdata(dev);
	if (!g2d)
		return -EFAULT;

	/* allocate dma-aware cmdlist buffer. */
	ret = g2d_init_cmdlist(g2d);
	if (ret < 0) {
		dev_err(dev, "cmdlist init failed\n");
		return ret;
	}

	if (!is_drm_iommu_supported(drm_dev))
		return 0;

	ret = drm_iommu_attach_device(drm_dev, dev);
	if (ret < 0) {
		dev_err(dev, "failed to enable iommu.\n");
		g2d_fini_cmdlist(g2d);
	}

	return ret;
}

static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	if (!is_drm_iommu_supported(drm_dev))
		return;

	drm_iommu_detach_device(drm_dev, dev);
}

static int g2d_open(struct drm_device *drm_dev, struct device *dev,
		    struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv;

	g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
	if (!g2d_priv) {
		dev_err(dev, "failed to allocate g2d private data\n");
		return -ENOMEM;
	}

	g2d_priv->dev = dev;
	file_priv->g2d_priv = g2d_priv;

	INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
	INIT_LIST_HEAD(&g2d_priv->event_list);
	INIT_LIST_HEAD(&g2d_priv->userptr_list);

	return 0;
}

static void g2d_close(struct drm_device *drm_dev, struct device *dev,
		      struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct g2d_data *g2d;
	struct g2d_cmdlist_node *node, *n;

	if (!dev)
		return;

	g2d = dev_get_drvdata(dev);
	if (!g2d)
		return;

	mutex_lock(&g2d->cmdlist_mutex);
	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
		/*
		 * unmap all gem objects not completed.
		 *
		 * P.S. if the current process was terminated forcibly,
		 * commands may remain in inuse_cmdlist, so unmap them here.
		 */
		g2d_unmap_cmdlist_gem(g2d, node, file);
		list_move_tail(&node->list, &g2d->free_cmdlist);
	}
	mutex_unlock(&g2d->cmdlist_mutex);

	/* release all g2d_userptr in pool. */
	g2d_userptr_free_all(drm_dev, g2d, file);

	kfree(file_priv->g2d_priv);
}

static int g2d_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct g2d_data *g2d;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL);
	if (!g2d) {
		dev_err(dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
			sizeof(struct g2d_runqueue_node), 0, 0, NULL);
	if (!g2d->runqueue_slab)
		return -ENOMEM;

	g2d->dev = dev;

	g2d->g2d_workq = create_singlethread_workqueue("g2d");
	if (!g2d->g2d_workq) {
		dev_err(dev, "failed to create workqueue\n");
		ret = -EINVAL;
		goto err_destroy_slab;
	}

	INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
	INIT_LIST_HEAD(&g2d->free_cmdlist);
	INIT_LIST_HEAD(&g2d->runqueue);

	mutex_init(&g2d->cmdlist_mutex);
	mutex_init(&g2d->runqueue_mutex);

	g2d->gate_clk = devm_clk_get(dev, "fimg2d");
	if (IS_ERR(g2d->gate_clk)) {
		dev_err(dev, "failed to get gate clock\n");
		ret = PTR_ERR(g2d->gate_clk);
		goto err_destroy_workqueue;
	}

	pm_runtime_enable(dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	g2d->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g2d->regs)) {
		ret = PTR_ERR(g2d->regs);
		goto err_put_clk;
	}

	g2d->irq = platform_get_irq(pdev, 0);
	if (g2d->irq < 0) {
		dev_err(dev, "failed to get irq\n");
		ret = g2d->irq;
		goto err_put_clk;
	}

	ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
								"drm_g2d", g2d);
	if (ret < 0) {
		dev_err(dev, "irq request failed\n");
		goto err_put_clk;
	}

	g2d->max_pool = MAX_POOL;

	platform_set_drvdata(pdev, g2d);

	subdrv = &g2d->subdrv;
	subdrv->dev = dev;
	subdrv->probe = g2d_subdrv_probe;
	subdrv->remove = g2d_subdrv_remove;
	subdrv->open = g2d_open;
	subdrv->close = g2d_close;

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		dev_err(dev, "failed to register drm g2d device\n");
		goto err_put_clk;
	}

	dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
			G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);

	return 0;

err_put_clk:
	pm_runtime_disable(dev);
err_destroy_workqueue:
	destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
	kmem_cache_destroy(g2d->runqueue_slab);
	return ret;
}

static int g2d_remove(struct platform_device *pdev)
{
	struct g2d_data *g2d = platform_get_drvdata(pdev);

	cancel_work_sync(&g2d->runqueue_work);
	exynos_drm_subdrv_unregister(&g2d->subdrv);

	while (g2d->runqueue_node) {
		g2d_free_runqueue_node(g2d, g2d->runqueue_node);
		g2d->runqueue_node = g2d_get_runqueue_node(g2d);
	}

	pm_runtime_disable(&pdev->dev);

	g2d_fini_cmdlist(g2d);
	destroy_workqueue(g2d->g2d_workq);
	kmem_cache_destroy(g2d->runqueue_slab);

	return 0;
}
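
/*
 * Suspend, informally: g2d_suspend() only marks the device suspended
 * and then busy-waits (in 0.5-1 ms naps; the FIXME below notes the
 * range is a guess) until the worker has retired the active runqueue
 * node. Resume clears the flag and restarts the queue.
 */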
#ifdef CONFIG_PM_SLEEP
static int g2d_suspend(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	mutex_lock(&g2d->runqueue_mutex);
	g2d->suspended = true;
	mutex_unlock(&g2d->runqueue_mutex);

	while (g2d->runqueue_node)
		/* FIXME: good range? */
		usleep_range(500, 1000);

	flush_work(&g2d->runqueue_work);

	return 0;
}

static int g2d_resume(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	g2d->suspended = false;
	g2d_exec_runqueue(g2d);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);

#ifdef CONFIG_OF
static const struct of_device_id exynos_g2d_match[] = {
	{ .compatible = "samsung,exynos5250-g2d" },
	{},
};
MODULE_DEVICE_TABLE(of, exynos_g2d_match);
#endif

struct platform_driver g2d_driver = {
	.probe		= g2d_probe,
	.remove		= g2d_remove,
	.driver		= {
		.name	= "s5p-g2d",
		.owner	= THIS_MODULE,
		.pm	= &g2d_pm_ops,
		.of_match_table = of_match_ptr(exynos_g2d_match),
	},
};