// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

#include <drm/drm_file.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"

#define G2D_HW_MAJOR_VER		4
#define G2D_HW_MINOR_VER		1

/* valid register range set from user: 0x0104 ~ 0x0880 */
#define G2D_VALID_START			0x0104
#define G2D_VALID_END			0x0880

/* general registers */
#define G2D_SOFT_RESET			0x0000
#define G2D_INTEN			0x0004
#define G2D_INTC_PEND			0x000C
#define G2D_DMA_SFR_BASE_ADDR		0x0080
#define G2D_DMA_COMMAND			0x0084
#define G2D_DMA_STATUS			0x008C
#define G2D_DMA_HOLD_CMD		0x0090

/* command registers */
#define G2D_BITBLT_START		0x0100

/* registers for base address */
#define G2D_SRC_BASE_ADDR		0x0304
#define G2D_SRC_STRIDE			0x0308
#define G2D_SRC_COLOR_MODE		0x030C
#define G2D_SRC_LEFT_TOP		0x0310
#define G2D_SRC_RIGHT_BOTTOM		0x0314
#define G2D_SRC_PLANE2_BASE_ADDR	0x0318
#define G2D_DST_BASE_ADDR		0x0404
#define G2D_DST_STRIDE			0x0408
#define G2D_DST_COLOR_MODE		0x040C
#define G2D_DST_LEFT_TOP		0x0410
#define G2D_DST_RIGHT_BOTTOM		0x0414
#define G2D_DST_PLANE2_BASE_ADDR	0x0418
#define G2D_PAT_BASE_ADDR		0x0500
#define G2D_MSK_BASE_ADDR		0x0520

/* G2D_SOFT_RESET */
#define G2D_SFRCLEAR			(1 << 1)
#define G2D_R				(1 << 0)

/* G2D_INTEN */
#define G2D_INTEN_ACF			(1 << 3)
#define G2D_INTEN_UCF			(1 << 2)
#define G2D_INTEN_GCF			(1 << 1)
#define G2D_INTEN_SCF			(1 << 0)

/* G2D_INTC_PEND */
#define G2D_INTP_ACMD_FIN		(1 << 3)
#define G2D_INTP_UCMD_FIN		(1 << 2)
#define G2D_INTP_GCMD_FIN		(1 << 1)
#define G2D_INTP_SCMD_FIN		(1 << 0)

/* G2D_DMA_COMMAND */
#define G2D_DMA_HALT			(1 << 2)
#define G2D_DMA_CONTINUE		(1 << 1)
#define G2D_DMA_START			(1 << 0)

/* G2D_DMA_STATUS */
#define G2D_DMA_LIST_DONE_COUNT		(0xFF << 17)
#define G2D_DMA_BITBLT_DONE_COUNT	(0xFFFF << 1)
#define G2D_DMA_DONE			(1 << 0)
#define G2D_DMA_LIST_DONE_COUNT_OFFSET	17

/* G2D_DMA_HOLD_CMD */
#define G2D_USER_HOLD			(1 << 2)
#define G2D_LIST_HOLD			(1 << 1)
#define G2D_BITBLT_HOLD			(1 << 0)

/* G2D_BITBLT_START */
#define G2D_START_CASESEL		(1 << 2)
#define G2D_START_NHOLT			(1 << 1)
#define G2D_START_BITBLT		(1 << 0)

/* buffer color format */
#define G2D_FMT_XRGB8888		0
#define G2D_FMT_ARGB8888		1
#define G2D_FMT_RGB565			2
#define G2D_FMT_XRGB1555		3
#define G2D_FMT_ARGB1555		4
#define G2D_FMT_XRGB4444		5
#define G2D_FMT_ARGB4444		6
#define G2D_FMT_PACKED_RGB888		7
#define G2D_FMT_A8			11
#define G2D_FMT_L8			12

/* buffer valid length */
#define G2D_LEN_MIN			1
#define G2D_LEN_MAX			8000

#define G2D_CMDLIST_SIZE		(PAGE_SIZE / 4)
#define G2D_CMDLIST_NUM			64
#define G2D_CMDLIST_POOL_SIZE		(G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM		(G2D_CMDLIST_SIZE / sizeof(u32) - 2)

/* maximum buffer pool size of userptr is 64MB as default */
#define MAX_POOL			(64 * 1024 * 1024)

enum {
	BUF_TYPE_GEM = 1,
	BUF_TYPE_USERPTR,
};

enum g2d_reg_type {
	REG_TYPE_NONE = -1,
	REG_TYPE_SRC,
	REG_TYPE_SRC_PLANE2,
	REG_TYPE_DST,
	REG_TYPE_DST_PLANE2,
	REG_TYPE_PAT,
	REG_TYPE_MSK,
	MAX_REG_TYPE_NR
};

enum g2d_flag_bits {
	/*
	 * If set, suspends the runqueue worker after the currently
	 * processed node is finished.
	 */
	G2D_BIT_SUSPEND_RUNQUEUE,
	/*
	 * If set, indicates that the engine is currently busy.
	 */
	G2D_BIT_ENGINE_BUSY,
};

/* cmdlist data structure */
struct g2d_cmdlist {
	u32 head;
	unsigned long data[G2D_CMDLIST_DATA_NUM];
	u32 last;	/* last data offset */
};

/*
 * A structure of buffer description
 *
 * @format: color format
 * @stride: buffer stride/pitch in bytes
 * @left_x: the x coordinate of the top-left corner
 * @top_y: the y coordinate of the top-left corner
 * @right_x: the x coordinate of the bottom-right corner
 * @bottom_y: the y coordinate of the bottom-right corner
 */
struct g2d_buf_desc {
	unsigned int format;
	unsigned int stride;
	unsigned int left_x;
	unsigned int top_y;
	unsigned int right_x;
	unsigned int bottom_y;
};

/*
 * A structure of buffer information
 *
 * @map_nr: manages the number of mapped buffers
 * @reg_types: stores register type in the order of requested command
 * @obj: stores buffer object in its reg_type position
 * @types: stores buffer type in its reg_type position
 * @descs: stores buffer description in its reg_type position
 */
struct g2d_buf_info {
	unsigned int map_nr;
	enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
	void *obj[MAX_REG_TYPE_NR];
	unsigned int types[MAX_REG_TYPE_NR];
	struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
};

struct drm_exynos_pending_g2d_event {
	struct drm_pending_event base;
	struct drm_exynos_g2d_event event;
};

struct g2d_cmdlist_userptr {
	struct list_head list;
	dma_addr_t dma_addr;
	unsigned long userptr;
	unsigned long size;
	struct page **pages;
	unsigned int npages;
	struct sg_table *sgt;
	atomic_t refcount;
	bool in_pool;
	bool out_of_list;
};

struct g2d_cmdlist_node {
	struct list_head list;
	struct g2d_cmdlist *cmdlist;
	dma_addr_t dma_addr;
	struct g2d_buf_info buf_info;

	struct drm_exynos_pending_g2d_event *event;
};

struct g2d_runqueue_node {
	struct list_head list;
	struct list_head run_cmdlist;
	struct list_head event_list;
	struct drm_file *filp;
	pid_t pid;
	struct completion complete;
	int async;
};

struct g2d_data {
	struct device *dev;
	void *dma_priv;
	struct clk *gate_clk;
	void __iomem *regs;
	int irq;
	struct workqueue_struct *g2d_workq;
	struct work_struct runqueue_work;
	struct drm_device *drm_dev;
	unsigned long flags;

	/* cmdlist */
	struct g2d_cmdlist_node *cmdlist_node;
	struct list_head free_cmdlist;
	struct mutex cmdlist_mutex;
	dma_addr_t cmdlist_pool;
	void *cmdlist_pool_virt;
	unsigned long cmdlist_dma_attrs;

	/* runqueue */
	struct g2d_runqueue_node *runqueue_node;
	struct list_head runqueue;
	struct mutex runqueue_mutex;
	struct kmem_cache *runqueue_slab;

	unsigned long current_pool;
	unsigned long max_pool;
};

static inline void g2d_hw_reset(struct g2d_data *g2d)
{
	writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET);
	clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
}

static int g2d_init_cmdlist(struct g2d_data *g2d)
{
	struct device *dev = g2d->dev;
	struct g2d_cmdlist_node *node;
	int nr;
	int ret;
	struct g2d_buf_info *buf_info;

	g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;

	g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
						 G2D_CMDLIST_POOL_SIZE,
						 &g2d->cmdlist_pool, GFP_KERNEL,
						 g2d->cmdlist_dma_attrs);
	if (!g2d->cmdlist_pool_virt) {
		dev_err(dev, "failed to allocate dma memory\n");
		return -ENOMEM;
	}

	node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
	if (!node) {
		ret = -ENOMEM;
		goto err;
	}

	/* keep a reference to the node array so g2d_fini_cmdlist() can free it */
	g2d->cmdlist_node = node;

	for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
		unsigned int i;

		node[nr].cmdlist =
			g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
		node[nr].dma_addr =
			g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;

		buf_info = &node[nr].buf_info;
		for (i = 0; i < MAX_REG_TYPE_NR; i++)
			buf_info->reg_types[i] = REG_TYPE_NONE;

		list_add_tail(&node[nr].list, &g2d->free_cmdlist);
	}

	return 0;

err:
	dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
		       g2d->cmdlist_pool_virt,
		       g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
	return ret;
}

static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
	kfree(g2d->cmdlist_node);

	if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
		dma_free_attrs(to_dma_dev(g2d->drm_dev),
			       G2D_CMDLIST_POOL_SIZE,
			       g2d->cmdlist_pool_virt,
			       g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
	}
}

static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
{
	struct device *dev = g2d->dev;
	struct g2d_cmdlist_node *node;

	mutex_lock(&g2d->cmdlist_mutex);
	if (list_empty(&g2d->free_cmdlist)) {
		dev_err(dev, "there is no free cmdlist\n");
		mutex_unlock(&g2d->cmdlist_mutex);
		return NULL;
	}

	node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
				list);
	list_del_init(&node->list);
	mutex_unlock(&g2d->cmdlist_mutex);

	return node;
}

static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
{
	mutex_lock(&g2d->cmdlist_mutex);
	list_move_tail(&node->list, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);
}

static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
				     struct g2d_cmdlist_node *node)
{
	struct g2d_cmdlist_node *lnode;

	if (list_empty(&file_priv->inuse_cmdlist))
		goto add_to_list;

	/* this links to base address of new cmdlist */
	lnode = list_entry(file_priv->inuse_cmdlist.prev,
			   struct g2d_cmdlist_node, list);
	lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;

add_to_list:
	list_add_tail(&node->list, &file_priv->inuse_cmdlist);

	if (node->event)
		list_add_tail(&node->event->base.link, &file_priv->event_list);
}

static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
				     void *obj,
				     bool force)
{
	struct g2d_cmdlist_userptr *g2d_userptr = obj;

	if (!obj)
		return;

	if (force)
		goto out;

	atomic_dec(&g2d_userptr->refcount);

	if (atomic_read(&g2d_userptr->refcount) > 0)
		return;

	if (g2d_userptr->in_pool)
		return;

out:
	dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
			  DMA_BIDIRECTIONAL, 0);

	unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages,
				    true);
	kvfree(g2d_userptr->pages);

	if (!g2d_userptr->out_of_list)
		list_del_init(&g2d_userptr->list);

	sg_free_table(g2d_userptr->sgt);
	kfree(g2d_userptr->sgt);
	kfree(g2d_userptr);
}

static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
					    unsigned long userptr,
					    unsigned long size,
					    struct drm_file *filp,
					    void **obj)
{
	struct drm_exynos_file_private *file_priv = filp->driver_priv;
	struct g2d_cmdlist_userptr *g2d_userptr;
	struct sg_table *sgt;
	unsigned long start, end;
	unsigned int npages, offset;
	int ret;

	if (!size) {
		DRM_DEV_ERROR(g2d->dev, "invalid userptr size.\n");
		return ERR_PTR(-EINVAL);
	}

	/* check if userptr already exists in userptr_list. */
	list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
		if (g2d_userptr->userptr == userptr) {
			/*
			 * also check the size, because the same address could
			 * be used again with a different size.
			 */
			if (g2d_userptr->size == size) {
				atomic_inc(&g2d_userptr->refcount);
				*obj = g2d_userptr;

				return &g2d_userptr->dma_addr;
			}

			/*
			 * The G2D DMA engine may still be accessing this
			 * memory region, so just remove this g2d_userptr
			 * object from userptr_list so it is not looked up
			 * again, and exclude it from the userptr pool so that
			 * it is released once the DMA access has completed.
			 */
			g2d_userptr->out_of_list = true;
			g2d_userptr->in_pool = false;
			list_del_init(&g2d_userptr->list);

			break;
		}
	}

	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
	if (!g2d_userptr)
		return ERR_PTR(-ENOMEM);

	atomic_set(&g2d_userptr->refcount, 1);
	g2d_userptr->size = size;

	start = userptr & PAGE_MASK;
	offset = userptr & ~PAGE_MASK;
	end = PAGE_ALIGN(userptr + size);
	npages = (end - start) >> PAGE_SHIFT;
	g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages),
					    GFP_KERNEL);
	if (!g2d_userptr->pages) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = pin_user_pages_fast(start, npages, FOLL_FORCE | FOLL_WRITE,
				  g2d_userptr->pages);
	if (ret != npages) {
		DRM_DEV_ERROR(g2d->dev,
			      "failed to get user pages from userptr.\n");
		if (ret < 0)
			goto err_destroy_pages;
		npages = ret;
		ret = -EFAULT;
		goto err_unpin_pages;
	}
	g2d_userptr->npages = npages;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table_from_pages(sgt,
					g2d_userptr->pages,
					npages, offset, size, GFP_KERNEL);
	if (ret < 0) {
		DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
		goto err_free_sgt;
	}

	g2d_userptr->sgt = sgt;

	ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
			      DMA_BIDIRECTIONAL, 0);
	if (ret) {
		DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
		goto err_sg_free_table;
	}

	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
	g2d_userptr->userptr = userptr;

	list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);

	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
		g2d->current_pool += npages << PAGE_SHIFT;
		g2d_userptr->in_pool = true;
	}

	*obj = g2d_userptr;

	return &g2d_userptr->dma_addr;

err_sg_free_table:
	sg_free_table(sgt);

err_free_sgt:
	kfree(sgt);

err_unpin_pages:
	unpin_user_pages(g2d_userptr->pages, npages);

err_destroy_pages:
	kvfree(g2d_userptr->pages);

err_free:
	kfree(g2d_userptr);

	return ERR_PTR(ret);
}

static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
{
	struct drm_exynos_file_private *file_priv = filp->driver_priv;
	struct g2d_cmdlist_userptr *g2d_userptr, *n;

	list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
		if (g2d_userptr->in_pool)
			g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);

	g2d->current_pool = 0;
}

static enum g2d_reg_type g2d_get_reg_type(struct g2d_data *g2d, int reg_offset)
{
	enum g2d_reg_type reg_type;

	switch (reg_offset) {
	case G2D_SRC_BASE_ADDR:
	case G2D_SRC_STRIDE:
	case G2D_SRC_COLOR_MODE:
	case G2D_SRC_LEFT_TOP:
	case G2D_SRC_RIGHT_BOTTOM:
		reg_type = REG_TYPE_SRC;
		break;
	case G2D_SRC_PLANE2_BASE_ADDR:
		reg_type = REG_TYPE_SRC_PLANE2;
		break;
	case G2D_DST_BASE_ADDR:
	case G2D_DST_STRIDE:
	case G2D_DST_COLOR_MODE:
	case G2D_DST_LEFT_TOP:
	case G2D_DST_RIGHT_BOTTOM:
		reg_type = REG_TYPE_DST;
		break;
	case G2D_DST_PLANE2_BASE_ADDR:
		reg_type = REG_TYPE_DST_PLANE2;
		break;
	case G2D_PAT_BASE_ADDR:
		reg_type = REG_TYPE_PAT;
		break;
	case G2D_MSK_BASE_ADDR:
		reg_type = REG_TYPE_MSK;
		break;
	default:
		reg_type = REG_TYPE_NONE;
		DRM_DEV_ERROR(g2d->dev, "Unknown register offset![%d]\n",
			      reg_offset);
		break;
	}

	return reg_type;
}

static unsigned long g2d_get_buf_bpp(unsigned int format)
{
	unsigned long bpp;

	switch (format) {
	case G2D_FMT_XRGB8888:
	case G2D_FMT_ARGB8888:
		bpp = 4;
		break;
	case G2D_FMT_RGB565:
	case G2D_FMT_XRGB1555:
	case G2D_FMT_ARGB1555:
	case G2D_FMT_XRGB4444:
	case G2D_FMT_ARGB4444:
		bpp = 2;
		break;
	case G2D_FMT_PACKED_RGB888:
		bpp = 3;
		break;
	default:
		bpp = 1;
		break;
	}

	return bpp;
}

static bool g2d_check_buf_desc_is_valid(struct g2d_data *g2d,
					struct g2d_buf_desc *buf_desc,
					enum g2d_reg_type reg_type,
					unsigned long size)
{
	int width, height;
	unsigned long bpp, last_pos;

	/*
	 * Check source and destination buffers only;
	 * the other buffer types are always considered valid.
	 */
	if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
		return true;

	/* This check also makes sure that right_x > left_x. */
	width = (int)buf_desc->right_x - (int)buf_desc->left_x;
	if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
		DRM_DEV_ERROR(g2d->dev, "width[%d] is out of range!\n", width);
		return false;
	}

	/* This check also makes sure that bottom_y > top_y. */
	height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
	if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
		DRM_DEV_ERROR(g2d->dev,
			      "height[%d] is out of range!\n", height);
		return false;
	}

	bpp = g2d_get_buf_bpp(buf_desc->format);

	/* Compute the position of the last byte that the engine accesses. */
	last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
		(unsigned long)buf_desc->stride +
		(unsigned long)buf_desc->right_x * bpp - 1;

	/*
	 * Since right_x > left_x and bottom_y > top_y we already know
	 * that first_pos < last_pos (first_pos being the position of the
	 * first byte the engine accesses), so it only remains to check
	 * that last_pos is smaller than the buffer size.
	 */

	if (last_pos >= size) {
		DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] "
			      "is out of range [%lu]!\n", last_pos, size);
		return false;
	}

	return true;
}

static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
			       struct g2d_cmdlist_node *node,
			       struct drm_device *drm_dev,
			       struct drm_file *file)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	struct g2d_buf_info *buf_info = &node->buf_info;
	int offset;
	int ret;
	int i;

	for (i = 0; i < buf_info->map_nr; i++) {
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		int reg_pos;
		unsigned long handle;
		dma_addr_t *addr;

		reg_pos = cmdlist->last - 2 * (i + 1);

		offset = cmdlist->data[reg_pos];
		handle = cmdlist->data[reg_pos + 1];

		reg_type = g2d_get_reg_type(g2d, offset);
		if (reg_type == REG_TYPE_NONE) {
			ret = -EFAULT;
			goto err;
		}

		buf_desc = &buf_info->descs[reg_type];

		if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
			struct exynos_drm_gem *exynos_gem;

			exynos_gem = exynos_drm_gem_get(file, handle);
			if (!exynos_gem) {
				ret = -EFAULT;
				goto err;
			}

			if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
							 reg_type, exynos_gem->size)) {
				exynos_drm_gem_put(exynos_gem);
				ret = -EFAULT;
				goto err;
			}

			addr = &exynos_gem->dma_addr;
			buf_info->obj[reg_type] = exynos_gem;
		} else {
			struct drm_exynos_g2d_userptr g2d_userptr;

			if (copy_from_user(&g2d_userptr, (void __user *)handle,
					   sizeof(struct drm_exynos_g2d_userptr))) {
				ret = -EFAULT;
				goto err;
			}

			if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
							 reg_type,
							 g2d_userptr.size)) {
				ret = -EFAULT;
				goto err;
			}

			addr = g2d_userptr_get_dma_addr(g2d,
							g2d_userptr.userptr,
							g2d_userptr.size,
							file,
							&buf_info->obj[reg_type]);
			if (IS_ERR(addr)) {
				ret = -EFAULT;
				goto err;
			}
		}

		cmdlist->data[reg_pos + 1] = *addr;
		buf_info->reg_types[i] = reg_type;
	}

	return 0;

err:
	buf_info->map_nr = i;
	return ret;
}

static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
				  struct g2d_cmdlist_node *node,
				  struct drm_file *filp)
{
	struct g2d_buf_info *buf_info = &node->buf_info;
	int i;

	for (i = 0; i < buf_info->map_nr; i++) {
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		void *obj;

		reg_type = buf_info->reg_types[i];

		buf_desc = &buf_info->descs[reg_type];
		obj = buf_info->obj[reg_type];

		if (buf_info->types[reg_type] == BUF_TYPE_GEM)
			exynos_drm_gem_put(obj);
		else
			g2d_userptr_put_dma_addr(g2d, obj, false);

		buf_info->reg_types[i] = REG_TYPE_NONE;
		buf_info->obj[reg_type] = NULL;
		buf_info->types[reg_type] = 0;
		memset(buf_desc, 0x00, sizeof(*buf_desc));
	}

	buf_info->map_nr = 0;
}

static void g2d_dma_start(struct g2d_data *g2d,
			  struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node =
		list_first_entry(&runqueue_node->run_cmdlist,
				 struct g2d_cmdlist_node, list);

	set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}

static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
{
	struct g2d_runqueue_node *runqueue_node;

	if (list_empty(&g2d->runqueue))
		return NULL;

	runqueue_node = list_first_entry(&g2d->runqueue,
					 struct g2d_runqueue_node, list);
	list_del_init(&runqueue_node->list);
	return runqueue_node;
}

static void g2d_free_runqueue_node(struct g2d_data *g2d,
				   struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node;

	mutex_lock(&g2d->cmdlist_mutex);
	/*
	 * commands in run_cmdlist have been completed so unmap all gem
	 * objects in each command node so that they are unreferenced.
	 */
	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);

	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}

/**
 * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes
 * @g2d: G2D state object
 * @file: if not NULL, only remove items belonging to this DRM file
 *
 * Has to be called under runqueue lock.
 */
static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file)
{
	struct g2d_runqueue_node *node, *n;

	if (list_empty(&g2d->runqueue))
		return;

	list_for_each_entry_safe(node, n, &g2d->runqueue, list) {
		if (file && node->filp != file)
			continue;

		list_del_init(&node->list);
		g2d_free_runqueue_node(g2d, node);
	}
}

static void g2d_runqueue_worker(struct work_struct *work)
{
	struct g2d_data *g2d = container_of(work, struct g2d_data,
					    runqueue_work);
	struct g2d_runqueue_node *runqueue_node;

	/*
	 * The engine is busy and the completion of the current node is going
	 * to poke the runqueue worker, so nothing to do here.
	 */
	if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags))
		return;

	mutex_lock(&g2d->runqueue_mutex);

	runqueue_node = g2d->runqueue_node;
	g2d->runqueue_node = NULL;

	if (runqueue_node) {
		pm_runtime_mark_last_busy(g2d->dev);
		pm_runtime_put_autosuspend(g2d->dev);

		complete(&runqueue_node->complete);
		if (runqueue_node->async)
			g2d_free_runqueue_node(g2d, runqueue_node);
	}

	if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) {
		g2d->runqueue_node = g2d_get_runqueue_node(g2d);

		if (g2d->runqueue_node) {
			pm_runtime_get_sync(g2d->dev);
			g2d_dma_start(g2d, g2d->runqueue_node);
		}
	}

	mutex_unlock(&g2d->runqueue_mutex);
}

static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
	struct drm_device *drm_dev = g2d->drm_dev;
	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
	struct drm_exynos_pending_g2d_event *e;
	struct timespec64 now;

	if (list_empty(&runqueue_node->event_list))
		return;

	e = list_first_entry(&runqueue_node->event_list,
			     struct drm_exynos_pending_g2d_event, base.link);

	ktime_get_ts64(&now);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
	e->event.cmdlist_no = cmdlist_no;

	drm_send_event(drm_dev, &e->base);
}

static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
{
	struct g2d_data *g2d = dev_id;
	u32 pending;

	pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
	if (pending)
		writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);

	if (pending & G2D_INTP_GCMD_FIN) {
		u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);

		cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
						G2D_DMA_LIST_DONE_COUNT_OFFSET;

		g2d_finish_event(g2d, cmdlist_no);

		writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
		if (!(pending & G2D_INTP_ACMD_FIN)) {
			writel_relaxed(G2D_DMA_CONTINUE,
				       g2d->regs + G2D_DMA_COMMAND);
		}
	}

	if (pending & G2D_INTP_ACMD_FIN) {
		clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
		queue_work(g2d->g2d_workq, &g2d->runqueue_work);
	}

	return IRQ_HANDLED;
}

/**
 * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node
 * @g2d: G2D state object
 * @file: if not NULL, only wait if the current runqueue node belongs
 *        to the DRM file
 *
 * Should the engine not become idle after a 100ms timeout, a hardware
 * reset is issued.
 */
static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file)
{
	struct device *dev = g2d->dev;

	struct g2d_runqueue_node *runqueue_node = NULL;
	unsigned int tries = 10;

	mutex_lock(&g2d->runqueue_mutex);

	/* If no node is currently processed, we have nothing to do. */
	if (!g2d->runqueue_node)
		goto out;

	runqueue_node = g2d->runqueue_node;

	/* Check if the currently processed item belongs to us. */
	if (file && runqueue_node->filp != file)
		goto out;

	mutex_unlock(&g2d->runqueue_mutex);

	/* Wait for the G2D engine to finish. */
	while (tries-- && (g2d->runqueue_node == runqueue_node))
		mdelay(10);

	mutex_lock(&g2d->runqueue_mutex);

	if (g2d->runqueue_node != runqueue_node)
		goto out;

	dev_err(dev, "wait timed out, resetting engine...\n");
	g2d_hw_reset(g2d);

	/*
	 * After the hardware reset of the engine we are going to lose
	 * the IRQ which triggers the PM runtime put().
	 * So do this manually here.
	 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	complete(&runqueue_node->complete);
	if (runqueue_node->async)
		g2d_free_runqueue_node(g2d, runqueue_node);

out:
	mutex_unlock(&g2d->runqueue_mutex);
}

static int g2d_check_reg_offset(struct g2d_data *g2d,
				struct g2d_cmdlist_node *node,
				int nr, bool for_addr)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	int reg_offset;
	int index;
	int i;

	for (i = 0; i < nr; i++) {
		struct g2d_buf_info *buf_info = &node->buf_info;
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		unsigned long value;

		index = cmdlist->last - 2 * (i + 1);

		reg_offset = cmdlist->data[index] & ~0xfffff000;
		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
			goto err;
		if (reg_offset % 4)
			goto err;

		switch (reg_offset) {
		case G2D_SRC_BASE_ADDR:
		case G2D_SRC_PLANE2_BASE_ADDR:
		case G2D_DST_BASE_ADDR:
		case G2D_DST_PLANE2_BASE_ADDR:
		case G2D_PAT_BASE_ADDR:
		case G2D_MSK_BASE_ADDR:
			if (!for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			/* check userptr buffer type. */
			if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
				buf_info->types[reg_type] = BUF_TYPE_USERPTR;
				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
			} else
				buf_info->types[reg_type] = BUF_TYPE_GEM;
			break;
		case G2D_SRC_STRIDE:
		case G2D_DST_STRIDE:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			buf_desc->stride = cmdlist->data[index + 1];
			break;
		case G2D_SRC_COLOR_MODE:
		case G2D_DST_COLOR_MODE:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			buf_desc->format = value & 0xf;
			break;
		case G2D_SRC_LEFT_TOP:
		case G2D_DST_LEFT_TOP:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			buf_desc->left_x = value & 0x1fff;
			buf_desc->top_y = (value & 0x1fff0000) >> 16;
			break;
		case G2D_SRC_RIGHT_BOTTOM:
		case G2D_DST_RIGHT_BOTTOM:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			buf_desc->right_x = value & 0x1fff;
			buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
			break;
		default:
			if (for_addr)
				goto err;
			break;
		}
	}

	return 0;

err:
	dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
	return -EINVAL;
}

/* ioctl functions */
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
			     struct drm_file *file)
{
	struct drm_exynos_g2d_get_ver *ver = data;

	ver->major = G2D_HW_MAJOR_VER;
	ver->minor = G2D_HW_MINOR_VER;

	return 0;
}

int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
				 struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_private *priv = drm_dev->dev_private;
	struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
	struct drm_exynos_g2d_set_cmdlist *req = data;
	struct drm_exynos_g2d_cmd *cmd;
	struct drm_exynos_pending_g2d_event *e;
	struct g2d_cmdlist_node *node;
	struct g2d_cmdlist *cmdlist;
	int size;
	int ret;

	/*
	 * To avoid an integer overflow for the later size computations, we
	 * enforce a maximum number of submitted commands here. This limit is
	 * sufficient for all conceivable usage cases of the G2D.
	 * Check this before grabbing a cmdlist node, so that nothing has to
	 * be put back on failure.
	 */
	if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
	    req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
		dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
		return -EINVAL;
	}

	node = g2d_get_cmdlist(g2d);
	if (!node)
		return -ENOMEM;

	node->event = NULL;

	if (req->event_type != G2D_EVENT_NOT) {
		e = kzalloc(sizeof(*node->event), GFP_KERNEL);
		if (!e) {
			ret = -ENOMEM;
			goto err;
		}

		e->event.base.type = DRM_EXYNOS_G2D_EVENT;
		e->event.base.length = sizeof(e->event);
		e->event.user_data = req->user_data;

		ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
		if (ret) {
			kfree(e);
			goto err;
		}

		node->event = e;
	}

	cmdlist = node->cmdlist;

	cmdlist->last = 0;

	/*
	 * If the SFR registers are not cleared, the cmdlist is affected by the
	 * register values of the previous cmdlist. The G2D hardware executes
	 * the SFR clear command and the following command at the same time, so
	 * that following command is ignored and execution only resumes
	 * correctly with the command after it. Hence a dummy command is needed
	 * right after the SFR clear command.
	 */
	cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
	cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
	cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
	cmdlist->data[cmdlist->last++] = 0;

	/*
	 * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG
	 * and GCF bit should be set to INTEN register if user wants
	 * G2D interrupt event once current command list execution is
	 * finished.
	 * Otherwise only ACF bit should be set to INTEN register so
	 * that one interrupt occurs after all command lists
	 * have been completed.
	 */
	if (node->event) {
		cmdlist->data[cmdlist->last++] = G2D_INTEN;
		cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
		cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
		cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
	} else {
		cmdlist->data[cmdlist->last++] = G2D_INTEN;
		cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
	}

	/*
	 * Check the size of cmdlist. The 2 that is added last comes from
	 * the implicit G2D_BITBLT_START that is appended once we have
	 * checked all the submitted commands.
	 */
	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
	if (size > G2D_CMDLIST_DATA_NUM) {
		dev_err(g2d->dev, "cmdlist size is too big\n");
		ret = -EINVAL;
		goto err_free_event;
	}

	cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;

	if (copy_from_user(cmdlist->data + cmdlist->last,
			   (void __user *)cmd,
			   sizeof(*cmd) * req->cmd_nr)) {
		ret = -EFAULT;
		goto err_free_event;
	}
	cmdlist->last += req->cmd_nr * 2;

	ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
	if (ret < 0)
		goto err_free_event;

	node->buf_info.map_nr = req->cmd_buf_nr;
	if (req->cmd_buf_nr) {
		struct drm_exynos_g2d_cmd *cmd_buf;

		cmd_buf = (struct drm_exynos_g2d_cmd *)
				(unsigned long)req->cmd_buf;

		if (copy_from_user(cmdlist->data + cmdlist->last,
				   (void __user *)cmd_buf,
				   sizeof(*cmd_buf) * req->cmd_buf_nr)) {
			ret = -EFAULT;
			goto err_free_event;
		}
		cmdlist->last += req->cmd_buf_nr * 2;

		ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
		if (ret < 0)
			goto err_free_event;

		ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
		if (ret < 0)
			goto err_unmap;
	}

	cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
	cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;

	/* head */
	cmdlist->head = cmdlist->last / 2;

	/* tail */
	cmdlist->data[cmdlist->last] = 0;

	g2d_add_cmdlist_to_inuse(file_priv, node);

	return 0;

err_unmap:
	g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
	if (node->event)
		drm_event_cancel_free(drm_dev, &node->event->base);
err:
	g2d_put_cmdlist(g2d, node);
	return ret;
}

int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
			  struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_private *priv = drm_dev->dev_private;
	struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
	struct drm_exynos_g2d_exec *req = data;
	struct g2d_runqueue_node *runqueue_node;
	struct list_head *run_cmdlist;
	struct list_head *event_list;

	runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
	if (!runqueue_node)
		return -ENOMEM;

	run_cmdlist = &runqueue_node->run_cmdlist;
	event_list = &runqueue_node->event_list;
	INIT_LIST_HEAD(run_cmdlist);
	INIT_LIST_HEAD(event_list);
	init_completion(&runqueue_node->complete);
	runqueue_node->async = req->async;

	list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
	list_splice_init(&file_priv->event_list, event_list);

	if (list_empty(run_cmdlist)) {
		dev_err(g2d->dev, "there is no inuse cmdlist\n");
		kmem_cache_free(g2d->runqueue_slab, runqueue_node);
		return -EPERM;
	}

	mutex_lock(&g2d->runqueue_mutex);
	runqueue_node->pid = current->pid;
	runqueue_node->filp = file;
	list_add_tail(&runqueue_node->list, &g2d->runqueue);
	mutex_unlock(&g2d->runqueue_mutex);

	/* Let the runqueue know that there is work to do. */
	queue_work(g2d->g2d_workq, &g2d->runqueue_work);

	if (runqueue_node->async)
		goto out;

	wait_for_completion(&runqueue_node->complete);
	g2d_free_runqueue_node(g2d, runqueue_node);

out:
	return 0;
}

int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
	INIT_LIST_HEAD(&file_priv->event_list);
	INIT_LIST_HEAD(&file_priv->userptr_list);

	return 0;
}

void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_private *priv = drm_dev->dev_private;
	struct g2d_data *g2d;
	struct g2d_cmdlist_node *node, *n;

	if (!priv->g2d_dev)
		return;

	g2d = dev_get_drvdata(priv->g2d_dev);

	/* Remove the runqueue nodes that belong to us. */
	mutex_lock(&g2d->runqueue_mutex);
	g2d_remove_runqueue_nodes(g2d, file);
	mutex_unlock(&g2d->runqueue_mutex);

	/*
	 * Wait for the runqueue worker to finish its current node.
	 * After this the engine should no longer be accessing any
	 * memory belonging to us.
	 */
	g2d_wait_finish(g2d, file);

	/*
	 * Even after the engine is idle, there might still be stale cmdlists
	 * (i.e. cmdlists which we submitted but never executed) around, with
	 * their corresponding GEM/userptr buffers.
	 * Properly unmap these buffers here.
	 */
	mutex_lock(&g2d->cmdlist_mutex);
	list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
		g2d_unmap_cmdlist_gem(g2d, node, file);
		list_move_tail(&node->list, &g2d->free_cmdlist);
	}
	mutex_unlock(&g2d->cmdlist_mutex);

	/* release all g2d_userptr in pool. */
	g2d_userptr_free_all(g2d, file);
}

static int g2d_bind(struct device *dev, struct device *master, void *data)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_private *priv = drm_dev->dev_private;
	int ret;

	g2d->drm_dev = drm_dev;

	/* allocate dma-aware cmdlist buffer. */
	ret = g2d_init_cmdlist(g2d);
	if (ret < 0) {
		dev_err(dev, "cmdlist init failed\n");
		return ret;
	}

	ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
	if (ret < 0) {
		dev_err(dev, "failed to enable iommu.\n");
		g2d_fini_cmdlist(g2d);
		return ret;
	}
	priv->g2d_dev = dev;

	dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
		 G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
	return 0;
}

static void g2d_unbind(struct device *dev, struct device *master, void *data)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_private *priv = drm_dev->dev_private;

	/* Suspend operation and wait for engine idle. */
	set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	g2d_wait_finish(g2d, NULL);
	priv->g2d_dev = NULL;

	cancel_work_sync(&g2d->runqueue_work);
	exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}

static const struct component_ops g2d_component_ops = {
	.bind = g2d_bind,
	.unbind = g2d_unbind,
};

static int g2d_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct g2d_data *g2d;
	int ret;

	g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
	if (!g2d)
		return -ENOMEM;

	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
			sizeof(struct g2d_runqueue_node), 0, 0, NULL);
	if (!g2d->runqueue_slab)
		return -ENOMEM;

	g2d->dev = dev;

	g2d->g2d_workq = create_singlethread_workqueue("g2d");
	if (!g2d->g2d_workq) {
		dev_err(dev, "failed to create workqueue\n");
		ret = -EINVAL;
		goto err_destroy_slab;
	}

	INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
	INIT_LIST_HEAD(&g2d->free_cmdlist);
	INIT_LIST_HEAD(&g2d->runqueue);

	mutex_init(&g2d->cmdlist_mutex);
	mutex_init(&g2d->runqueue_mutex);

	g2d->gate_clk = devm_clk_get(dev, "fimg2d");
	if (IS_ERR(g2d->gate_clk)) {
		dev_err(dev, "failed to get gate clock\n");
		ret = PTR_ERR(g2d->gate_clk);
		goto err_destroy_workqueue;
	}

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_enable(dev);
	clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	g2d->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(g2d->regs)) {
		ret = PTR_ERR(g2d->regs);
		goto err_put_clk;
	}

	g2d->irq = platform_get_irq(pdev, 0);
	if (g2d->irq < 0) {
		ret = g2d->irq;
		goto err_put_clk;
	}

	ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
			       "drm_g2d", g2d);
	if (ret < 0) {
		dev_err(dev, "irq request failed\n");
		goto err_put_clk;
	}

	g2d->max_pool = MAX_POOL;

	platform_set_drvdata(pdev, g2d);

	ret = component_add(dev, &g2d_component_ops);
	if (ret < 0) {
		dev_err(dev, "failed to register drm g2d device\n");
		goto err_put_clk;
	}

	return 0;

err_put_clk:
	pm_runtime_disable(dev);
err_destroy_workqueue:
	destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
	kmem_cache_destroy(g2d->runqueue_slab);
	return ret;
}

static int g2d_remove(struct platform_device *pdev)
{
	struct g2d_data *g2d = platform_get_drvdata(pdev);

	component_del(&pdev->dev, &g2d_component_ops);

	/* There should be no locking needed here. */
	g2d_remove_runqueue_nodes(g2d, NULL);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	g2d_fini_cmdlist(g2d);
	destroy_workqueue(g2d->g2d_workq);
	kmem_cache_destroy(g2d->runqueue_slab);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int g2d_suspend(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	/*
	 * Suspend the runqueue worker operation and wait until the G2D
	 * engine is idle.
	 */
	set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	g2d_wait_finish(g2d, NULL);
	flush_work(&g2d->runqueue_work);

	return 0;
}

static int g2d_resume(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	queue_work(g2d->g2d_workq, &g2d->runqueue_work);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int g2d_runtime_suspend(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	clk_disable_unprepare(g2d->gate_clk);

	return 0;
}

static int g2d_runtime_resume(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(g2d->gate_clk);
	if (ret < 0)
		dev_warn(dev, "failed to enable clock.\n");

	return ret;
}
#endif

static const struct dev_pm_ops g2d_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
	SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};

static const struct of_device_id exynos_g2d_match[] = {
	{ .compatible = "samsung,exynos5250-g2d" },
	{ .compatible = "samsung,exynos4212-g2d" },
	{},
};
MODULE_DEVICE_TABLE(of, exynos_g2d_match);

struct platform_driver g2d_driver = {
	.probe = g2d_probe,
	.remove = g2d_remove,
	.driver = {
		.name = "exynos-drm-g2d",
		.owner = THIS_MODULE,
		.pm = &g2d_pm_ops,
		.of_match_table = exynos_g2d_match,
	},
};
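
/*
 * Illustrative userspace sketch (not part of the driver): the rough flow of
 * the three ioctls implemented above. This is only a hedged example; it
 * assumes the definitions from the exynos_drm.h UAPI header (the install
 * path may vary), uses a hypothetical device node, omits all error handling,
 * and does not program enough registers for a meaningful blit. The register
 * offset/value pair mirrors the defines at the top of this file, which
 * userspace would have to duplicate itself.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/exynos_drm.h>
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);	// hypothetical DRM node
 *
 *	struct drm_exynos_g2d_get_ver ver = { 0 };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_GET_VER, &ver);	// reports 4.1 here
 *
 *	// Each command is a (register offset, value) pair; the offset must
 *	// fall inside the 0x0104..0x0880 window enforced by
 *	// g2d_check_reg_offset().
 *	struct drm_exynos_g2d_cmd cmds[] = {
 *		{ 0x030C, 1 },	// G2D_SRC_COLOR_MODE = G2D_FMT_ARGB8888
 *	};
 *
 *	struct drm_exynos_g2d_set_cmdlist req = {
 *		.cmd = (uint64_t)(uintptr_t)cmds,
 *		.cmd_nr = sizeof(cmds) / sizeof(cmds[0]),
 *		.event_type = G2D_EVENT_NOT,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &req);
 *
 *	struct drm_exynos_g2d_exec exec = { .async = 0 };
 *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);	// blocks until the
 *							// runqueue node is done
 */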