/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/of_platform.h>
#include <drm/drm_of.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ptr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ptr)) {
		dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
			PTR_ERR(ptr));
		return ptr;
	}

	if (reglog)
		dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
			   dbgname, ptr, (size_t)resource_size(res));

	return ptr;
}

void etnaviv_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

	writel(data, addr);
}

u32 etnaviv_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);

	return val;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}

static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu) {
			mutex_lock(&gpu->lock);
			if (gpu->lastctx == ctx)
				gpu->lastctx = NULL;
			mutex_unlock(&gpu->lock);
		}
	}

	kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}
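
/*
 * Dump the address space of the DRM mmap offset (VMA) manager, so the
 * mmap ranges currently allocated to GEM objects can be inspected from
 * debugfs.
 */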
static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	read_lock(&dev->vma_offset_manager->vm_lock);
	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	mutex_lock(&gpu->mmu->lock);
	drm_mm_print(&gpu->mmu->mm, &p);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
		   buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
		   size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}

static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{"mm", show_unlocked, 0, etnaviv_mm_show},
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static int etnaviv_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(etnaviv_debugfs_list,
				       ARRAY_SIZE(etnaviv_debugfs_list),
				       minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
		return ret;
	}

	return ret;
}
#endif

/*
 * DRM ioctls:
 */

static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}
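
/*
 * Allocate a new GEM buffer object and return a handle for it. Only the
 * caching/placement flags known to the kernel (CACHED, WC, UNCACHED,
 * FORCE_MMU) are accepted; anything else is rejected with -EINVAL.
 */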
static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct timespec *timeout = &TS(args->timeout);
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}

static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}
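
/*
 * Wait for rendering on a buffer object to finish on the given pipe.
 * ETNA_WAIT_NONBLOCK drops the timeout (timeout == NULL), turning the
 * call into a non-blocking check.
 */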
static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct timespec *timeout = &TS(args->timeout);
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};

static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER,
	.open               = etnaviv_open,
	.postclose          = etnaviv_postclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_res_obj  = etnaviv_gem_prime_res_obj,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
	.gem_prime_mmap     = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 1,
};

/*
 * Platform driver:
 */
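/*
 * Master bind callback for the component framework: allocate the DRM
 * device and driver private data, bind all GPU core components, bring
 * the GPUs up and register the DRM device. The error paths unwind in
 * reverse order.
 */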
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}

static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_unref(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};

static int compare_of(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

static int compare_str(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}

static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct component_match *match = NULL;

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (node) {
		struct device_node *core_node;
		int i;

		for (i = 0; ; i++) {
			core_node = of_parse_phandle(node, "cores", i);
			if (!core_node)
				break;

			drm_of_component_match_add(&pdev->dev, &match,
						   compare_of, core_node);
			of_node_put(core_node);
		}
	} else if (dev->platform_data) {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, compare_str, names[i]);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "fsl,imx-gpu-subsystem" },
	{ .compatible = "marvell,dove-gpu-subsystem" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver etnaviv_platform_driver = {
	.probe = etnaviv_pdev_probe,
	.remove = etnaviv_pdev_remove,
	.driver = {
		.name = "etnaviv",
		.of_match_table = dt_match,
	},
};

static int __init etnaviv_init(void)
{
	int ret;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		platform_driver_unregister(&etnaviv_gpu_driver);

	return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	platform_driver_unregister(&etnaviv_gpu_driver);
	platform_driver_unregister(&etnaviv_platform_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");