/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/of_platform.h>
#include <drm/drm_of.h>

#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ptr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ptr)) {
		dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
			PTR_ERR(ptr));
		return ptr;
	}

	if (reglog)
		dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
			   dbgname, ptr, (size_t)resource_size(res));

	return ptr;
}

void etnaviv_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

	writel(data, addr);
}

u32 etnaviv_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);

	return val;
}

/*
 * DRM operations:
 */

/*
 * Initialise all bound GPU cores.  A core which fails to initialise is
 * dropped from the pipe array, so later ioctls simply see it as absent.
 */
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}

static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	/* Drop any dangling last-context pointers before the context is freed. */
	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu) {
			mutex_lock(&gpu->lock);
			if (gpu->lastctx == ctx)
				gpu->lastctx = NULL;
			mutex_unlock(&gpu->lock);
		}
	}

	kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}

static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	int ret;

	read_lock(&dev->vma_offset_manager->vm_lock);
	ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return ret;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	mutex_lock(&gpu->mmu->lock);
	drm_mm_dump_table(m, &gpu->mmu->mm);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
		   buf->vaddr, (u64)buf->paddr, size - buf->user_size);

	/* Dump the ring buffer contents, four words per line. */
	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}

/* debugfs helper: run a device-wide show callback. */
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

/* debugfs helper: run a per-GPU show callback for every present pipe. */
static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{"mm", show_unlocked, 0, etnaviv_mm_show},
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static int etnaviv_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(etnaviv_debugfs_list,
				       ARRAY_SIZE(etnaviv_debugfs_list),
				       minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
		return ret;
	}

	return ret;
}

static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(etnaviv_debugfs_list,
				 ARRAY_SIZE(etnaviv_debugfs_list), minor);
}
#endif

/*
 * DRM ioctls:
 */

static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

/* Convert a userspace-supplied timeout into a kernel struct timespec. */
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct timespec *timeout = &TS(args->timeout);
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}

static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	if (args->flags & ~(ETNA_USERPTR_READ | ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	/* Pointer and size must be page aligned and fit the kernel's native types. */
	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct timespec *timeout = &TS(args->timeout);
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner          = THIS_MODULE,
	.open           = drm_open,
	.release        = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl   = drm_compat_ioctl,
	.poll           = drm_poll,
	.read           = drm_read,
	.llseek         = no_llseek,
	.mmap           = etnaviv_gem_mmap,
};

static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_GEM |
			      DRIVER_PRIME |
			      DRIVER_RENDER,
	.open               = etnaviv_open,
	.preclose           = etnaviv_preclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
	.debugfs_cleanup    = etnaviv_debugfs_cleanup,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 0,
};

/*
 * Platform driver:
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}

static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_put_dev(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};

static int compare_of(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

static int compare_str(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}

/*
 * The subsystem device either comes from DT, where the node carries a
 * "cores" phandle list (e.g. cores = <&gpu3d>, <&gpu2d>;), or from board
 * code providing a NULL-terminated array of device names as platform
 * data.  Build the component match list from whichever is present.
 */
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct component_match *match = NULL;

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (node) {
		struct device_node *core_node;
		int i;

		for (i = 0; ; i++) {
			core_node = of_parse_phandle(node, "cores", i);
			if (!core_node)
				break;

			drm_of_component_match_add(&pdev->dev, &match,
						   compare_of, core_node);
			of_node_put(core_node);
		}
	} else if (dev->platform_data) {
		char **names = dev->platform_data;
		unsigned int i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, compare_str, names[i]);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "fsl,imx-gpu-subsystem" },
	{ .compatible = "marvell,dove-gpu-subsystem" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name           = "etnaviv",
		.of_match_table = dt_match,
	},
};

static int __init etnaviv_init(void)
{
	int ret;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		platform_driver_unregister(&etnaviv_gpu_driver);

	return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	/* Tear down in reverse order of registration. */
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");