/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/of_platform.h>
#include <drm/drm_of.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"

#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ptr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ptr)) {
		dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
			PTR_ERR(ptr));
		return ptr;
	}

	if (reglog)
		dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
			   dbgname, ptr, (size_t)resource_size(res));

	return ptr;
}

void etnaviv_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

	writel(data, addr);
}

u32 etnaviv_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);

	return val;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}

static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu) {
			mutex_lock(&gpu->lock);
			if (gpu->lastctx == ctx)
				gpu->lastctx = NULL;
			mutex_unlock(&gpu->lock);
		}
	}

	kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}

static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	read_lock(&dev->vma_offset_manager->vm_lock);
	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	mutex_lock(&gpu->mmu->lock);
	drm_mm_print(&gpu->mmu->mm, &p);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
			size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}

static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{"mm", show_unlocked, 0, etnaviv_mm_show},
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static int etnaviv_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(etnaviv_debugfs_list,
				       ARRAY_SIZE(etnaviv_debugfs_list),
				       minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
		return ret;
	}

	return ret;
}
#endif

/*
 * DRM ioctls:
 */

static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct timespec *timeout = &TS(args->timeout);
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}

static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct timespec *timeout = &TS(args->timeout);
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_domain *args = data;
	struct etnaviv_gpu *gpu;

	/* reject as long as the feature isn't stable */
	return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_dom(gpu, args);
}

static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_signal *args = data;
	struct etnaviv_gpu *gpu;

	/* reject as long as the feature isn't stable */
	return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_sig(gpu, args);
}

static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};

static struct drm_driver etnaviv_drm_driver = {
	.driver_features = DRIVER_GEM |
			   DRIVER_PRIME |
			   DRIVER_RENDER,
	.open = etnaviv_open,
	.postclose = etnaviv_postclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_res_obj = etnaviv_gem_prime_res_obj,
	.gem_prime_pin = etnaviv_gem_prime_pin,
	.gem_prime_unpin = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap = etnaviv_gem_prime_vunmap,
	.gem_prime_mmap = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = etnaviv_debugfs_init,
#endif
	.ioctls = etnaviv_ioctls,
	.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
	.fops = &fops,
	.name = "etnaviv",
	.desc = "etnaviv DRM",
	.date = "20151214",
	.major = 1,
	.minor = 1,
};

/*
 * Platform driver:
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}

static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_unref(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};

static int compare_of(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

static int compare_str(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}

static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct component_match *match = NULL;

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (node) {
		struct device_node *core_node;
		int i;

		for (i = 0; ; i++) {
			core_node = of_parse_phandle(node, "cores", i);
			if (!core_node)
				break;

			drm_of_component_match_add(&pdev->dev, &match,
						   compare_of, core_node);
			of_node_put(core_node);
		}
	} else if (dev->platform_data) {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, compare_str, names[i]);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "fsl,imx-gpu-subsystem" },
	{ .compatible = "marvell,dove-gpu-subsystem" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver etnaviv_platform_driver = {
	.probe = etnaviv_pdev_probe,
	.remove = etnaviv_pdev_remove,
	.driver = {
		.name = "etnaviv",
		.of_match_table = dt_match,
	},
};

static int __init etnaviv_init(void)
{
	int ret;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		platform_driver_unregister(&etnaviv_gpu_driver);

	return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	platform_driver_unregister(&etnaviv_gpu_driver);
	platform_driver_unregister(&etnaviv_platform_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");