/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/component.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

#define DRIVER_NAME	"exynos"
#define DRIVER_DESC	"Samsung SoC DRM"
#define DRIVER_DATE	"20110530"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

struct exynos_atomic_commit {
	struct work_struct	work;
	struct drm_device	*dev;
	struct drm_atomic_state	*state;
	u32			crtcs;
};

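/*
 * Wait for all pending plane updates on the CRTCs touched by this commit to
 * reach the hardware. Disabled CRTCs are skipped, and a vblank reference is
 * held across the wait so the vblank interrupt stays enabled.
 */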
static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

		if (!crtc->state->enable)
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			continue;

		exynos_drm_crtc_wait_pending_update(exynos_crtc);
		drm_crtc_vblank_put(crtc);
	}
}

static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct exynos_drm_private *priv = dev->dev_private;
	struct drm_atomic_state *state = commit->state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int i;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/*
	 * Exynos can't update planes with CRTCs and encoders disabled; its
	 * update routines, especially for FIMD, require the clocks to be
	 * enabled. So it is necessary to handle the modeset operations
	 * *before* the commit_planes() step, this way it will always have
	 * the relevant clocks enabled to perform the update.
	 */

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

		atomic_set(&exynos_crtc->pending_update, 0);
	}

	for_each_plane_in_state(state, plane, plane_state, i) {
		struct exynos_drm_crtc *exynos_crtc =
						to_exynos_crtc(plane->crtc);

		if (!plane->crtc)
			continue;

		atomic_inc(&exynos_crtc->pending_update);
	}

	drm_atomic_helper_commit_planes(dev, state, false);

	exynos_atomic_wait_for_commit(state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	spin_lock(&priv->lock);
	priv->pending &= ~commit->crtcs;
	spin_unlock(&priv->lock);

	wake_up_all(&priv->wait);

	kfree(commit);
}

static void exynos_drm_atomic_work(struct work_struct *work)
{
	struct exynos_atomic_commit *commit = container_of(work,
				struct exynos_atomic_commit, work);

	exynos_atomic_commit_complete(commit);
}

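/*
 * drm_driver .load callback, run by the DRM core once the component master
 * has bound: allocate the driver private data, create the IOMMU mapping,
 * initialize mode config, bind all sub-components and set up vblank and
 * HPD polling.
 */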
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct exynos_drm_private *private;
	struct drm_encoder *encoder;
	unsigned int clone_mask;
	int cnt, ret;

	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;

	init_waitqueue_head(&private->wait);
	spin_lock_init(&private->lock);

	dev_set_drvdata(dev->dev, dev);
	dev->dev_private = (void *)private;

	/*
	 * Create a mapping to manage the IOMMU table and store a pointer to
	 * it in the iommu_mapping field of the private data. This mapping
	 * can also be used to check whether the IOMMU is supported.
	 */
	ret = drm_create_iommu_mapping(dev);
	if (ret < 0) {
		DRM_ERROR("failed to create iommu mapping.\n");
		goto err_free_private;
	}

	drm_mode_config_init(dev);

	exynos_drm_mode_config_init(dev);

	/* setup possible_clones. */
	cnt = 0;
	clone_mask = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		clone_mask |= (1 << (cnt++));

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		encoder->possible_clones = clone_mask;

	platform_set_drvdata(dev->platformdev, dev);

	/* Try to bind all sub drivers. */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		goto err_mode_config_cleanup;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_unbind_all;

	/* Probe the non-KMS sub drivers and the virtual display driver. */
	ret = exynos_drm_device_subdrv_probe(dev);
	if (ret)
		goto err_cleanup_vblank;

	drm_mode_config_reset(dev);

	/*
	 * Enable DRM irq mode.
	 * - with irq_enabled = true, we can use the vblank feature.
	 *
	 * Note that we do not use the DRM core irq handler; each device
	 * driver installs its own handler instead, because the DRM
	 * framework supports only one irq handler.
	 */
	dev->irq_enabled = true;

	/*
	 * With vblank_disable_allowed = true, the vblank interrupt is
	 * disabled by the DRM timer once the current process gives up
	 * ownership of the vblank event (i.e. after drm_vblank_put() is
	 * called).
	 */
	dev->vblank_disable_allowed = true;

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_cleanup_vblank:
	drm_vblank_cleanup(dev);
err_unbind_all:
	component_unbind_all(dev->dev, dev);
err_mode_config_cleanup:
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);
err_free_private:
	kfree(private);

	return ret;
}

static int exynos_drm_unload(struct drm_device *dev)
{
	exynos_drm_device_subdrv_remove(dev);

	exynos_drm_fbdev_fini(dev);
	drm_kms_helper_poll_fini(dev);

	drm_vblank_cleanup(dev);
	component_unbind_all(dev->dev, dev);
	drm_mode_config_cleanup(dev);
	drm_release_iommu_mapping(dev);

	kfree(dev->dev_private);
	dev->dev_private = NULL;

	return 0;
}

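/*
 * Check, under the private lock, whether any of the given CRTCs still has an
 * earlier commit in flight.
 */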
static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
{
	bool pending;

	spin_lock(&priv->lock);
	pending = priv->pending & crtcs;
	spin_unlock(&priv->lock);

	return pending;
}

int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
			 bool async)
{
	struct exynos_drm_private *priv = dev->dev_private;
	struct exynos_atomic_commit *commit;
	int i, ret;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return -ENOMEM;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		kfree(commit);
		return ret;
	}

	/* This is the point of no return */

	INIT_WORK(&commit->work, exynos_drm_atomic_work);
	commit->dev = dev;
	commit->state = state;

	/*
	 * Wait until all affected CRTCs have completed previous commits and
	 * mark them as pending.
	 */
	for (i = 0; i < dev->mode_config.num_crtc; ++i) {
		if (state->crtcs[i])
			commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
	}

	wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));

	spin_lock(&priv->lock);
	priv->pending |= commit->crtcs;
	spin_unlock(&priv->lock);

	drm_atomic_helper_swap_state(dev, state);

	if (async)
		schedule_work(&commit->work);
	else
		exynos_atomic_commit_complete(commit);

	return 0;
}

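/*
 * Per-open-file handling: allocate the Exynos-specific file private data and
 * let the registered sub drivers (such as G2D and IPP) set up their per-file
 * state.
 */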
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv;
	int ret;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	ret = exynos_drm_subdrv_open(dev, file);
	if (ret)
		goto err_file_priv_free;

	return ret;

err_file_priv_free:
	kfree(file_priv);
	file->driver_priv = NULL;
	return ret;
}

static void exynos_drm_preclose(struct drm_device *dev,
				struct drm_file *file)
{
	struct drm_crtc *crtc;

	exynos_drm_subdrv_close(dev, file);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		exynos_drm_crtc_cancel_page_flip(crtc, file);
}

static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
	kfree(file->driver_priv);
	file->driver_priv = NULL;
}

static void exynos_drm_lastclose(struct drm_device *dev)
{
	exynos_drm_fbdev_restore_mode(dev);
}

static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
	.fault = exynos_drm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_ioctl_desc exynos_ioctls[] = {
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
			DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
			DRM_AUTH),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
			DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
			DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct file_operations exynos_drm_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= exynos_drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl	= drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= drm_compat_ioctl,
#endif
	.release	= drm_release,
};

static struct drm_driver exynos_drm_driver = {
	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
				  | DRIVER_ATOMIC | DRIVER_RENDER,
	.load			= exynos_drm_load,
	.unload			= exynos_drm_unload,
	.open			= exynos_drm_open,
	.preclose		= exynos_drm_preclose,
	.lastclose		= exynos_drm_lastclose,
	.postclose		= exynos_drm_postclose,
	.set_busid		= drm_platform_set_busid,
	.get_vblank_counter	= drm_vblank_no_hw_counter,
	.enable_vblank		= exynos_drm_crtc_enable_vblank,
	.disable_vblank		= exynos_drm_crtc_disable_vblank,
	.gem_free_object	= exynos_drm_gem_free_object,
	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
	.dumb_create		= exynos_drm_gem_dumb_create,
	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	.gem_prime_get_sg_table	= exynos_drm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
	.ioctls			= exynos_ioctls,
	.num_ioctls		= ARRAY_SIZE(exynos_ioctls),
	.fops			= &exynos_drm_driver_fops,
	.name			= DRIVER_NAME,
	.desc			= DRIVER_DESC,
	.date			= DRIVER_DATE,
	.major			= DRIVER_MAJOR,
	.minor			= DRIVER_MINOR,
};

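/*
 * System sleep support: suspend forces every connector to DPMS off while
 * remembering its previous state, and resume switches the connectors back
 * to the state they had before suspend.
 */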
#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		int old_dpms = connector->dpms;

		if (connector->funcs->dpms)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);

		/* Set the old mode back to the connector for resume */
		connector->dpms = old_dpms;
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}

static int exynos_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct drm_connector *connector;

	if (pm_runtime_suspended(dev) || !drm_dev)
		return 0;

	drm_modeset_lock_all(drm_dev);
	drm_for_each_connector(connector, drm_dev) {
		if (connector->funcs->dpms) {
			int dpms = connector->dpms;

			connector->dpms = DRM_MODE_DPMS_OFF;
			connector->funcs->dpms(connector, dpms);
		}
	}
	drm_modeset_unlock_all(drm_dev);

	return 0;
}
#endif

static const struct dev_pm_ops exynos_drm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
};

/* forward declaration */
static struct platform_driver exynos_drm_platform_driver;

/*
 * Connector drivers should not be placed before their associated CRTC
 * drivers, because a connector requires the pipe number of its CRTC during
 * initialization.
 */
static struct platform_driver *const exynos_drm_kms_drivers[] = {
#ifdef CONFIG_DRM_EXYNOS_FIMD
	&fimd_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS5433_DECON
	&exynos5433_decon_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS7_DECON
	&decon_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_MIC
	&mic_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_DP
	&dp_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_DSI
	&dsi_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_MIXER
	&mixer_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_HDMI
	&hdmi_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_VIDI
	&vidi_driver,
#endif
};

static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
#ifdef CONFIG_DRM_EXYNOS_G2D
	&g2d_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
	&fimc_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
	&rotator_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_GSC
	&gsc_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_IPP
	&ipp_driver,
#endif
	&exynos_drm_platform_driver,
};

static struct platform_driver *const exynos_drm_drv_with_simple_dev[] = {
#ifdef CONFIG_DRM_EXYNOS_VIDI
	&vidi_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_IPP
	&ipp_driver,
#endif
	&exynos_drm_platform_driver,
};
#define PDEV_COUNT		ARRAY_SIZE(exynos_drm_drv_with_simple_dev)

static int compare_dev(struct device *dev, void *data)
{
	return dev == (struct device *)data;
}

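/*
 * Build the component match list for the master device: for every enabled
 * KMS driver, walk the platform bus and add each device that matches that
 * driver as a component to wait for.
 */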
static struct component_match *exynos_drm_match_add(struct device *dev)
{
	struct component_match *match = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) {
		struct device_driver *drv = &exynos_drm_kms_drivers[i]->driver;
		struct device *p = NULL, *d;

		while ((d = bus_find_device(&platform_bus_type, p, drv,
					    (void *)platform_bus_type.match))) {
			put_device(p);
			component_match_add(dev, &match, compare_dev, d);
			p = d;
		}
		put_device(p);
	}

	return match ?: ERR_PTR(-ENODEV);
}

static int exynos_drm_bind(struct device *dev)
{
	return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
}

static void exynos_drm_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}

static const struct component_master_ops exynos_drm_ops = {
	.bind		= exynos_drm_bind,
	.unbind		= exynos_drm_unbind,
};

static int exynos_drm_platform_probe(struct platform_device *pdev)
{
	struct component_match *match;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);

	match = exynos_drm_match_add(&pdev->dev);
	if (IS_ERR(match))
		return PTR_ERR(match);

	return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
					       match);
}

static int exynos_drm_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &exynos_drm_ops);
	return 0;
}

static struct platform_driver exynos_drm_platform_driver = {
	.probe	= exynos_drm_platform_probe,
	.remove	= exynos_drm_platform_remove,
	.driver	= {
		.name	= "exynos-drm",
		.pm	= &exynos_drm_pm_ops,
	},
};

static struct platform_device *exynos_drm_pdevs[PDEV_COUNT];

static void exynos_drm_unregister_devices(void)
{
	int i = PDEV_COUNT;

	while (--i >= 0) {
		platform_device_unregister(exynos_drm_pdevs[i]);
		exynos_drm_pdevs[i] = NULL;
	}
}

static int exynos_drm_register_devices(void)
{
	int i;

	for (i = 0; i < PDEV_COUNT; ++i) {
		struct platform_driver *d = exynos_drm_drv_with_simple_dev[i];
		struct platform_device *pdev =
			platform_device_register_simple(d->driver.name, -1,
							NULL, 0);

		if (!IS_ERR(pdev)) {
			exynos_drm_pdevs[i] = pdev;
			continue;
		}
		while (--i >= 0) {
			platform_device_unregister(exynos_drm_pdevs[i]);
			exynos_drm_pdevs[i] = NULL;
		}

		return PTR_ERR(pdev);
	}

	return 0;
}

static void exynos_drm_unregister_drivers(struct platform_driver * const *drv,
					  int count)
{
	while (--count >= 0)
		platform_driver_unregister(drv[count]);
}

static int exynos_drm_register_drivers(struct platform_driver * const *drv,
				       int count)
{
	int i, ret;

	for (i = 0; i < count; ++i) {
		ret = platform_driver_register(drv[i]);
		if (!ret)
			continue;

		while (--i >= 0)
			platform_driver_unregister(drv[i]);

		return ret;
	}

	return 0;
}

static inline int exynos_drm_register_kms_drivers(void)
{
	return exynos_drm_register_drivers(exynos_drm_kms_drivers,
					   ARRAY_SIZE(exynos_drm_kms_drivers));
}

static inline int exynos_drm_register_non_kms_drivers(void)
{
	return exynos_drm_register_drivers(exynos_drm_non_kms_drivers,
					   ARRAY_SIZE(exynos_drm_non_kms_drivers));
}

static inline void exynos_drm_unregister_kms_drivers(void)
{
	exynos_drm_unregister_drivers(exynos_drm_kms_drivers,
				      ARRAY_SIZE(exynos_drm_kms_drivers));
}

static inline void exynos_drm_unregister_non_kms_drivers(void)
{
	exynos_drm_unregister_drivers(exynos_drm_non_kms_drivers,
				      ARRAY_SIZE(exynos_drm_non_kms_drivers));
}

static int exynos_drm_init(void)
{
	int ret;

	ret = exynos_drm_register_devices();
	if (ret)
		return ret;

	ret = exynos_drm_register_kms_drivers();
	if (ret)
		goto err_unregister_pdevs;

	ret = exynos_drm_register_non_kms_drivers();
	if (ret)
		goto err_unregister_kms_drivers;

	return 0;

err_unregister_kms_drivers:
	exynos_drm_unregister_kms_drivers();

err_unregister_pdevs:
	exynos_drm_unregister_devices();

	return ret;
}

static void exynos_drm_exit(void)
{
	exynos_drm_unregister_non_kms_drivers();
	exynos_drm_unregister_kms_drivers();
	exynos_drm_unregister_devices();
}

module_init(exynos_drm_init);
module_exit(exynos_drm_exit);

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM Driver");
MODULE_LICENSE("GPL");