/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
#include "display/skl_watermark.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_create.h"
#include "gem/i915_gem_dmabuf.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"

#include "pxp/intel_pxp.h"
#include "pxp/intel_pxp_debugfs.h"
#include "pxp/intel_pxp_pm.h"

#include "soc/intel_dram.h"
#include "soc/intel_gmch.h"

#include "i915_file_private.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_getparam.h"
#include "i915_hwmon.h"
#include "i915_ioc32.h"
#include "i915_ioctl.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_region_ttm.h"
#include "vlv_suspend.h"

static const struct drm_driver i915_drm_driver;

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->display.hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->display.hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
	pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
	pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
	pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
	pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
	pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
	pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1;

	if (pre) {
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
			"It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/*
 * Scrub any GPU state left over from the BIOS or a previous driver instance
 * by resetting all GTs, unless a full GPU reset would also clobber the
 * display.
 */
static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
		struct intel_gt *gt;
		unsigned int i;

		for_each_gt(gt, i915, i)
			__intel_gt_reset(gt, ALL_ENGINES);
	}
}

/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init_early(dev_priv);

	intel_step_init(dev_priv);

	intel_uncore_mmio_debug_init_early(dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->display.backlight.lock);

	mutex_init(&dev_priv->sb_lock);
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->display.audio.mutex);
	mutex_init(&dev_priv->display.wm.wm_mutex);
	mutex_init(&dev_priv->display.pps.mutex);
	mutex_init(&dev_priv->display.hdcp.comp_mutex);
	spin_lock_init(&dev_priv->display.dkl.phy_lock);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	ret = intel_region_ttm_device_init(dev_priv);
	if (ret)
		goto err_ttm;

	ret = intel_root_gt_init_early(dev_priv);
	if (ret < 0)
		goto err_rootgt;

	i915_drm_clients_init(&dev_priv->clients, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_rootgt:
	intel_region_ttm_device_fini(dev_priv);
err_ttm:
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			      i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release_all(dev_priv);
	i915_drm_clients_fini(&dev_priv->clients);
	intel_region_ttm_device_fini(dev_priv);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	i915_params_free(&dev_priv->params);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	int ret, i;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	ret = intel_gmch_bridge_setup(dev_priv);
	if (ret < 0)
		return ret;

	for_each_gt(gt, dev_priv, i) {
		ret = intel_uncore_init_mmio(gt->uncore);
		if (ret)
			return ret;

		ret = drmm_add_action_or_reset(&dev_priv->drm,
					       intel_uncore_fini_mmio,
					       gt->uncore);
		if (ret)
			return ret;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_gmch_bar_setup(dev_priv);
	intel_device_info_runtime_init(dev_priv);

	for_each_gt(gt, dev_priv, i) {
		ret = intel_gt_init_mmio(gt);
		if (ret)
			goto err_uncore;
	}

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_gmch_bar_teardown(dev_priv);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_gmch_bar_teardown(dev_priv);
}

/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 *		       platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks. The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs. Address them as well.
 *
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(i915->drm.dev, UINT_MAX);

	ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (GRAPHICS_VER(i915) == 2)
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

static int i915_pcode_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	int id, ret;

	for_each_gt(gt, i915, id) {
		ret = intel_pcode_init(gt->uncore);
		if (ret) {
			drm_err(&gt->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret);
			return ret;
		}
	}

	return 0;
}

/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct pci_dev *root_pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	/*
	 * Make sure we probe lmem before we probe stolen-lmem. The BAR size
	 * might be different due to bar resizing.
	 */
	ret = intel_gt_tiles_init(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	/*
	 * On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and MSI was defeatured there. MSI interrupts
	 * seem to get lost on g4x as well, and interrupt delivery seems to
	 * stay properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seem to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (GRAPHICS_VER(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		goto err_opregion;

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_bw_init_hw(dev_priv);

	/*
	 * FIXME: Temporary hammer to avoid freezing the machine on our DGFX.
	 * This should be totally removed when we handle the pci states properly
	 * on runtime PM and on s2idle cases.
	 */
	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_disable(root_pdev);

	return 0;

err_opregion:
	intel_opregion_cleanup(dev_priv);
err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct pci_dev *root_pdev;

	i915_perf_fini(dev_priv);

	intel_opregion_cleanup(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_enable(root_pdev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(&dev_priv->drm, 0)) {
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");
		return;
	}

	i915_debugfs_register(dev_priv);
	i915_setup_sysfs(dev_priv);

	/* Depends on sysfs having been initialized */
	i915_perf_register(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_register(gt);

	intel_pxp_debugfs_register(dev_priv->pxp);

	i915_hwmon_register(dev_priv);

	intel_display_driver_register(dev_priv);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_display_driver_unregister(dev_priv);

	intel_pxp_fini(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_unregister(gt);

	i915_hwmon_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}
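/*
 * Report whether DMA remapping (VT-d) is active for this device; used by
 * the device-info dump below and by other debug output.
 */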
void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
{
	drm_printf(p, "iommu: %s\n",
		   str_enabled_disabled(i915_vtd_active(i915)));
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");
		struct intel_gt *gt;
		unsigned int i;

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   GRAPHICS_VER(dev_priv));

		intel_device_info_print(INTEL_INFO(dev_priv),
					RUNTIME_INFO(dev_priv), &p);
		i915_print_iommu_status(dev_priv, &p);
		for_each_gt(gt, dev_priv, i)
			intel_gt_info_print(&gt->info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct intel_runtime_info *runtime;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
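	/*
	 * From this point on, INTEL_INFO(i915) is treated as write-once:
	 * anything that can change at runtime belongs in RUNTIME_INFO(i915),
	 * initialized below.
	 */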
	/* Initialize initial runtime info from static const data and pdev. */
	runtime = RUNTIME_INFO(i915);
	memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
	runtime->device_id = pdev->device;

	return i915;
}

/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *i915;
	int ret;

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = intel_gt_probe_all(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_tiles_cleanup;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_modeset_init_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_modeset_init_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	intel_pxp_init(i915);

	ret = intel_modeset_init(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_modeset_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
	i915_gem_drain_freed_objects(i915);
	i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_tiles_cleanup:
	intel_gt_release_all(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	intel_gvt_driver_remove(i915);

	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_modeset_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);
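	/*
	 * With GEM removed, tear down the remaining, GEM-independent parts
	 * of the display driver.
	 */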
	intel_modeset_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t wakeref;

	if (!dev_priv->do_release)
		return;

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	intel_runtime_pm_put(rpm, wakeref);

	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_fbdev_restore_mode(i915);

	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);
	i915_drm_client_put(file_priv->client);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(&dev_priv->drm);
	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(&dev_priv->drm);
}

static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(&dev_priv->drm);
	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(&dev_priv->drm);
}

void i915_driver_shutdown(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	intel_runtime_pm_disable(&i915->runtime_pm);
	intel_power_domains_disable(i915);

	if (HAS_DISPLAY(i915)) {
		drm_kms_helper_poll_disable(&i915->drm);

		drm_atomic_helper_shutdown(&i915->drm);
	}

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);

	intel_dmc_suspend(i915);
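	/*
	 * Idle the GPU and flush outstanding GEM work before the power
	 * domains are torn down below.
	 */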
	i915_gem_suspend(i915);

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 *
	 * TODO:
	 * - unify the pci_driver::shutdown sequence here with the
	 *   pci_driver.driver.pm.poweroff,poweroff_late sequence.
	 * - unify the driver remove and system/runtime suspend sequences with
	 *   the above unified shutdown/poweroff sequence.
	 */
	intel_power_domains_driver_remove(i915);
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_runtime_pm_driver_release(&i915->runtime_pm);
}

/* True if the target sleep state is shallower than S3 (e.g. s2idle). */
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static void i915_drm_complete(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_pxp_resume_complete(i915->pxp);
}

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_pxp_suspend_prepare(i915->pxp);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after this point,
	 * the GPU is not woken again.
	 */
	return i915_gem_backup_suspend(i915);
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/*
	 * We do a lot of poking in a lot of registers, make sure they work
	 * properly.
	 */
	intel_power_domains_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	/* Must be called before GGTT is suspended. */
	intel_dpt_suspend(dev_priv);
	i915_ggtt_suspend(to_gt(dev_priv)->ggtt);

	i915_save_display(dev_priv);
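	/*
	 * Tell the opregion firmware which power state we are heading for:
	 * D1 for s2idle (matching the repurposed "runtime suspended"
	 * notification described in intel_runtime_suspend()), D3cold for a
	 * full S3/S4 transition.
	 */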
	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_dmc_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_gem_drain_freed_objects(dev_priv);

	return 0;
}

static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_gt *gt;
	int ret, i;

	disable_rpm_wakeref_asserts(rpm);

	intel_pxp_suspend(dev_priv->pxp);

	i915_gem_suspend_late(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_suspend(gt->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
				   pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_gt *gt;
	int ret, i;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		return ret;

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(to_gt(dev_priv)->ggtt);

	for_each_gt(gt, dev_priv, i)
		if (GRAPHICS_VER(gt->i915) >= 8)
			setup_private_pat(gt);

	/* Must be called after GGTT is resumed. */
	intel_dpt_resume(dev_priv);

	intel_dmc_resume(dev_priv);

	i915_restore_display(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	if (HAS_DISPLAY(dev_priv))
		drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);
	intel_hpd_init(dev_priv);

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(dev_priv);
	intel_display_resume(dev);

	intel_hpd_poll_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_enable(dev);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	intel_gvt_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_gt *gt;
	int ret, i;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		drm_err(&dev_priv->drm,
			"Resume prepare failed: %d, continuing anyway\n", ret);

	for_each_gt(gt, dev_priv, i) {
		intel_uncore_resume_early(gt->uncore);
		intel_gt_check_and_clear_faults(gt);
	}

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(&i915->drm);
}

static void i915_pm_complete(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return;

	i915_drm_complete(&i915->drm);
}

/* freeze: before creating the hibernation image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_gt *gt;
	int ret, i;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg(&dev_priv->drm, "Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_pxp_runtime_suspend(dev_priv->pxp);

	for_each_gt(gt, dev_priv, i)
		intel_gt_runtime_suspend(gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_suspend(gt->uncore);

	intel_display_power_suspend(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		for_each_gt(gt, dev_priv, i)
			intel_gt_runtime_resume(gt);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_enable(dev_priv);

	drm_dbg(&dev_priv->drm, "Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_gt *gt;
	int ret, i;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_runtime_resume(gt->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	for_each_gt(gt, dev_priv, i)
		intel_gt_runtime_resume(gt);

	intel_pxp_runtime_resume(dev_priv->pxp);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		intel_hpd_init(dev_priv);
		intel_hpd_poll_disable(dev_priv);
	}

	skl_watermark_ipc_update(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg(&dev_priv->drm, "Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.complete = i915_pm_complete,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = i915_drm_client_fdinfo,
#endif
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop,
			  DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG,
			  i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

/*
 * Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

static const struct drm_driver i915_drm_driver = {
	/*
	 * Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};