/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drv.h"
#include "radeon_kms.h"

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
static inline bool radeon_has_atpx(void) { return false; }
#endif

/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 */
void radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return;

	if (rdev->rmmio == NULL)
		goto done_free;

	if (radeon_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	radeon_acpi_fini(rdev);

	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

	if (dev->agp)
		arch_phys_wc_del(dev->agp->agp_mtrr);
	kfree(dev->agp);
	dev->agp = NULL;

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
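 *
 * Runtime power management (PX) is only enabled when the discrete GPU has
 * ATPX support and is not an IGP; see the bus-flag and PX detection at the
 * top of the function.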
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= RADEON_IS_PX;

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (radeon_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);

	return r;
}

/**
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: drm file currently owning the right
 * @applier: drm file requesting (1) or revoking (0) the right
 * @value: 1 to request the right, 0 to revoke it; on return, 1 if
 *         @applier owns the right, 0 otherwise
 *
 * Sets the filp rights for the device (all asics).
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	struct radeon_device *rdev = dev->dev_private;

	mutex_lock(&rdev->gem.mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&rdev->gem.mutex);
}

/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
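 *
 * Most requests return a single 32-bit value copied back to the user
 * pointer in drm_radeon_info::value; a few requests (the timestamp, the
 * usage counters and the tile mode arrays) widen the copy to 64 bits or
 * to an array via value/value_size below.
 *
 * Illustrative userspace sketch (not part of this file; assumes the
 * libdrm drmCommandWriteRead() helper):
 *
 *	struct drm_radeon_info info = {};
 *	uint32_t dev_id = 0;
 *
 *	info.request = RADEON_INFO_DEVICE_ID;
 *	info.value = (uintptr_t)&dev_id;
 *	drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));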
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value, value_tmp, *value_ptr, value_size;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;

	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = dev->pdev->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		if (rdev->family == CHIP_HAWAII) {
			if (rdev->accel_working) {
				if (rdev->new_fw)
					*value = 3;
				else
					*value = 2;
			} else {
				*value = 0;
			}
		} else {
			*value = rdev->accel_working;
		}
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
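		/* CMASK (fast color clear) access is likewise granted to a
		 * single filp at a time, tracked in rdev->cmask_filp. */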
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
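			/* no pipe configuration is exposed for pre-r600 asics */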
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_CP_DMA_COMPUTE:
		*value = 1;
		break;
	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
		if (rdev->family >= CHIP_BONAIRE) {
			*value = rdev->config.cik.backend_enable_mask;
		} else if (rdev->family >= CHIP_TAHITI) {
			*value = rdev->config.si.backend_enable_mask;
		} else {
			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
		}
		break;
	case RADEON_INFO_MAX_SCLK:
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled)
			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			*value = rdev->pm.default_sclk * 10;
		break;
	case RADEON_INFO_VCE_FW_VERSION:
		*value = rdev->vce.fw_version;
		break;
	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
	case RADEON_INFO_NUM_BYTES_MOVED:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->num_bytes_moved);
		break;
	case RADEON_INFO_VRAM_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
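		/* memory usage counters are 64-bit, so return them via value64 */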
		value64 = atomic64_read(&rdev->vram_usage);
		break;
	case RADEON_INFO_GTT_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->gtt_usage);
		break;
	case RADEON_INFO_ACTIVE_CU_COUNT:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.active_cus;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.active_cus;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.active_simds;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.active_simds;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.active_simds;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.active_simds;
		else
			*value = 1;
		break;
	case RADEON_INFO_CURRENT_GPU_TEMP:
		/* get temperature in millidegrees C */
		if (rdev->asic->pm.get_temperature)
			*value = radeon_get_temperature(rdev);
		else
			*value = 0;
		break;
	case RADEON_INFO_CURRENT_GPU_SCLK:
		/* get sclk in MHz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_sclk(rdev) / 100;
		else
			*value = rdev->pm.current_sclk / 100;
		break;
	case RADEON_INFO_CURRENT_GPU_MCLK:
		/* get mclk in MHz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_mclk(rdev) / 100;
		else
			*value = rdev->pm.current_mclk / 100;
		break;
	case RADEON_INFO_READ_REG:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (radeon_get_allowed_info_register(rdev, *value, value))
			return -EINVAL;
		break;
	case RADEON_INFO_VA_UNMAP_WORKING:
		*value = true;
		break;
	case RADEON_INFO_GPU_RESET_COUNTER:
		*value = atomic_read(&rdev->gpu_reset_counter);
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (copy_to_user(value_ptr, (char*)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
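 *
 * On cayman+ a per-file radeon_fpriv with its own VM is allocated and the
 * IB pool buffer is mapped read-only into that VM at RADEON_VA_IB_OFFSET.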
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		struct radeon_vm *vm;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			r = -ENOMEM;
			goto out_suspend;
		}

		if (rdev->accel_working) {
			vm = &fpriv->vm;
			r = radeon_vm_init(rdev, vm);
			if (r) {
				kfree(fpriv);
				goto out_suspend;
			}

			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (r) {
				radeon_vm_fini(rdev, vm);
				kfree(fpriv);
				goto out_suspend;
			}

			/* map the ib pool buffer read only into
			 * virtual address space */
			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
							rdev->ring_tmp_bo.bo);
			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
						  RADEON_VA_IB_OFFSET,
						  RADEON_VM_PAGE_READABLE |
						  RADEON_VM_PAGE_SNOOPED);
			if (r) {
				radeon_vm_fini(rdev, vm);
				kfree(fpriv);
				goto out_suspend;
			}
		}
		file_priv->driver_priv = fpriv;
	}

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return r;
}

/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics). And tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	pm_runtime_get_sync(dev->dev);

	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
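 *
 * The hardware counter increments at the start of vsync, so the value is
 * cooked here to appear as if it incremented at the start of vblank, as
 * the DRM core expects; see the comment in the function body.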
 */
u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (rdev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = radeon_get_vblank_counter(rdev, pipe);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, pipe));

		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = radeon_get_vblank_counter(rdev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}