1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <drm/drmP.h> 29 #include "radeon.h" 30 #include <drm/radeon_drm.h> 31 #include "radeon_asic.h" 32 33 #include <linux/vga_switcheroo.h> 34 #include <linux/slab.h> 35 #include <linux/pm_runtime.h> 36 37 #include "radeon_kfd.h" 38 39 #if defined(CONFIG_VGA_SWITCHEROO) 40 bool radeon_has_atpx(void); 41 #else 42 static inline bool radeon_has_atpx(void) { return false; } 43 #endif 44 45 /** 46 * radeon_driver_unload_kms - Main unload function for KMS. 47 * 48 * @dev: drm dev pointer 49 * 50 * This is the main unload function for KMS (all asics). 
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 * Returns 0 on success.
 */
void radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Nothing was ever attached to this drm_device; nothing to do. */
	if (rdev == NULL)
		return;

	/* No MMIO mapping: device init never completed, so only the bare
	 * rdev allocation needs to be released.
	 * NOTE(review): inferred from the load path -- confirm rmmio is
	 * the first resource torn down by radeon_device_fini().
	 */
	if (rdev->rmmio == NULL)
		goto done_free;

	if (radeon_is_px(dev)) {
		/* Wake the GPU and keep runtime PM from suspending it
		 * while we tear the device down.
		 */
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	radeon_kfd_device_fini(rdev);

	radeon_acpi_fini(rdev);

	/* Displays first, then the rest of the hw (CP, writeback, etc.). */
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* PX (hybrid graphics) only applies when runtime PM is not
	 * disabled on the command line, ATPX is present, and the GPU
	 * is discrete (not an IGP).
	 */
	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0))
		flags |= RADEON_IS_PX;

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	radeon_kfd_device_probe(rdev);
	radeon_kfd_device_init(rdev);

	if (radeon_is_px(dev)) {
		/* Hand the device over to runtime PM: mark it active now
		 * and allow autosuspend after 5 seconds of idleness.
		 */
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	/* On any fatal error, unwind everything that was set up above. */
	if (r)
		radeon_driver_unload_kms(dev);


	return r;
}

/**
 * radeon_set_filp_rights - Set filp right.
 *
 * @dev: drm dev pointer
 * @owner: drm file currently holding the exclusive right (may be updated)
 * @applier: drm file requesting or revoking the right
 * @value: in: 1 to request, 0 to revoke; out: 1 if @applier owns the right
 *
 * Sets the filp rights for the device (all asics).  Used for the
 * exclusive Hyper-Z / CMASK grants below; protected by gem.mutex.
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	struct radeon_device *rdev = dev->dev_private;

	mutex_lock(&rdev->gem.mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	/* Report back whether the applier ended up owning the right. */
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&rdev->gem.mutex);
}

/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
216 */ 217 static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 218 { 219 struct radeon_device *rdev = dev->dev_private; 220 struct drm_radeon_info *info = data; 221 struct radeon_mode_info *minfo = &rdev->mode_info; 222 uint32_t *value, value_tmp, *value_ptr, value_size; 223 uint64_t value64; 224 struct drm_crtc *crtc; 225 int i, found; 226 227 value_ptr = (uint32_t *)((unsigned long)info->value); 228 value = &value_tmp; 229 value_size = sizeof(uint32_t); 230 231 switch (info->request) { 232 case RADEON_INFO_DEVICE_ID: 233 *value = dev->pdev->device; 234 break; 235 case RADEON_INFO_NUM_GB_PIPES: 236 *value = rdev->num_gb_pipes; 237 break; 238 case RADEON_INFO_NUM_Z_PIPES: 239 *value = rdev->num_z_pipes; 240 break; 241 case RADEON_INFO_ACCEL_WORKING: 242 /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ 243 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) 244 *value = false; 245 else 246 *value = rdev->accel_working; 247 break; 248 case RADEON_INFO_CRTC_FROM_ID: 249 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 250 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 251 return -EFAULT; 252 } 253 for (i = 0, found = 0; i < rdev->num_crtc; i++) { 254 crtc = (struct drm_crtc *)minfo->crtcs[i]; 255 if (crtc && crtc->base.id == *value) { 256 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 257 *value = radeon_crtc->crtc_id; 258 found = 1; 259 break; 260 } 261 } 262 if (!found) { 263 DRM_DEBUG_KMS("unknown crtc id %d\n", *value); 264 return -EINVAL; 265 } 266 break; 267 case RADEON_INFO_ACCEL_WORKING2: 268 if (rdev->family == CHIP_HAWAII) { 269 if (rdev->accel_working) { 270 if (rdev->new_fw) 271 *value = 3; 272 else 273 *value = 2; 274 } else { 275 *value = 0; 276 } 277 } else { 278 *value = rdev->accel_working; 279 } 280 break; 281 case RADEON_INFO_TILING_CONFIG: 282 if (rdev->family >= CHIP_BONAIRE) 283 *value = rdev->config.cik.tile_config; 284 else if (rdev->family >= 
CHIP_TAHITI) 285 *value = rdev->config.si.tile_config; 286 else if (rdev->family >= CHIP_CAYMAN) 287 *value = rdev->config.cayman.tile_config; 288 else if (rdev->family >= CHIP_CEDAR) 289 *value = rdev->config.evergreen.tile_config; 290 else if (rdev->family >= CHIP_RV770) 291 *value = rdev->config.rv770.tile_config; 292 else if (rdev->family >= CHIP_R600) 293 *value = rdev->config.r600.tile_config; 294 else { 295 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); 296 return -EINVAL; 297 } 298 break; 299 case RADEON_INFO_WANT_HYPERZ: 300 /* The "value" here is both an input and output parameter. 301 * If the input value is 1, filp requests hyper-z access. 302 * If the input value is 0, filp revokes its hyper-z access. 303 * 304 * When returning, the value is 1 if filp owns hyper-z access, 305 * 0 otherwise. */ 306 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 307 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 308 return -EFAULT; 309 } 310 if (*value >= 2) { 311 DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value); 312 return -EINVAL; 313 } 314 radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value); 315 break; 316 case RADEON_INFO_WANT_CMASK: 317 /* The same logic as Hyper-Z. 
*/ 318 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 319 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 320 return -EFAULT; 321 } 322 if (*value >= 2) { 323 DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value); 324 return -EINVAL; 325 } 326 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value); 327 break; 328 case RADEON_INFO_CLOCK_CRYSTAL_FREQ: 329 /* return clock value in KHz */ 330 if (rdev->asic->get_xclk) 331 *value = radeon_get_xclk(rdev) * 10; 332 else 333 *value = rdev->clock.spll.reference_freq * 10; 334 break; 335 case RADEON_INFO_NUM_BACKENDS: 336 if (rdev->family >= CHIP_BONAIRE) 337 *value = rdev->config.cik.max_backends_per_se * 338 rdev->config.cik.max_shader_engines; 339 else if (rdev->family >= CHIP_TAHITI) 340 *value = rdev->config.si.max_backends_per_se * 341 rdev->config.si.max_shader_engines; 342 else if (rdev->family >= CHIP_CAYMAN) 343 *value = rdev->config.cayman.max_backends_per_se * 344 rdev->config.cayman.max_shader_engines; 345 else if (rdev->family >= CHIP_CEDAR) 346 *value = rdev->config.evergreen.max_backends; 347 else if (rdev->family >= CHIP_RV770) 348 *value = rdev->config.rv770.max_backends; 349 else if (rdev->family >= CHIP_R600) 350 *value = rdev->config.r600.max_backends; 351 else { 352 return -EINVAL; 353 } 354 break; 355 case RADEON_INFO_NUM_TILE_PIPES: 356 if (rdev->family >= CHIP_BONAIRE) 357 *value = rdev->config.cik.max_tile_pipes; 358 else if (rdev->family >= CHIP_TAHITI) 359 *value = rdev->config.si.max_tile_pipes; 360 else if (rdev->family >= CHIP_CAYMAN) 361 *value = rdev->config.cayman.max_tile_pipes; 362 else if (rdev->family >= CHIP_CEDAR) 363 *value = rdev->config.evergreen.max_tile_pipes; 364 else if (rdev->family >= CHIP_RV770) 365 *value = rdev->config.rv770.max_tile_pipes; 366 else if (rdev->family >= CHIP_R600) 367 *value = rdev->config.r600.max_tile_pipes; 368 else { 369 return -EINVAL; 370 } 371 break; 372 case RADEON_INFO_FUSION_GART_WORKING: 373 *value = 1; 374 break; 375 
case RADEON_INFO_BACKEND_MAP: 376 if (rdev->family >= CHIP_BONAIRE) 377 *value = rdev->config.cik.backend_map; 378 else if (rdev->family >= CHIP_TAHITI) 379 *value = rdev->config.si.backend_map; 380 else if (rdev->family >= CHIP_CAYMAN) 381 *value = rdev->config.cayman.backend_map; 382 else if (rdev->family >= CHIP_CEDAR) 383 *value = rdev->config.evergreen.backend_map; 384 else if (rdev->family >= CHIP_RV770) 385 *value = rdev->config.rv770.backend_map; 386 else if (rdev->family >= CHIP_R600) 387 *value = rdev->config.r600.backend_map; 388 else { 389 return -EINVAL; 390 } 391 break; 392 case RADEON_INFO_VA_START: 393 /* this is where we report if vm is supported or not */ 394 if (rdev->family < CHIP_CAYMAN) 395 return -EINVAL; 396 *value = RADEON_VA_RESERVED_SIZE; 397 break; 398 case RADEON_INFO_IB_VM_MAX_SIZE: 399 /* this is where we report if vm is supported or not */ 400 if (rdev->family < CHIP_CAYMAN) 401 return -EINVAL; 402 *value = RADEON_IB_VM_MAX_SIZE; 403 break; 404 case RADEON_INFO_MAX_PIPES: 405 if (rdev->family >= CHIP_BONAIRE) 406 *value = rdev->config.cik.max_cu_per_sh; 407 else if (rdev->family >= CHIP_TAHITI) 408 *value = rdev->config.si.max_cu_per_sh; 409 else if (rdev->family >= CHIP_CAYMAN) 410 *value = rdev->config.cayman.max_pipes_per_simd; 411 else if (rdev->family >= CHIP_CEDAR) 412 *value = rdev->config.evergreen.max_pipes; 413 else if (rdev->family >= CHIP_RV770) 414 *value = rdev->config.rv770.max_pipes; 415 else if (rdev->family >= CHIP_R600) 416 *value = rdev->config.r600.max_pipes; 417 else { 418 return -EINVAL; 419 } 420 break; 421 case RADEON_INFO_TIMESTAMP: 422 if (rdev->family < CHIP_R600) { 423 DRM_DEBUG_KMS("timestamp is r6xx+ only!\n"); 424 return -EINVAL; 425 } 426 value = (uint32_t*)&value64; 427 value_size = sizeof(uint64_t); 428 value64 = radeon_get_gpu_clock_counter(rdev); 429 break; 430 case RADEON_INFO_MAX_SE: 431 if (rdev->family >= CHIP_BONAIRE) 432 *value = rdev->config.cik.max_shader_engines; 433 else if (rdev->family 
>= CHIP_TAHITI) 434 *value = rdev->config.si.max_shader_engines; 435 else if (rdev->family >= CHIP_CAYMAN) 436 *value = rdev->config.cayman.max_shader_engines; 437 else if (rdev->family >= CHIP_CEDAR) 438 *value = rdev->config.evergreen.num_ses; 439 else 440 *value = 1; 441 break; 442 case RADEON_INFO_MAX_SH_PER_SE: 443 if (rdev->family >= CHIP_BONAIRE) 444 *value = rdev->config.cik.max_sh_per_se; 445 else if (rdev->family >= CHIP_TAHITI) 446 *value = rdev->config.si.max_sh_per_se; 447 else 448 return -EINVAL; 449 break; 450 case RADEON_INFO_FASTFB_WORKING: 451 *value = rdev->fastfb_working; 452 break; 453 case RADEON_INFO_RING_WORKING: 454 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 455 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 456 return -EFAULT; 457 } 458 switch (*value) { 459 case RADEON_CS_RING_GFX: 460 case RADEON_CS_RING_COMPUTE: 461 *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready; 462 break; 463 case RADEON_CS_RING_DMA: 464 *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready; 465 *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready; 466 break; 467 case RADEON_CS_RING_UVD: 468 *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; 469 break; 470 case RADEON_CS_RING_VCE: 471 *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready; 472 break; 473 default: 474 return -EINVAL; 475 } 476 break; 477 case RADEON_INFO_SI_TILE_MODE_ARRAY: 478 if (rdev->family >= CHIP_BONAIRE) { 479 value = rdev->config.cik.tile_mode_array; 480 value_size = sizeof(uint32_t)*32; 481 } else if (rdev->family >= CHIP_TAHITI) { 482 value = rdev->config.si.tile_mode_array; 483 value_size = sizeof(uint32_t)*32; 484 } else { 485 DRM_DEBUG_KMS("tile mode array is si+ only!\n"); 486 return -EINVAL; 487 } 488 break; 489 case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY: 490 if (rdev->family >= CHIP_BONAIRE) { 491 value = rdev->config.cik.macrotile_mode_array; 492 value_size = sizeof(uint32_t)*16; 493 } else { 494 DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n"); 495 
return -EINVAL; 496 } 497 break; 498 case RADEON_INFO_SI_CP_DMA_COMPUTE: 499 *value = 1; 500 break; 501 case RADEON_INFO_SI_BACKEND_ENABLED_MASK: 502 if (rdev->family >= CHIP_BONAIRE) { 503 *value = rdev->config.cik.backend_enable_mask; 504 } else if (rdev->family >= CHIP_TAHITI) { 505 *value = rdev->config.si.backend_enable_mask; 506 } else { 507 DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); 508 } 509 break; 510 case RADEON_INFO_MAX_SCLK: 511 if ((rdev->pm.pm_method == PM_METHOD_DPM) && 512 rdev->pm.dpm_enabled) 513 *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; 514 else 515 *value = rdev->pm.default_sclk * 10; 516 break; 517 case RADEON_INFO_VCE_FW_VERSION: 518 *value = rdev->vce.fw_version; 519 break; 520 case RADEON_INFO_VCE_FB_VERSION: 521 *value = rdev->vce.fb_version; 522 break; 523 case RADEON_INFO_NUM_BYTES_MOVED: 524 value = (uint32_t*)&value64; 525 value_size = sizeof(uint64_t); 526 value64 = atomic64_read(&rdev->num_bytes_moved); 527 break; 528 case RADEON_INFO_VRAM_USAGE: 529 value = (uint32_t*)&value64; 530 value_size = sizeof(uint64_t); 531 value64 = atomic64_read(&rdev->vram_usage); 532 break; 533 case RADEON_INFO_GTT_USAGE: 534 value = (uint32_t*)&value64; 535 value_size = sizeof(uint64_t); 536 value64 = atomic64_read(&rdev->gtt_usage); 537 break; 538 case RADEON_INFO_ACTIVE_CU_COUNT: 539 if (rdev->family >= CHIP_BONAIRE) 540 *value = rdev->config.cik.active_cus; 541 else if (rdev->family >= CHIP_TAHITI) 542 *value = rdev->config.si.active_cus; 543 else if (rdev->family >= CHIP_CAYMAN) 544 *value = rdev->config.cayman.active_simds; 545 else if (rdev->family >= CHIP_CEDAR) 546 *value = rdev->config.evergreen.active_simds; 547 else if (rdev->family >= CHIP_RV770) 548 *value = rdev->config.rv770.active_simds; 549 else if (rdev->family >= CHIP_R600) 550 *value = rdev->config.r600.active_simds; 551 else 552 *value = 1; 553 break; 554 case RADEON_INFO_CURRENT_GPU_TEMP: 555 /* get temperature in millidegrees C */ 556 if 
(rdev->asic->pm.get_temperature) 557 *value = radeon_get_temperature(rdev); 558 else 559 *value = 0; 560 break; 561 case RADEON_INFO_CURRENT_GPU_SCLK: 562 /* get sclk in Mhz */ 563 if (rdev->pm.dpm_enabled) 564 *value = radeon_dpm_get_current_sclk(rdev) / 100; 565 else 566 *value = rdev->pm.current_sclk / 100; 567 break; 568 case RADEON_INFO_CURRENT_GPU_MCLK: 569 /* get mclk in Mhz */ 570 if (rdev->pm.dpm_enabled) 571 *value = radeon_dpm_get_current_mclk(rdev) / 100; 572 else 573 *value = rdev->pm.current_mclk / 100; 574 break; 575 case RADEON_INFO_READ_REG: 576 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 577 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 578 return -EFAULT; 579 } 580 if (radeon_get_allowed_info_register(rdev, *value, value)) 581 return -EINVAL; 582 break; 583 case RADEON_INFO_VA_UNMAP_WORKING: 584 *value = true; 585 break; 586 case RADEON_INFO_GPU_RESET_COUNTER: 587 *value = atomic_read(&rdev->gpu_reset_counter); 588 break; 589 default: 590 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 591 return -EINVAL; 592 } 593 if (copy_to_user(value_ptr, (char*)value, value_size)) { 594 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); 595 return -EFAULT; 596 } 597 return 0; 598 } 599 600 601 /* 602 * Outdated mess for old drm with Xorg being in charge (void function now). 603 */ 604 /** 605 * radeon_driver_lastclose_kms - drm callback for last close 606 * 607 * @dev: drm dev pointer 608 * 609 * Switch vga_switcheroo state after last close (all asics). 610 */ 611 void radeon_driver_lastclose_kms(struct drm_device *dev) 612 { 613 struct radeon_device *rdev = dev->dev_private; 614 615 radeon_fbdev_restore_mode(rdev); 616 vga_switcheroo_process_delayed_switch(); 617 } 618 619 /** 620 * radeon_driver_open_kms - drm callback for open 621 * 622 * @dev: drm dev pointer 623 * @file_priv: drm file 624 * 625 * On device open, init vm on cayman+ (all asics). 626 * Returns 0 on success, error on failure. 
627 */ 628 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) 629 { 630 struct radeon_device *rdev = dev->dev_private; 631 int r; 632 633 file_priv->driver_priv = NULL; 634 635 r = pm_runtime_get_sync(dev->dev); 636 if (r < 0) 637 return r; 638 639 /* new gpu have virtual address space support */ 640 if (rdev->family >= CHIP_CAYMAN) { 641 struct radeon_fpriv *fpriv; 642 struct radeon_vm *vm; 643 644 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 645 if (unlikely(!fpriv)) { 646 r = -ENOMEM; 647 goto out_suspend; 648 } 649 650 if (rdev->accel_working) { 651 vm = &fpriv->vm; 652 r = radeon_vm_init(rdev, vm); 653 if (r) { 654 kfree(fpriv); 655 goto out_suspend; 656 } 657 658 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 659 if (r) { 660 radeon_vm_fini(rdev, vm); 661 kfree(fpriv); 662 goto out_suspend; 663 } 664 665 /* map the ib pool buffer read only into 666 * virtual address space */ 667 vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, 668 rdev->ring_tmp_bo.bo); 669 r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, 670 RADEON_VA_IB_OFFSET, 671 RADEON_VM_PAGE_READABLE | 672 RADEON_VM_PAGE_SNOOPED); 673 if (r) { 674 radeon_vm_fini(rdev, vm); 675 kfree(fpriv); 676 goto out_suspend; 677 } 678 } 679 file_priv->driver_priv = fpriv; 680 } 681 682 out_suspend: 683 pm_runtime_mark_last_busy(dev->dev); 684 pm_runtime_put_autosuspend(dev->dev); 685 return r; 686 } 687 688 /** 689 * radeon_driver_postclose_kms - drm callback for post close 690 * 691 * @dev: drm dev pointer 692 * @file_priv: drm file 693 * 694 * On device post close, tear down vm on cayman+ (all asics). 
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				/* Unmap the per-client IB mapping set up
				 * in radeon_driver_open_kms(). */
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	/* Pairs with the pm_runtime_get_sync() taken in preclose below
	 * (preclose runs before postclose for the same file). */
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/**
 * radeon_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).
 */
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Keep the GPU awake across preclose/postclose; released in
	 * radeon_driver_postclose_kms(). */
	pm_runtime_get_sync(dev->dev);

	/* Revoke any exclusive Hyper-Z / CMASK grant this client held. */
	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		/* NOTE(review): -EINVAL in a u32 return becomes a huge
		 * positive count; callers apparently treat any value as a
		 * counter -- confirm against the DRM core contract. */
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (rdev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = radeon_get_vblank_counter(rdev, pipe);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		}
		else {
			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	}
	else {
		/* Fallback to use value as is. */
		count = radeon_get_vblank_counter(rdev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* irq.lock guards the crtc_vblank_int flags and the hw irq
	 * programming done by radeon_irq_set(). */
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

/**
 * radeon_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: max error
 * @vblank_time: time value
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position. (all asics).
885 * Returns postive status flags on success, negative error on failure. 886 */ 887 int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, 888 int *max_error, 889 struct timeval *vblank_time, 890 unsigned flags) 891 { 892 struct drm_crtc *drmcrtc; 893 struct radeon_device *rdev = dev->dev_private; 894 895 if (crtc < 0 || crtc >= dev->num_crtcs) { 896 DRM_ERROR("Invalid crtc %d\n", crtc); 897 return -EINVAL; 898 } 899 900 /* Get associated drm_crtc: */ 901 drmcrtc = &rdev->mode_info.crtcs[crtc]->base; 902 if (!drmcrtc) 903 return -EINVAL; 904 905 /* Helper routine in DRM core does all the work: */ 906 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, 907 vblank_time, flags, 908 &drmcrtc->hwmode); 909 } 910 911 const struct drm_ioctl_desc radeon_ioctls_kms[] = { 912 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 913 DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 914 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 915 DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 916 DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH), 917 DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH), 918 DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH), 919 DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH), 920 DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH), 921 DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH), 922 DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH), 923 DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH), 924 DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH), 925 DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH), 926 DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 927 DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH), 928 
DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH), 929 DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH), 930 DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH), 931 DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH), 932 DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH), 933 DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 934 DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH), 935 DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH), 936 DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH), 937 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH), 938 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH), 939 /* KMS */ 940 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 941 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 942 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 943 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 944 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), 945 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), 946 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 947 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 948 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 949 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 950 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 951 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 952 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 953 DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, 
DRM_AUTH|DRM_RENDER_ALLOW), 954 DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 955 }; 956 int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); 957