/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

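/**
 * amdgpu_unregister_gpu_instance - remove a GPU from the multi-GPU list
 *
 * @adev: amdgpu device pointer
 *
 * Removes the device from the global mgpu_info table and updates the
 * APU/dGPU counts accordingly.
 */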
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		return;

	if (adev->runpm) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);
	amdgpu_device_fini(adev);
}

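/**
 * amdgpu_register_gpu_instance - add a GPU to the multi-GPU list
 *
 * @adev: amdgpu device pointer
 *
 * Adds the device to the global mgpu_info table (up to MAX_GPU_INSTANCE
 * entries) and updates the APU/dGPU counts.
 */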
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instance\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @adev: pointer to struct amdgpu_device
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
	struct drm_device *dev;
	int r, acpi_status;

	dev = adev_to_drm(adev);

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	if (amdgpu_device_supports_boco(dev) &&
	    (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
		adev->runpm = true;
	} else if (amdgpu_device_supports_baco(dev) &&
		   (amdgpu_runtime_pm != 0)) {
		switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
#endif
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
			/* enable runpm if runpm=1 */
			if (amdgpu_runtime_pm > 0)
				adev->runpm = true;
			break;
		case CHIP_VEGA10:
			/* turn runpm on if noretry=0 */
			if (!adev->gmc.noretry)
				adev->runpm = true;
			break;
		default:
			/* enable runpm on VI+ */
			adev->runpm = true;
			break;
		}
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */

	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

	if (adev->runpm) {
		/* only need to skip on ATPX */
		if (amdgpu_device_supports_boco(dev) &&
		    !amdgpu_is_atpx_hybrid())
			dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && adev->runpm)
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}

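/**
 * amdgpu_firmware_info - report firmware and feature versions for one block
 *
 * @fw_info: output firmware version and feature version
 * @query_fw: userspace query selecting the firmware type (and index)
 * @adev: amdgpu device pointer
 *
 * Fills @fw_info for the firmware block selected in @query_fw.
 * Returns 0 on success, -EINVAL for an unknown type or index.
 */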
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		switch (query_fw->index) {
		case 0:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
			break;
		case 1:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
			break;
		case 2:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_hdcp_ucode_version;
			break;
		case 3:
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_dtm_ucode_version;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

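/**
 * amdgpu_hw_ip_info - answer an AMDGPU_INFO_HW_IP_INFO query
 *
 * @adev: amdgpu device pointer
 * @info: userspace query selecting the HW IP type and instance
 * @result: output HW IP info (version, available rings, IB alignment)
 *
 * Counts the rings that are ready for the requested IP block and reports
 * the IP version and IB alignment requirements.
 * Returns 0 on success, -EINVAL for an invalid type or instance.
 */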
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;
		struct ttm_resource_manager *vram_man =
			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
		struct ttm_resource_manager *gtt_man =
			ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(vram_man);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(vram_man);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = gtt_man->size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(gtt_man);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		else if (se_num >= AMDGPU_GFX_MAX_SE)
			return -EINVAL;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;
		else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
			return -EINVAL;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
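	/* Static device description (chip ids, clocks, VM address ranges,
	 * CU masks, ...) gathered into a single structure for userspace.
	 */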
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info;
		uint64_t vm_size;

		memset(&dev_info, 0, sizeof(dev_info));
		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		if (amdgpu_is_tmz(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_TMZ;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info.pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
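	/* Sensor queries go through the DPM/powerplay sensor interface and
	 * therefore require DPM to be enabled.
	 */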
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)ras->supported << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ?
				    -EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

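/*
 * Dump the loaded firmware versions (and the VBIOS version) for this device
 * to a debugfs file, reusing amdgpu_firmware_info() for each firmware type.
 */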
static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->gfx.mec2_fw) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP TA */
	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 4; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		switch (query_fw.index) {
		case 0:
			seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
				   "XGMI", fw_info.feature, fw_info.ver);
			break;
		case 1:
			seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
				   "RAS", fw_info.feature, fw_info.ver);
			break;
		case 2:
			seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
				   "HDCP", fw_info.feature, fw_info.ver);
			break;
		case 3:
			seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
				   "DTM", fw_info.feature, fw_info.ver);
			break;
		default:
			return -EINVAL;
		}
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

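/*
 * Register the firmware-version debugfs file; this is a no-op when debugfs
 * is not compiled in.
 */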
int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}