/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Returns 0 on success.
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (adev->runpm) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instance\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init() should report only fatal errors (memory
	 * allocation, iomapping or memory manager initialization failures);
	 * it must properly initialize the GPU MC controller and permit
	 * VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	if (amdgpu_device_supports_boco(dev) &&
	    (amdgpu_runtime_pm != 0)) /* enable runpm by default */
		adev->runpm = true;
	else if (amdgpu_device_supports_baco(dev) &&
		 (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
		adev->runpm = true;

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (adev->runpm) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && adev->runpm)
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
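
/*
 * Note on the runtime-PM bookkeeping above (descriptive summary, not an
 * authoritative statement of the PM core rules): when runpm is enabled,
 * amdgpu_driver_load_kms() drops the initial usage count with
 * pm_runtime_put_autosuspend() so an idle device may autosuspend, while
 * amdgpu_driver_unload_kms() re-acquires it with pm_runtime_get_sync() and
 * pm_runtime_forbid() before teardown. The error path therefore drops one
 * count with pm_runtime_put_noidle() first, so the get_sync() done during
 * the unload call nets out to zero.
 */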
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		if (query_fw->index > 1)
			return -EINVAL;
		if (query_fw->index == 0) {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
		} else {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
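
/*
 * Illustrative sketch (not part of this driver) of how userspace can reach
 * amdgpu_firmware_info() above through DRM_IOCTL_AMDGPU_INFO; the fd, the
 * error handling and the choice of AMDGPU_INFO_FW_GFX_MEC are assumptions
 * made only for the example:
 *
 *	struct drm_amdgpu_info_firmware fw = {};
 *	struct drm_amdgpu_info request = {
 *		.return_pointer = (uintptr_t)&fw,
 *		.return_size = sizeof(fw),
 *		.query = AMDGPU_INFO_FW_VERSION,
 *		.query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC,
 *		.query_fw.index = 0,
 *	};
 *	drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 *
 * On success fw.ver and fw.feature hold what amdgpu_firmware_info() filled
 * in; libdrm wraps the same path in amdgpu_query_firmware_version().
 */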
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
-EFAULT : 0; 506 } 507 case AMDGPU_INFO_HW_IP_COUNT: { 508 enum amd_ip_block_type type; 509 uint32_t count = 0; 510 511 switch (info->query_hw_ip.type) { 512 case AMDGPU_HW_IP_GFX: 513 type = AMD_IP_BLOCK_TYPE_GFX; 514 break; 515 case AMDGPU_HW_IP_COMPUTE: 516 type = AMD_IP_BLOCK_TYPE_GFX; 517 break; 518 case AMDGPU_HW_IP_DMA: 519 type = AMD_IP_BLOCK_TYPE_SDMA; 520 break; 521 case AMDGPU_HW_IP_UVD: 522 type = AMD_IP_BLOCK_TYPE_UVD; 523 break; 524 case AMDGPU_HW_IP_VCE: 525 type = AMD_IP_BLOCK_TYPE_VCE; 526 break; 527 case AMDGPU_HW_IP_UVD_ENC: 528 type = AMD_IP_BLOCK_TYPE_UVD; 529 break; 530 case AMDGPU_HW_IP_VCN_DEC: 531 case AMDGPU_HW_IP_VCN_ENC: 532 type = AMD_IP_BLOCK_TYPE_VCN; 533 break; 534 case AMDGPU_HW_IP_VCN_JPEG: 535 type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ? 536 AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN; 537 break; 538 default: 539 return -EINVAL; 540 } 541 542 for (i = 0; i < adev->num_ip_blocks; i++) 543 if (adev->ip_blocks[i].version->type == type && 544 adev->ip_blocks[i].status.valid && 545 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) 546 count++; 547 548 return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0; 549 } 550 case AMDGPU_INFO_TIMESTAMP: 551 ui64 = amdgpu_gfx_get_gpu_clock_counter(adev); 552 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; 553 case AMDGPU_INFO_FW_VERSION: { 554 struct drm_amdgpu_info_firmware fw_info; 555 int ret; 556 557 /* We only support one instance of each IP block right now. */ 558 if (info->query_fw.ip_instance != 0) 559 return -EINVAL; 560 561 ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev); 562 if (ret) 563 return ret; 564 565 return copy_to_user(out, &fw_info, 566 min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0; 567 } 568 case AMDGPU_INFO_NUM_BYTES_MOVED: 569 ui64 = atomic64_read(&adev->num_bytes_moved); 570 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; 571 case AMDGPU_INFO_NUM_EVICTIONS: 572 ui64 = atomic64_read(&adev->num_evictions); 573 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; 574 case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS: 575 ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); 576 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; 577 case AMDGPU_INFO_VRAM_USAGE: 578 ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 579 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; 580 case AMDGPU_INFO_VIS_VRAM_USAGE: 581 ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 582 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; 583 case AMDGPU_INFO_GTT_USAGE: 584 ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]); 585 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; 586 case AMDGPU_INFO_GDS_CONFIG: { 587 struct drm_amdgpu_info_gds gds_info; 588 589 memset(&gds_info, 0, sizeof(gds_info)); 590 gds_info.compute_partition_size = adev->gds.gds_size; 591 gds_info.gds_total_size = adev->gds.gds_size; 592 gds_info.gws_per_compute_partition = adev->gds.gws_size; 593 gds_info.oa_per_compute_partition = adev->gds.oa_size; 594 return copy_to_user(out, &gds_info, 595 min((size_t)size, sizeof(gds_info))) ? 
-EFAULT : 0; 596 } 597 case AMDGPU_INFO_VRAM_GTT: { 598 struct drm_amdgpu_info_vram_gtt vram_gtt; 599 600 vram_gtt.vram_size = adev->gmc.real_vram_size - 601 atomic64_read(&adev->vram_pin_size) - 602 AMDGPU_VM_RESERVED_VRAM; 603 vram_gtt.vram_cpu_accessible_size = 604 min(adev->gmc.visible_vram_size - 605 atomic64_read(&adev->visible_pin_size), 606 vram_gtt.vram_size); 607 vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; 608 vram_gtt.gtt_size *= PAGE_SIZE; 609 vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size); 610 return copy_to_user(out, &vram_gtt, 611 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; 612 } 613 case AMDGPU_INFO_MEMORY: { 614 struct drm_amdgpu_memory_info mem; 615 616 memset(&mem, 0, sizeof(mem)); 617 mem.vram.total_heap_size = adev->gmc.real_vram_size; 618 mem.vram.usable_heap_size = adev->gmc.real_vram_size - 619 atomic64_read(&adev->vram_pin_size) - 620 AMDGPU_VM_RESERVED_VRAM; 621 mem.vram.heap_usage = 622 amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 623 mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; 624 625 mem.cpu_accessible_vram.total_heap_size = 626 adev->gmc.visible_vram_size; 627 mem.cpu_accessible_vram.usable_heap_size = 628 min(adev->gmc.visible_vram_size - 629 atomic64_read(&adev->visible_pin_size), 630 mem.vram.usable_heap_size); 631 mem.cpu_accessible_vram.heap_usage = 632 amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); 633 mem.cpu_accessible_vram.max_allocation = 634 mem.cpu_accessible_vram.usable_heap_size * 3 / 4; 635 636 mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size; 637 mem.gtt.total_heap_size *= PAGE_SIZE; 638 mem.gtt.usable_heap_size = mem.gtt.total_heap_size - 639 atomic64_read(&adev->gart_pin_size); 640 mem.gtt.heap_usage = 641 amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]); 642 mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; 643 644 return copy_to_user(out, &mem, 645 min((size_t)size, sizeof(mem))) 646 ? -EFAULT : 0; 647 } 648 case AMDGPU_INFO_READ_MMR_REG: { 649 unsigned n, alloc_size; 650 uint32_t *regs; 651 unsigned se_num = (info->read_mmr_reg.instance >> 652 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & 653 AMDGPU_INFO_MMR_SE_INDEX_MASK; 654 unsigned sh_num = (info->read_mmr_reg.instance >> 655 AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & 656 AMDGPU_INFO_MMR_SH_INDEX_MASK; 657 658 /* set full masks if the userspace set all bits 659 * in the bitfields */ 660 if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) 661 se_num = 0xffffffff; 662 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) 663 sh_num = 0xffffffff; 664 665 if (info->read_mmr_reg.count > 128) 666 return -EINVAL; 667 668 regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); 669 if (!regs) 670 return -ENOMEM; 671 alloc_size = info->read_mmr_reg.count * sizeof(*regs); 672 673 amdgpu_gfx_off_ctrl(adev, false); 674 for (i = 0; i < info->read_mmr_reg.count; i++) { 675 if (amdgpu_asic_read_register(adev, se_num, sh_num, 676 info->read_mmr_reg.dword_offset + i, 677 ®s[i])) { 678 DRM_DEBUG_KMS("unallowed offset %#x\n", 679 info->read_mmr_reg.dword_offset + i); 680 kfree(regs); 681 amdgpu_gfx_off_ctrl(adev, true); 682 return -EFAULT; 683 } 684 } 685 amdgpu_gfx_off_ctrl(adev, true); 686 n = copy_to_user(out, regs, min(size, alloc_size)); 687 kfree(regs); 688 return n ? 
-EFAULT : 0; 689 } 690 case AMDGPU_INFO_DEV_INFO: { 691 struct drm_amdgpu_info_device dev_info = {}; 692 uint64_t vm_size; 693 694 dev_info.device_id = dev->pdev->device; 695 dev_info.chip_rev = adev->rev_id; 696 dev_info.external_rev = adev->external_rev_id; 697 dev_info.pci_rev = dev->pdev->revision; 698 dev_info.family = adev->family; 699 dev_info.num_shader_engines = adev->gfx.config.max_shader_engines; 700 dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; 701 /* return all clocks in KHz */ 702 dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; 703 if (adev->pm.dpm_enabled) { 704 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; 705 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; 706 } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && 707 adev->virt.ops->get_pp_clk) { 708 dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10; 709 dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10; 710 } else { 711 dev_info.max_engine_clock = adev->clock.default_sclk * 10; 712 dev_info.max_memory_clock = adev->clock.default_mclk * 10; 713 } 714 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; 715 dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * 716 adev->gfx.config.max_shader_engines; 717 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; 718 dev_info._pad = 0; 719 dev_info.ids_flags = 0; 720 if (adev->flags & AMD_IS_APU) 721 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; 722 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) 723 dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; 724 725 vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; 726 vm_size -= AMDGPU_VA_RESERVED_SIZE; 727 728 /* Older VCE FW versions are buggy and can handle only 40bits */ 729 if (adev->vce.fw_version && 730 adev->vce.fw_version < AMDGPU_VCE_FW_53_45) 731 vm_size = min(vm_size, 1ULL << 40); 732 733 dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; 734 dev_info.virtual_address_max = 735 min(vm_size, AMDGPU_GMC_HOLE_START); 736 737 if (vm_size > AMDGPU_GMC_HOLE_START) { 738 dev_info.high_va_offset = AMDGPU_GMC_HOLE_END; 739 dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size; 740 } 741 dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); 742 dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; 743 dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; 744 dev_info.cu_active_number = adev->gfx.cu_info.number; 745 dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; 746 dev_info.ce_ram_size = adev->gfx.ce_ram_size; 747 memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0], 748 sizeof(adev->gfx.cu_info.ao_cu_bitmap)); 749 memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], 750 sizeof(adev->gfx.cu_info.bitmap)); 751 dev_info.vram_type = adev->gmc.vram_type; 752 dev_info.vram_bit_width = adev->gmc.vram_width; 753 dev_info.vce_harvest_config = adev->vce.harvest_config; 754 dev_info.gc_double_offchip_lds_buf = 755 adev->gfx.config.double_offchip_lds_buf; 756 dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size; 757 dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs; 758 dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh; 759 dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches; 760 dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth; 761 dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth; 762 dev_info.max_gs_waves_per_vgt 
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info.pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)ras->supported << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ?
				    -EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
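
/*
 * A note on the return convention above (descriptive, derived from the code
 * itself): every query copies back min(info->return_size, sizeof(struct)),
 * so an older userspace that passes a smaller structure still receives the
 * prefix it knows about, while a newer userspace talking to an older kernel
 * should treat any trailing fields it did not receive as unset.
 */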

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
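
/*
 * Descriptive note: the teardown in amdgpu_driver_postclose_kms() below
 * mirrors the setup done in amdgpu_driver_open_kms() above - the CSA and
 * PRT mappings are removed before the per-file VM is destroyed, and the
 * PASID is only released once the fences on the page-directory BO have
 * signaled (hence the delayed free against pd->tbo.base.resv).
 */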

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
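
/*
 * Descriptive note on the ioctl table below: DRM_RENDER_ALLOW entries are
 * also reachable through render nodes, DRM_AUTH requires the client to be
 * authenticated against the primary node, and AMDGPU_SCHED is restricted to
 * DRM_MASTER since it overrides scheduling priorities on behalf of other
 * clients.
 */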
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);
	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 2; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
			   i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}