/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

/**
 * amdgpu_unregister_gpu_instance - remove a GPU from the mgpu list
 *
 * @adev: amdgpu device pointer
 *
 * Removes @adev from the global multi-GPU table and updates the
 * APU/dGPU instance counts.
 */
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}
/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

/**
 * amdgpu_register_gpu_instance - add a GPU to the mgpu list
 *
 * @adev: amdgpu device pointer
 *
 * Adds @adev to the global multi-GPU table and updates the
 * APU/dGPU instance counts.
 */
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instances\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init() should report only fatal errors
	 * (memory allocation, iomapping or memory manager
	 * initialization failures); it must properly initialize the
	 * GPU MC controller and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: they require modeset init,
	 * but failure is not fatal.
	 */
	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

	if (amdgpu_device_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		if (query_fw->index > 1)
			return -EINVAL;
		if (query_fw->index == 0) {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
		} else {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
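/*
 * Illustrative only (not part of the driver): a minimal userspace sketch
 * of how the query above is typically reached through libdrm's
 * amdgpu_query_firmware_version(), which wraps DRM_IOCTL_AMDGPU_INFO
 * with AMDGPU_INFO_FW_VERSION. Error handling is trimmed for brevity;
 * the render-node path is an assumption for the example.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <amdgpu.h>
 *	#include <amdgpu_drm.h>
 *
 *	int main(void)
 *	{
 *		uint32_t major, minor, ver, feature;
 *		amdgpu_device_handle dev;
 *		int fd = open("/dev/dri/renderD128", O_RDWR);
 *
 *		if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
 *			return 1;
 *		// fw_type/index mirror struct drm_amdgpu_query_fw above
 *		if (!amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_MEC,
 *						   0, 0, &ver, &feature))
 *			printf("MEC fw 0x%08x, feature %u\n", ver, feature);
 *		amdgpu_device_deinitialize(dev);
 *		return 0;
 *	}
 */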
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_jpeg.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}
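/*
 * Illustrative only (not part of the driver): a short userspace sketch,
 * via libdrm's amdgpu_query_hw_ip_info(), showing how the
 * available_rings bitmask filled in above is typically consumed.
 * Assumes an already-initialized amdgpu_device_handle "dev".
 *
 *	struct drm_amdgpu_info_hw_ip ip;
 *
 *	if (!amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_COMPUTE, 0, &ip)) {
 *		// each set bit is one usable ring
 *		int rings = __builtin_popcount(ip.available_rings);
 *
 *		printf("compute %u.%u, %d rings, IB align %u/%u\n",
 *		       ip.hw_ip_version_major, ip.hw_ip_version_minor,
 *		       rings, ip.ib_start_alignment, ip.ib_size_alignment);
 *	}
 */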
/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm dev pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
			   adev->virt.ops->get_pp_clk) {
			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info.pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size))) ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset))) ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
						    min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)ras->supported << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ? -EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
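/*
 * Illustrative only (not part of the driver): the raw ioctl contract for
 * the big switch above. Userspace fills struct drm_amdgpu_info with a
 * query id plus a return buffer pointer/size; the kernel copies back at
 * most return_size bytes. A minimal sketch without libdrm, assuming an
 * already-open render-node fd (and <stdio.h>/<stdint.h> for the print):
 *
 *	#include <sys/ioctl.h>
 *	#include <amdgpu_drm.h>
 *
 *	uint64_t vram_usage = 0;
 *	struct drm_amdgpu_info req = {
 *		.return_pointer = (uintptr_t)&vram_usage,
 *		.return_size = sizeof(vram_usage),
 *		.query = AMDGPU_INFO_VRAM_USAGE,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req) == 0)
 *		printf("VRAM in use: %llu bytes\n",
 *		       (unsigned long long)vram_usage);
 */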
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
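/*
 * Illustrative only (not part of the driver): each open() of the device
 * node goes through amdgpu_driver_open_kms() above and gets its own
 * amdgpu_fpriv, i.e. a private GPU VM and context manager. With libdrm
 * that per-fd state is reached via amdgpu_device_initialize(); the
 * render-node path is an assumption for the example:
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	uint32_t major, minor;
 *	amdgpu_device_handle dev;
 *
 *	// the open callback ran; this fd now owns a fresh VM/fpriv
 *	if (fd >= 0 && !amdgpu_device_initialize(fd, &major, &minor, &dev)) {
 *		// ... allocate BOs, submit work, etc. ...
 *		amdgpu_device_deinitialize(dev);
 *	}
 *	close(fd);	// fpriv/VM torn down via amdgpu_driver_postclose_kms()
 */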
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
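/*
 * Illustrative only (not part of the driver): the enable/disable hooks
 * above are driven by DRM core vblank handling, which userspace reaches
 * through drmWaitVBlank() on the primary node. A minimal sketch for
 * crtc 0, with error handling omitted:
 *
 *	#include <xf86drm.h>
 *
 *	drmVBlank vbl = {
 *		.request = {
 *			.type = DRM_VBLANK_RELATIVE,  // wait N vblanks from now
 *			.sequence = 1,
 *		},
 *	};
 *
 *	// DRM core turns the interrupt on via amdgpu_enable_vblank_kms(),
 *	// waits, then disables it again after a grace period.
 *	drmWaitVBlank(fd, &vbl);
 */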
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);
	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP TA (index 0 = XGMI, index 1 = RAS) */
	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 2; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
			   i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}
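/*
 * Illustrative only (not part of the driver): with CONFIG_DEBUG_FS
 * enabled, the entry registered above is typically exposed under the
 * device's drm debugfs directory (minor number may vary), so the
 * firmware report can be read with e.g.:
 *
 *	$ sudo cat /sys/kernel/debug/dri/0/amdgpu_firmware_info
 *	VCE feature version: ..., firmware version: 0x...
 *	UVD feature version: ..., firmware version: 0x...
 */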