/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2	"amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0	"amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2	"amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

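/*
 * amdgpu_vcn_sw_init - driver-level VCN setup.
 *
 * Picks the firmware image for the detected VCN IP version, requests and
 * validates it, then allocates one VCPU BO per non-harvested instance to
 * hold the firmware image, stack/context area and the firmware shared
 * memory region.
 */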
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	unsigned int fw_shared_size, log_offset;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->ip_versions[UVD_HWIP][0]) {
	case IP_VERSION(1, 0, 0):
	case IP_VERSION(1, 0, 1):
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case IP_VERSION(2, 5, 0):
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 2, 0):
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			fw_name = FIRMWARE_RENOIR;
		else
			fw_name = FIRMWARE_GREEN_SARDINE;

		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 6, 0):
		fw_name = FIRMWARE_ALDEBARAN;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 0):
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 2):
		if (adev->asic_type == CHIP_NAVI12)
			fw_name = FIRMWARE_NAVI12;
		else
			fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 0, 64):
	case IP_VERSION(3, 0, 192):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
			fw_name = FIRMWARE_SIENNA_CICHLID;
		else
			fw_name = FIRMWARE_NAVY_FLOUNDER;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 2):
		fw_name = FIRMWARE_VANGOGH;
		break;
	case IP_VERSION(3, 0, 16):
		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 33):
		fw_name = FIRMWARE_BEIGE_GOBY;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 1, 1):
		fw_name = FIRMWARE_YELLOW_CARP;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 1, 2):
		fw_name = FIRMWARE_VCN_3_1_2;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 0):
		fw_name = FIRMWARE_VCN4_0_0;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 2):
		fw_name = FIRMWARE_VCN4_0_2;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(4, 0, 4):
		fw_name = FIRMWARE_VCN4_0_4;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
	} else {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
	}

	bo_size += fw_shared_size;

	if (amdgpu_vcnfw_log)
		bo_size += AMDGPU_VCNFW_LOG_SIZE;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

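		/*
		 * The firmware shared memory region sits at the tail of the
		 * VCPU BO; when amdgpu_vcnfw_log is enabled the last
		 * AMDGPU_VCNFW_LOG_SIZE bytes are reserved for the firmware
		 * log and the shared region is shifted down accordingly.
		 */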
		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
				bo_size - fw_shared_size;
		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
				bo_size - fw_shared_size;

		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;

		if (amdgpu_vcnfw_log) {
			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
		}

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
						    &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

/* from vcn4 and above, only unified queue is used */
static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool ret = false;

	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
		ret = true;

	return ret;
}

bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int vcn_config = adev->vcn.vcn_config[vcn_instance];

	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
		ret = true;
	}

	return ret;
}

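/*
 * Suspend saves the contents of each instance's VCPU BO to system memory so
 * that resume can restore the firmware state; when nothing was saved, resume
 * re-copies the ucode (for non-PSP loading) and clears the rest of the BO.
 */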
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
			drm_dev_exit(idx);
		}
	}
	return 0;
}

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
				drm_dev_exit(idx);
			}
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
						    le32_to_cpu(hdr->ucode_size_bytes));
					drm_dev_exit(idx);
				}
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

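/*
 * Idle worker: when no fences are outstanding on any instance and nothing is
 * being submitted, gate VCN power and drop the video power profile; otherwise
 * re-arm the delayed work. With DPG, the pause state of each instance is
 * updated based on its outstanding encode work.
 */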
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;
	int r = 0;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
			    unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r = 0;

	atomic_inc(&adev->vcn.total_submission_cnt);

	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
					       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

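/*
 * Ring tests: push a small token through the ring and poll either a scratch
 * register (decode ring) or the ring read pointer (software decode and encode
 * rings) to confirm the engine is consuming commands. They bail out early
 * under SR-IOV, where direct register access is not supported.
 */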
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib_msg,
				   struct dma_fence **fence)
{
	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     64, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

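/*
 * The create/destroy messages below are minimal decoder session descriptors
 * written into a GPU-visible IB. The decode IB tests submit a create followed
 * by a destroy through amdgpu_vcn_dec_send_msg() (or its software-ring
 * variant) and wait on the resulting fence.
 */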
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

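/*
 * Unified-queue submissions (VCN 4.x) prepend a small header to each IB:
 * a checksum packet followed by an engine-info packet. The checksum slot is
 * returned to the caller and filled in once the payload has been packed.
 */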
static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
						   uint32_t ib_pack_in_dw, bool enc)
{
	uint32_t *ib_checksum;

	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
	ib->ptr[ib->length_dw++] = 0x30000002;
	ib_checksum = &ib->ptr[ib->length_dw++];
	ib->ptr[ib->length_dw++] = ib_pack_in_dw;

	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
	ib->ptr[ib->length_dw++] = 0x30000001;
	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);

	return ib_checksum;
}

static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
						uint32_t ib_pack_in_dw)
{
	uint32_t i;
	uint32_t checksum = 0;

	for (i = 0; i < ib_pack_in_dw; i++)
		checksum += *(*ib_checksum + 2 + i);

	**ib_checksum = checksum;
}

static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib_msg,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	bool sq = amdgpu_vcn_using_unified_queue(ring);
	uint32_t *ib_checksum;
	uint32_t ib_pack_in_dw;
	int i, r;

	if (sq)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->length_dw = 0;

	/* single queue headers */
	if (sq) {
		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
				+ 4 + 2; /* engine info + decoding ib in dw */
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
	}

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (sq)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib_msg,
					 struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	bool sq = amdgpu_vcn_using_unified_queue(ring);
	int i, r;

	if (sq)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (sq)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (sq)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	bool sq = amdgpu_vcn_using_unified_queue(ring);
	int i, r;

	if (sq)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (sq)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (sq)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	amdgpu_ib_free(adev, &ib, fence);
	dma_fence_put(fence);

	return r;
}

int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	long r;

	r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);

error:
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

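/*
 * When firmware is loaded by the PSP, register the VCN image in the ucode
 * list so the PSP picks it up; only the first two instances get their own
 * ucode entry.
 */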
void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
{
	int i;
	unsigned int idx;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			/* currently only support 2 FW instances */
			if (i >= 2) {
				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
				break;
			}
			idx = AMDGPU_UCODE_ID_VCN + i;
			adev->firmware.ucode[idx].ucode_id = idx;
			adev->firmware.ucode[idx].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
	}
}

/*
 * debugfs for mapping vcn firmware log buffer.
 */
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_vcn_inst *vcn;
	void *log_buf;
	volatile struct amdgpu_vcn_fwlog *plog;
	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
	unsigned int read_num[2] = {0};

	vcn = file_inode(f)->i_private;
	if (!vcn)
		return -ENODEV;

	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
		return -EFAULT;

	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;

	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
	read_pos = plog->rptr;
	write_pos = plog->wptr;

	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
		return -EFAULT;

	if (!size || (read_pos == write_pos))
		return 0;

	if (write_pos > read_pos) {
		available = write_pos - read_pos;
		read_num[0] = min(size, (size_t)available);
	} else {
		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
		available = read_num[0] + write_pos - plog->header_size;
		if (size > available)
			read_num[1] = write_pos - plog->header_size;
		else if (size > read_num[0])
			read_num[1] = size - read_num[0];
		else
			read_num[0] = size;
	}

	for (i = 0; i < 2; i++) {
		if (read_num[i]) {
			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
				read_pos = plog->header_size;
			if (read_num[i] == copy_to_user((buf + read_bytes),
							(log_buf + read_pos), read_num[i]))
				return -EFAULT;

			read_bytes += read_num[i];
			read_pos += read_num[i];
		}
	}

	plog->rptr = read_pos;
	*pos += read_bytes;
	return read_bytes;
}

static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_vcn_fwlog_read,
	.llseek = default_llseek
};
#endif

void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
				   struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
				 &amdgpu_debugfs_vcnfwlog_fops,
				 AMDGPU_VCNFW_LOG_SIZE);
#endif
}

void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
							      + vcn->fw_shared.log_offset;

	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
	fw_log->is_enabled = 1;
	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);

	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
	log_buf->rptr = log_buf->header_size;
	log_buf->wptr = log_buf->header_size;
	log_buf->wrapped = 0;
#endif
}

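/*
 * RAS poison interrupts are simply forwarded to the RAS framework, which
 * dispatches them to the handler registered for the VCN block.
 */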
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->vcn.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);

	return 0;
}

void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
{
	if (!adev->vcn.ras)
		return;

	amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);

	strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
	adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
	adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
	adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;

	/* If the block doesn't define its own ras_late_init, use the default one */
	if (!adev->vcn.ras->ras_block.ras_late_init)
		adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
}