/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

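/**
 * gmc_v8_0_init_golden_registers - program golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Program the per-ASIC golden register values and clock gating
 * init sequences for the memory controller (VI).
 */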
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

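/**
 * gmc_v8_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, then blackout the MC and block CPU
 * framebuffer access so the controller can be safely reprogrammed (VI).
 */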
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

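/**
 * gmc_v8_0_mc_resume - restart the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the MC out of blackout mode and re-enable CPU framebuffer
 * access (VI).
 */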
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
		    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris11_k";
		else
			chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris10_k";
		else
			chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris12_k";
		else
			chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do
	 * that for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

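/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */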
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do
	 * that for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

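/**
 * gmc_v8_0_vram_gtt_location - place vram and gart in the address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding the ranges
 *
 * Query the VRAM base from the hardware (except on a VF, where it
 * starts at 0) and lay out the vram and gart locations (VI).
 */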
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid;
	unsigned int tmp;

	if (amdgpu_in_reset(adev))
		return -EIO;

	for (vmid = 1; vmid < 16; vmid++) {
		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
		    (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

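/**
 * gmc_v8_0_emit_flush_gpu_tlb - flush the TLB from a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance to flush
 * @pd_addr: address of the new page directory
 *
 * Update the page table base address for @vmid and request a TLB
 * flush via ring register writes.
 * Returns the page directory address that was set (VI).
 */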
static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

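/**
 * gmc_v8_0_emit_pasid_mapping - update the VMID-PASID mapping from a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance
 * @pasid: pasid to map @vmid to
 *
 * Write the pasid into the IH block's VMID lookup table (VI).
 */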
static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/*
 * PTE format on VI:
 * 63:40 reserved
 * 39:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 reserved
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VI:
 * 63:59 block fragment size
 * 58:40 reserved
 * 39:1 physical base address of PTE
 * bits 5:1 must be 0.
 * 0 valid
 */

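/**
 * gmc_v8_0_get_vm_pde - adjust a PDE for the VM hardware
 *
 * @adev: amdgpu_device pointer
 * @level: page table level
 * @addr: page directory entry address
 * @flags: page directory entry flags
 *
 * No adjustment is needed on VI; just sanity check that the address
 * meets the PDE range and alignment constraints above.
 */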
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 tmp, field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

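/**
 * gmc_v8_0_gart_init - gart init
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common gart structure and allocate the page table
 * in vram; each GPU page needs one 8 byte PTE (VI).
 * Returns 0 for success, error for failure.
 */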
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

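/**
 * gmc_v8_0_convert_vram_type - convert the MC vram type field
 *
 * @mc_seq_vram_type: memory type field from the MC_SEQ_MISC0 register
 *
 * Translate the hardware memory type into the corresponding
 * AMDGPU_VRAM_TYPE_* enum value (VI).
 */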
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

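/**
 * gmc_v8_0_early_init - set up gmc callbacks and apertures
 *
 * @handle: amdgpu_device pointer
 *
 * Install the gmc and irq callback tables and initialize the
 * shared and private aperture ranges (VI).
 */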
static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

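/**
 * gmc_v8_0_get_vbios_fb_size - get the size of the vbios reserved fb
 *
 * @adev: amdgpu_device pointer
 *
 * Compute how much vram the vbios framebuffer occupies: the fixed
 * VGA allocation when VGA mode is enabled, otherwise the size of
 * the active viewport (VI).
 */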
static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

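/**
 * gmc_v8_0_sw_init - driver software init
 *
 * @handle: amdgpu_device pointer
 *
 * Detect the vram type, hook up the VM fault interrupt sources,
 * size the VM address space, load the MC firmware, and set up the
 * memory manager, gart, and VM manager (VI).
 */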
static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					  GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gmc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev)) {
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
	}

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

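/**
 * gmc_v8_0_vm_fault_interrupt_state - toggle VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: irq source being toggled
 * @type: interrupt type
 * @state: new interrupt state
 *
 * Enable or disable the protection fault interrupt bits in the
 * VM context0 and context1 control registers (VI).
 */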
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

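/**
 * gmc_v8_0_process_interrupt - process a VM fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source this interrupt belongs to
 * @entry: interrupt vector entry
 *
 * Read and clear the protection fault registers, print the decoded
 * fault, and save the fault info for the KFD when the faulting VMID
 * belongs to it (VI).
 */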
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
						VM_CONTEXT1_PROTECTION_FAULT_STATUS,
						PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

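/**
 * fiji_update_mc_medium_grain_clock_gating - toggle MC medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Set or clear the clock gating enable bits in the MC hub, XPB,
 * ATC, CITF, and VM L2 clock gating registers (Fiji).
 */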
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

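/**
 * fiji_update_mc_light_sleep - toggle MC memory light sleep
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable memory LS
 *
 * Set or clear the memory light sleep enable bits in the MC hub,
 * XPB, ATC, CITF, and VM L2 clock gating registers (Fiji).
 */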
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pde = gmc_v8_0_get_vm_pde,
	.get_vm_pte = gmc_v8_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};