/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}
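/*
 * Illustrative sketch only (not built): how a flat (reg, and_mask, or_mask)
 * triplet list like the golden-settings tables above is typically consumed
 * when handed to amdgpu_device_program_register_sequence().  The helper
 * name and the exact masking policy shown here are assumptions for
 * illustration; see amdgpu_device.c for the authoritative implementation.
 */
#if 0
static void example_program_reg_triplets(struct amdgpu_device *adev,
					 const u32 *regs, u32 array_size)
{
	u32 i, reg, and_mask, or_mask, tmp;

	for (i = 0; i < array_size; i += 3) {
		reg = regs[i + 0];
		and_mask = regs[i + 1];
		or_mask = regs[i + 2];

		tmp = RREG32(reg);	/* read-modify-write */
		tmp &= ~and_mask;	/* clear the field covered by the mask */
		tmp |= or_mask;		/* apply the golden value */
		WREG32(reg, tmp);
	}
}
#endif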
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}
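/*
 * gmc_v8_0_mc_stop()/gmc_v8_0_mc_resume() are meant to be used as a pair
 * around operations that must not race with MC clients, as done by
 * gmc_v8_0_pre_soft_reset() and gmc_v8_0_post_soft_reset() below.
 */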
/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
		    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris11_k";
		else
			chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris10_k";
		else
			chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
			chip_name = "polaris12_k";
		} else {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
			/* Polaris12 32bit ASIC needs a special MC firmware */
			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
				chip_name = "polaris12_32";
			else
				chip_name = "polaris12";
		}
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}
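/*
 * Note: chip_name expands via the snprintf() above into
 * "amdgpu/<chip_name>_mc.bin", e.g. "amdgpu/polaris12_32_mc.bin",
 * matching the MODULE_FIRMWARE() declarations at the top of this file.
 */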
/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do that
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}
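/*
 * The mmMC_SEQ_SUP_CNTL writes above follow a fixed handshake that recurs
 * in gmc_v8_0_polaris_mc_load_microcode() below: 0x8/0x10 resets the
 * sequencer and opens it for writes, the io debug pairs and ucode words
 * are streamed in, and 0x8/0x4/0x1 puts the sequencer back into the
 * running state before waiting for DRAM training to complete.
 */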
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do that
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}
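/*
 * MC_VM_FB_LOCATION stores the framebuffer base in units of 16 MB, hence
 * the << 24 above.  For example, a low field of 0x00F8 places the FB base
 * at 0x00F8 << 24 = 0xF8000000 in the MC address space.
 */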
/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;

		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
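	/*
	 * Example: NOOFCHAN = 3 with CHANSIZE = 0 decodes to 8 channels of
	 * 32 bits each, i.e. vram_width = 8 * 32 = a 256-bit memory bus.
	 */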
	/* size in MB */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid;
	unsigned int tmp;

	if (amdgpu_in_reset(adev))
		return -EIO;

	for (vmid = 1; vmid < 16; vmid++) {
		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
		    (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/*
 * PTE format on VI:
 * 63:40 reserved
 * 39:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 reserved
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VI:
 * 63:59 block fragment size
 * 58:40 reserved
 * 39:1 physical base address of PTE
 * bits 5:1 must be 0.
 * 0 valid
 */
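/*
 * Illustrative sketch only (not built): composing a VI PTE from the bit
 * layout documented above.  The helper name and open-coded shifts are
 * assumptions for illustration; the driver itself derives PTE bits via
 * gmc_v8_0_get_vm_pte() and the AMDGPU_PTE_* flags.
 */
#if 0
static inline uint64_t example_make_vi_pte(uint64_t page_base, bool valid,
					   bool system, bool snooped,
					   bool read, bool write)
{
	uint64_t pte = page_base & 0xFFFFFFF000ULL;	/* 39:12 page base */

	pte |= (uint64_t)valid << 0;	/* 0: valid */
	pte |= (uint64_t)system << 1;	/* 1: page lives in system memory */
	pte |= (uint64_t)snooped << 2;	/* 2: snooped */
	pte |= (uint64_t)read << 5;	/* 5: read allowed */
	pte |= (uint64_t)write << 6;	/* 6: write allowed */
	return pte;
}
#endif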
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
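/*
 * gmc_v8_0_set_fault_enable_default() is driven by the amdgpu_vm_fault_stop
 * module parameter: gmc_v8_0_gart_enable() below disables the default-page
 * redirect for AMDGPU_VM_FAULT_STOP_ALWAYS, and gmc_v8_0_process_interrupt()
 * does the same after the first fault for AMDGPU_VM_FAULT_STOP_FIRST.
 */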
/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}
/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 tmp, field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
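/*
 * Each GART PTE is 8 bytes, hence table_size = num_gpu_pages * 8 above.
 * For example, the default 1024 MB GART covers 262144 4K pages and
 * therefore needs a 2 MB page table in VRAM.
 */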
/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}
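/*
 * mc_client is four ASCII characters packed big-endian into a u32, so
 * e.g. mc_client == 0x43423000 is decoded by block[] above as "CB0".
 */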
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}
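/*
 * The viewport path above assumes 4 bytes per pixel, so a 1920x1080
 * scanout reserved by the vbios works out to 1920 * 1080 * 4 =
 * 8294400 bytes (a little under 8 MB).
 */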
#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for VI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					  GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}
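/*
 * The 40-bit MC mask above covers 2^40 bytes = 1 TiB of internal GPU
 * address space, which is why both the DMA mask and the GPUVM size passed
 * to amdgpu_vm_adjust_size() in gmc_v8_0_sw_init() are capped at 40 bits.
 */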
static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gmc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev))
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}
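/*
 * The four callbacks above implement the GMC soft reset flow:
 * check_soft_reset() latches which SRBM reset bits are needed,
 * pre_soft_reset() blacks out the MC, soft_reset() pulses the latched
 * bits in mmSRBM_SOFT_RESET, and post_soft_reset() lifts the blackout.
 */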
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
						VM_CONTEXT1_PROTECTION_FAULT_STATUS,
						PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}
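/*
 * The KFD fault-info decode above treats PROTECTIONS bits 0-2 as the
 * "valid" class of faults, bit 3 as read, bit 4 as write and bit 5 as
 * execute; a status with PROTECTIONS == 0x10, for instance, reports a
 * write protection fault.
 */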
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}
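/*
 * fiji_update_mc_light_sleep() below touches the same nine registers as
 * the MGCG helper above, but toggles their *__MEM_LS_ENABLE_MASK bits
 * instead of the coarse *__ENABLE_MASK clock-gating bits.
 */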
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pde = gmc_v8_0_get_vm_pde,
	.get_vm_pte = gmc_v8_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};