1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 #include <linux/pci.h> 26 27 #include <drm/drm_cache.h> 28 29 #include "amdgpu.h" 30 #include "gmc_v9_0.h" 31 #include "amdgpu_atomfirmware.h" 32 #include "amdgpu_gem.h" 33 34 #include "gc/gc_9_0_sh_mask.h" 35 #include "dce/dce_12_0_offset.h" 36 #include "dce/dce_12_0_sh_mask.h" 37 #include "vega10_enum.h" 38 #include "mmhub/mmhub_1_0_offset.h" 39 #include "athub/athub_1_0_sh_mask.h" 40 #include "athub/athub_1_0_offset.h" 41 #include "oss/osssys_4_0_offset.h" 42 43 #include "soc15.h" 44 #include "soc15d.h" 45 #include "soc15_common.h" 46 #include "umc/umc_6_0_sh_mask.h" 47 48 #include "gfxhub_v1_0.h" 49 #include "mmhub_v1_0.h" 50 #include "athub_v1_0.h" 51 #include "gfxhub_v1_1.h" 52 #include "gfxhub_v1_2.h" 53 #include "mmhub_v9_4.h" 54 #include "mmhub_v1_7.h" 55 #include "mmhub_v1_8.h" 56 #include "umc_v6_1.h" 57 #include "umc_v6_0.h" 58 #include "umc_v6_7.h" 59 #include "hdp_v4_0.h" 60 #include "mca_v3_0.h" 61 62 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" 63 64 #include "amdgpu_ras.h" 65 #include "amdgpu_xgmi.h" 66 67 #include "amdgpu_reset.h" 68 69 /* add these here since we already include dce12 headers and these are for DCN */ 70 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d 71 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 72 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0 73 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10 74 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL 75 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L 76 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d 77 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2 78 79 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea 80 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2 81 82 #define MAX_MEM_RANGES 8 83 84 static const char *gfxhub_client_ids[] = { 85 "CB", 86 "DB", 87 "IA", 88 "WD", 89 "CPF", 90 "CPC", 91 "CPG", 92 "RLC", 93 "TCP", 94 "SQC (inst)", 95 "SQC (data)", 96 "SQG", 97 "PA", 98 }; 99 100 static const char *mmhub_client_ids_raven[][2] = { 101 [0][0] = "MP1", 102 [1][0] = "MP0", 103 [2][0] = "VCN", 104 [3][0] = "VCNU", 105 [4][0] = "HDP", 106 [5][0] = "DCE", 107 [13][0] = "UTCL2", 108 [19][0] = "TLS", 109 [26][0] = "OSS", 110 [27][0] = "SDMA0", 111 [0][1] = "MP1", 112 [1][1] = "MP0", 113 [2][1] = "VCN", 114 [3][1] = "VCNU", 115 [4][1] = "HDP", 116 [5][1] = "XDP", 117 [6][1] = "DBGU0", 118 [7][1] = "DCE", 119 [8][1] = "DCEDWB0", 120 [9][1] = "DCEDWB1", 121 [26][1] = "OSS", 122 [27][1] = "SDMA0", 123 }; 124 125 static const char *mmhub_client_ids_renoir[][2] = { 126 [0][0] = "MP1", 127 [1][0] = "MP0", 128 [2][0] = "HDP", 129 [4][0] = "DCEDMC", 130 [5][0] = "DCEVGA", 131 [13][0] = "UTCL2", 132 [19][0] = "TLS", 133 [26][0] = "OSS", 134 [27][0] = "SDMA0", 135 [28][0] = "VCN", 136 [29][0] = "VCNU", 137 [30][0] = "JPEG", 138 [0][1] = "MP1", 139 [1][1] = "MP0", 140 [2][1] = "HDP", 141 [3][1] = "XDP", 142 [6][1] = "DBGU0", 143 [7][1] = "DCEDMC", 144 [8][1] = "DCEVGA", 145 [9][1] = "DCEDWB", 146 [26][1] = "OSS", 147 [27][1] = "SDMA0", 148 [28][1] = "VCN", 149 [29][1] = "VCNU", 150 [30][1] = "JPEG", 151 }; 152 153 static const char *mmhub_client_ids_vega10[][2] = { 154 [0][0] = "MP0", 155 [1][0] = "UVD", 156 [2][0] = "UVDU", 157 [3][0] = "HDP", 158 [13][0] = "UTCL2", 159 [14][0] = "OSS", 160 [15][0] = "SDMA1", 161 [32+0][0] = "VCE0", 162 [32+1][0] = "VCE0U", 163 [32+2][0] = "XDMA", 164 [32+3][0] = "DCE", 165 [32+4][0] = "MP1", 166 [32+14][0] = "SDMA0", 167 [0][1] = "MP0", 168 [1][1] = "UVD", 169 [2][1] = "UVDU", 170 [3][1] = "DBGU0", 171 [4][1] = "HDP", 172 [5][1] = "XDP", 173 [14][1] = "OSS", 174 [15][1] = "SDMA0", 175 [32+0][1] = "VCE0", 176 [32+1][1] = "VCE0U", 177 [32+2][1] = "XDMA", 178 [32+3][1] = "DCE", 179 [32+4][1] = "DCEDWB", 180 [32+5][1] = "MP1", 181 [32+6][1] = "DBGU1", 182 [32+14][1] = "SDMA1", 183 }; 184 185 static const char *mmhub_client_ids_vega12[][2] = { 186 [0][0] = "MP0", 187 [1][0] = "VCE0", 188 [2][0] = "VCE0U", 189 [3][0] = "HDP", 190 [13][0] = "UTCL2", 191 [14][0] = "OSS", 192 [15][0] = "SDMA1", 193 [32+0][0] = "DCE", 194 [32+1][0] = "XDMA", 195 [32+2][0] = "UVD", 196 [32+3][0] = "UVDU", 197 [32+4][0] = "MP1", 198 [32+15][0] = "SDMA0", 199 [0][1] = "MP0", 200 [1][1] = "VCE0", 201 [2][1] = "VCE0U", 202 [3][1] = "DBGU0", 203 [4][1] = "HDP", 204 [5][1] = "XDP", 205 [14][1] = "OSS", 206 [15][1] = "SDMA0", 207 [32+0][1] = "DCE", 208 [32+1][1] = "DCEDWB", 209 [32+2][1] = "XDMA", 210 [32+3][1] = "UVD", 211 [32+4][1] = "UVDU", 212 [32+5][1] = "MP1", 213 [32+6][1] = "DBGU1", 214 [32+15][1] = "SDMA1", 215 }; 216 217 static const char *mmhub_client_ids_vega20[][2] = { 218 [0][0] = "XDMA", 219 [1][0] = "DCE", 220 [2][0] = "VCE0", 221 [3][0] = "VCE0U", 222 [4][0] = "UVD", 223 [5][0] = "UVD1U", 224 [13][0] = "OSS", 225 [14][0] = "HDP", 226 [15][0] = "SDMA0", 227 [32+0][0] = "UVD", 228 [32+1][0] = "UVDU", 229 [32+2][0] = "MP1", 230 [32+3][0] = "MP0", 231 [32+12][0] = "UTCL2", 232 [32+14][0] = "SDMA1", 233 [0][1] = "XDMA", 234 [1][1] = "DCE", 235 [2][1] = "DCEDWB", 236 [3][1] = "VCE0", 237 [4][1] = "VCE0U", 238 [5][1] = "UVD1", 239 [6][1] = "UVD1U", 240 [7][1] = "DBGU0", 241 [8][1] = "XDP", 242 [13][1] = "OSS", 243 [14][1] = "HDP", 244 [15][1] = "SDMA0", 245 [32+0][1] = "UVD", 246 [32+1][1] = "UVDU", 247 [32+2][1] = "DBGU1", 248 [32+3][1] = "MP1", 249 [32+4][1] = "MP0", 250 [32+14][1] = "SDMA1", 251 }; 252 253 static const char *mmhub_client_ids_arcturus[][2] = { 254 [0][0] = "DBGU1", 255 [1][0] = "XDP", 256 [2][0] = "MP1", 257 [14][0] = "HDP", 258 [171][0] = "JPEG", 259 [172][0] = "VCN", 260 [173][0] = "VCNU", 261 [203][0] = "JPEG1", 262 [204][0] = "VCN1", 263 [205][0] = "VCN1U", 264 [256][0] = "SDMA0", 265 [257][0] = "SDMA1", 266 [258][0] = "SDMA2", 267 [259][0] = "SDMA3", 268 [260][0] = "SDMA4", 269 [261][0] = "SDMA5", 270 [262][0] = "SDMA6", 271 [263][0] = "SDMA7", 272 [384][0] = "OSS", 273 [0][1] = "DBGU1", 274 [1][1] = "XDP", 275 [2][1] = "MP1", 276 [14][1] = "HDP", 277 [171][1] = "JPEG", 278 [172][1] = "VCN", 279 [173][1] = "VCNU", 280 [203][1] = "JPEG1", 281 [204][1] = "VCN1", 282 [205][1] = "VCN1U", 283 [256][1] = "SDMA0", 284 [257][1] = "SDMA1", 285 [258][1] = "SDMA2", 286 [259][1] = "SDMA3", 287 [260][1] = "SDMA4", 288 [261][1] = "SDMA5", 289 [262][1] = "SDMA6", 290 [263][1] = "SDMA7", 291 [384][1] = "OSS", 292 }; 293 294 static const char *mmhub_client_ids_aldebaran[][2] = { 295 [2][0] = "MP1", 296 [3][0] = "MP0", 297 [32+1][0] = "DBGU_IO0", 298 [32+2][0] = "DBGU_IO2", 299 [32+4][0] = "MPIO", 300 [96+11][0] = "JPEG0", 301 [96+12][0] = "VCN0", 302 [96+13][0] = "VCNU0", 303 [128+11][0] = "JPEG1", 304 [128+12][0] = "VCN1", 305 [128+13][0] = "VCNU1", 306 [160+1][0] = "XDP", 307 [160+14][0] = "HDP", 308 [256+0][0] = "SDMA0", 309 [256+1][0] = "SDMA1", 310 [256+2][0] = "SDMA2", 311 [256+3][0] = "SDMA3", 312 [256+4][0] = "SDMA4", 313 [384+0][0] = "OSS", 314 [2][1] = "MP1", 315 [3][1] = "MP0", 316 [32+1][1] = "DBGU_IO0", 317 [32+2][1] = "DBGU_IO2", 318 [32+4][1] = "MPIO", 319 [96+11][1] = "JPEG0", 320 [96+12][1] = "VCN0", 321 [96+13][1] = "VCNU0", 322 [128+11][1] = "JPEG1", 323 [128+12][1] = "VCN1", 324 [128+13][1] = "VCNU1", 325 [160+1][1] = "XDP", 326 [160+14][1] = "HDP", 327 [256+0][1] = "SDMA0", 328 [256+1][1] = "SDMA1", 329 [256+2][1] = "SDMA2", 330 [256+3][1] = "SDMA3", 331 [256+4][1] = "SDMA4", 332 [384+0][1] = "OSS", 333 }; 334 335 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = 336 { 337 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa), 338 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565) 339 }; 340 341 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = 342 { 343 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800), 344 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008) 345 }; 346 347 static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = { 348 (0x000143c0 + 0x00000000), 349 (0x000143c0 + 0x00000800), 350 (0x000143c0 + 0x00001000), 351 (0x000143c0 + 0x00001800), 352 (0x000543c0 + 0x00000000), 353 (0x000543c0 + 0x00000800), 354 (0x000543c0 + 0x00001000), 355 (0x000543c0 + 0x00001800), 356 (0x000943c0 + 0x00000000), 357 (0x000943c0 + 0x00000800), 358 (0x000943c0 + 0x00001000), 359 (0x000943c0 + 0x00001800), 360 (0x000d43c0 + 0x00000000), 361 (0x000d43c0 + 0x00000800), 362 (0x000d43c0 + 0x00001000), 363 (0x000d43c0 + 0x00001800), 364 (0x001143c0 + 0x00000000), 365 (0x001143c0 + 0x00000800), 366 (0x001143c0 + 0x00001000), 367 (0x001143c0 + 0x00001800), 368 (0x001543c0 + 0x00000000), 369 (0x001543c0 + 0x00000800), 370 (0x001543c0 + 0x00001000), 371 (0x001543c0 + 0x00001800), 372 (0x001943c0 + 0x00000000), 373 (0x001943c0 + 0x00000800), 374 (0x001943c0 + 0x00001000), 375 (0x001943c0 + 0x00001800), 376 (0x001d43c0 + 0x00000000), 377 (0x001d43c0 + 0x00000800), 378 (0x001d43c0 + 0x00001000), 379 (0x001d43c0 + 0x00001800), 380 }; 381 382 static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = { 383 (0x000143e0 + 0x00000000), 384 (0x000143e0 + 0x00000800), 385 (0x000143e0 + 0x00001000), 386 (0x000143e0 + 0x00001800), 387 (0x000543e0 + 0x00000000), 388 (0x000543e0 + 0x00000800), 389 (0x000543e0 + 0x00001000), 390 (0x000543e0 + 0x00001800), 391 (0x000943e0 + 0x00000000), 392 (0x000943e0 + 0x00000800), 393 (0x000943e0 + 0x00001000), 394 (0x000943e0 + 0x00001800), 395 (0x000d43e0 + 0x00000000), 396 (0x000d43e0 + 0x00000800), 397 (0x000d43e0 + 0x00001000), 398 (0x000d43e0 + 0x00001800), 399 (0x001143e0 + 0x00000000), 400 (0x001143e0 + 0x00000800), 401 (0x001143e0 + 0x00001000), 402 (0x001143e0 + 0x00001800), 403 (0x001543e0 + 0x00000000), 404 (0x001543e0 + 0x00000800), 405 (0x001543e0 + 0x00001000), 406 (0x001543e0 + 0x00001800), 407 (0x001943e0 + 0x00000000), 408 (0x001943e0 + 0x00000800), 409 (0x001943e0 + 0x00001000), 410 (0x001943e0 + 0x00001800), 411 (0x001d43e0 + 0x00000000), 412 (0x001d43e0 + 0x00000800), 413 (0x001d43e0 + 0x00001000), 414 (0x001d43e0 + 0x00001800), 415 }; 416 417 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, 418 struct amdgpu_irq_src *src, 419 unsigned type, 420 enum amdgpu_interrupt_state state) 421 { 422 u32 bits, i, tmp, reg; 423 424 /* Devices newer then VEGA10/12 shall have these programming 425 sequences performed by PSP BL */ 426 if (adev->asic_type >= CHIP_VEGA20) 427 return 0; 428 429 bits = 0x7f; 430 431 switch (state) { 432 case AMDGPU_IRQ_STATE_DISABLE: 433 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) { 434 reg = ecc_umc_mcumc_ctrl_addrs[i]; 435 tmp = RREG32(reg); 436 tmp &= ~bits; 437 WREG32(reg, tmp); 438 } 439 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) { 440 reg = ecc_umc_mcumc_ctrl_mask_addrs[i]; 441 tmp = RREG32(reg); 442 tmp &= ~bits; 443 WREG32(reg, tmp); 444 } 445 break; 446 case AMDGPU_IRQ_STATE_ENABLE: 447 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) { 448 reg = ecc_umc_mcumc_ctrl_addrs[i]; 449 tmp = RREG32(reg); 450 tmp |= bits; 451 WREG32(reg, tmp); 452 } 453 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) { 454 reg = ecc_umc_mcumc_ctrl_mask_addrs[i]; 455 tmp = RREG32(reg); 456 tmp |= bits; 457 WREG32(reg, tmp); 458 } 459 break; 460 default: 461 break; 462 } 463 464 return 0; 465 } 466 467 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, 468 struct amdgpu_irq_src *src, 469 unsigned type, 470 enum amdgpu_interrupt_state state) 471 { 472 struct amdgpu_vmhub *hub; 473 u32 tmp, reg, bits, i, j; 474 475 bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 476 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 477 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 478 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 479 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 480 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 481 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; 482 483 switch (state) { 484 case AMDGPU_IRQ_STATE_DISABLE: 485 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { 486 hub = &adev->vmhub[j]; 487 for (i = 0; i < 16; i++) { 488 reg = hub->vm_context0_cntl + i; 489 490 /* This works because this interrupt is only 491 * enabled at init/resume and disabled in 492 * fini/suspend, so the overall state doesn't 493 * change over the course of suspend/resume. 494 */ 495 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) 496 continue; 497 498 if (j >= AMDGPU_MMHUB0(0)) 499 tmp = RREG32_SOC15_IP(MMHUB, reg); 500 else 501 tmp = RREG32_SOC15_IP(GC, reg); 502 503 tmp &= ~bits; 504 505 if (j >= AMDGPU_MMHUB0(0)) 506 WREG32_SOC15_IP(MMHUB, reg, tmp); 507 else 508 WREG32_SOC15_IP(GC, reg, tmp); 509 } 510 } 511 break; 512 case AMDGPU_IRQ_STATE_ENABLE: 513 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { 514 hub = &adev->vmhub[j]; 515 for (i = 0; i < 16; i++) { 516 reg = hub->vm_context0_cntl + i; 517 518 /* This works because this interrupt is only 519 * enabled at init/resume and disabled in 520 * fini/suspend, so the overall state doesn't 521 * change over the course of suspend/resume. 522 */ 523 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) 524 continue; 525 526 if (j >= AMDGPU_MMHUB0(0)) 527 tmp = RREG32_SOC15_IP(MMHUB, reg); 528 else 529 tmp = RREG32_SOC15_IP(GC, reg); 530 531 tmp |= bits; 532 533 if (j >= AMDGPU_MMHUB0(0)) 534 WREG32_SOC15_IP(MMHUB, reg, tmp); 535 else 536 WREG32_SOC15_IP(GC, reg, tmp); 537 } 538 } 539 break; 540 default: 541 break; 542 } 543 544 return 0; 545 } 546 547 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, 548 struct amdgpu_irq_src *source, 549 struct amdgpu_iv_entry *entry) 550 { 551 bool retry_fault = !!(entry->src_data[1] & 0x80); 552 bool write_fault = !!(entry->src_data[1] & 0x20); 553 uint32_t status = 0, cid = 0, rw = 0; 554 struct amdgpu_task_info task_info; 555 struct amdgpu_vmhub *hub; 556 const char *mmhub_cid; 557 const char *hub_name; 558 u64 addr; 559 uint32_t cam_index = 0; 560 int ret; 561 uint32_t node_id, xcc_id = 0; 562 563 node_id = entry->node_id; 564 565 addr = (u64)entry->src_data[0] << 12; 566 addr |= ((u64)entry->src_data[1] & 0xf) << 44; 567 568 if (entry->client_id == SOC15_IH_CLIENTID_VMC) { 569 hub_name = "mmhub0"; 570 hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)]; 571 } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { 572 hub_name = "mmhub1"; 573 hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; 574 } else { 575 hub_name = "gfxhub0"; 576 if (adev->gfx.funcs->ih_node_to_logical_xcc) { 577 xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev, 578 node_id); 579 if (xcc_id < 0) 580 xcc_id = 0; 581 } 582 hub = &adev->vmhub[xcc_id]; 583 } 584 585 if (retry_fault) { 586 if (adev->irq.retry_cam_enabled) { 587 /* Delegate it to a different ring if the hardware hasn't 588 * already done it. 589 */ 590 if (entry->ih == &adev->irq.ih) { 591 amdgpu_irq_delegate(adev, entry, 8); 592 return 1; 593 } 594 595 cam_index = entry->src_data[2] & 0x3ff; 596 597 ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, 598 addr, write_fault); 599 WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); 600 if (ret) 601 return 1; 602 } else { 603 /* Process it onyl if it's the first fault for this address */ 604 if (entry->ih != &adev->irq.ih_soft && 605 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, 606 entry->timestamp)) 607 return 1; 608 609 /* Delegate it to a different ring if the hardware hasn't 610 * already done it. 611 */ 612 if (entry->ih == &adev->irq.ih) { 613 amdgpu_irq_delegate(adev, entry, 8); 614 return 1; 615 } 616 617 /* Try to handle the recoverable page faults by filling page 618 * tables 619 */ 620 if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, 621 addr, write_fault)) 622 return 1; 623 } 624 } 625 626 if (!printk_ratelimit()) 627 return 0; 628 629 630 memset(&task_info, 0, sizeof(struct amdgpu_task_info)); 631 amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); 632 633 dev_err(adev->dev, 634 "[%s] %s page fault (src_id:%u ring:%u vmid:%u " 635 "pasid:%u, for process %s pid %d thread %s pid %d)\n", 636 hub_name, retry_fault ? "retry" : "no-retry", 637 entry->src_id, entry->ring_id, entry->vmid, 638 entry->pasid, task_info.process_name, task_info.tgid, 639 task_info.task_name, task_info.pid); 640 dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n", 641 addr, entry->client_id, 642 soc15_ih_clientid_name[entry->client_id]); 643 644 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 645 dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", 646 node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4, 647 node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : ""); 648 649 if (amdgpu_sriov_vf(adev)) 650 return 0; 651 652 /* 653 * Issue a dummy read to wait for the status register to 654 * be updated to avoid reading an incorrect value due to 655 * the new fast GRBM interface. 656 */ 657 if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && 658 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) 659 RREG32(hub->vm_l2_pro_fault_status); 660 661 status = RREG32(hub->vm_l2_pro_fault_status); 662 cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID); 663 rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW); 664 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); 665 666 dev_err(adev->dev, 667 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", 668 status); 669 if (entry->vmid_src == AMDGPU_GFXHUB(0)) { 670 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", 671 cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : 672 gfxhub_client_ids[cid], 673 cid); 674 } else { 675 switch (adev->ip_versions[MMHUB_HWIP][0]) { 676 case IP_VERSION(9, 0, 0): 677 mmhub_cid = mmhub_client_ids_vega10[cid][rw]; 678 break; 679 case IP_VERSION(9, 3, 0): 680 mmhub_cid = mmhub_client_ids_vega12[cid][rw]; 681 break; 682 case IP_VERSION(9, 4, 0): 683 mmhub_cid = mmhub_client_ids_vega20[cid][rw]; 684 break; 685 case IP_VERSION(9, 4, 1): 686 mmhub_cid = mmhub_client_ids_arcturus[cid][rw]; 687 break; 688 case IP_VERSION(9, 1, 0): 689 case IP_VERSION(9, 2, 0): 690 mmhub_cid = mmhub_client_ids_raven[cid][rw]; 691 break; 692 case IP_VERSION(1, 5, 0): 693 case IP_VERSION(2, 4, 0): 694 mmhub_cid = mmhub_client_ids_renoir[cid][rw]; 695 break; 696 case IP_VERSION(1, 8, 0): 697 case IP_VERSION(9, 4, 2): 698 mmhub_cid = mmhub_client_ids_aldebaran[cid][rw]; 699 break; 700 default: 701 mmhub_cid = NULL; 702 break; 703 } 704 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", 705 mmhub_cid ? mmhub_cid : "unknown", cid); 706 } 707 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", 708 REG_GET_FIELD(status, 709 VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); 710 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", 711 REG_GET_FIELD(status, 712 VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); 713 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", 714 REG_GET_FIELD(status, 715 VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); 716 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", 717 REG_GET_FIELD(status, 718 VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); 719 dev_err(adev->dev, "\t RW: 0x%x\n", rw); 720 return 0; 721 } 722 723 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = { 724 .set = gmc_v9_0_vm_fault_interrupt_state, 725 .process = gmc_v9_0_process_interrupt, 726 }; 727 728 729 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = { 730 .set = gmc_v9_0_ecc_interrupt_state, 731 .process = amdgpu_umc_process_ecc_irq, 732 }; 733 734 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) 735 { 736 adev->gmc.vm_fault.num_types = 1; 737 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; 738 739 if (!amdgpu_sriov_vf(adev) && 740 !adev->gmc.xgmi.connected_to_cpu) { 741 adev->gmc.ecc_irq.num_types = 1; 742 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; 743 } 744 } 745 746 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, 747 uint32_t flush_type) 748 { 749 u32 req = 0; 750 751 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, 752 PER_VMID_INVALIDATE_REQ, 1 << vmid); 753 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); 754 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); 755 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); 756 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); 757 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); 758 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); 759 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, 760 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); 761 762 return req; 763 } 764 765 /** 766 * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore 767 * 768 * @adev: amdgpu_device pointer 769 * @vmhub: vmhub type 770 * 771 */ 772 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, 773 uint32_t vmhub) 774 { 775 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || 776 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 777 return false; 778 779 return ((vmhub == AMDGPU_MMHUB0(0) || 780 vmhub == AMDGPU_MMHUB1(0)) && 781 (!amdgpu_sriov_vf(adev)) && 782 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && 783 (adev->apu_flags & AMD_APU_IS_PICASSO)))); 784 } 785 786 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, 787 uint8_t vmid, uint16_t *p_pasid) 788 { 789 uint32_t value; 790 791 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) 792 + vmid); 793 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; 794 795 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); 796 } 797 798 /* 799 * GART 800 * VMID 0 is the physical GPU addresses as used by the kernel. 801 * VMIDs 1-15 are used for userspace clients and are handled 802 * by the amdgpu vm/hsa code. 803 */ 804 805 /** 806 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type 807 * 808 * @adev: amdgpu_device pointer 809 * @vmid: vm instance to flush 810 * @vmhub: which hub to flush 811 * @flush_type: the flush type 812 * 813 * Flush the TLB for the requested page table using certain type. 814 */ 815 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, 816 uint32_t vmhub, uint32_t flush_type) 817 { 818 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); 819 const unsigned eng = 17; 820 u32 j, inv_req, inv_req2, tmp; 821 struct amdgpu_vmhub *hub; 822 823 BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS); 824 825 hub = &adev->vmhub[vmhub]; 826 if (adev->gmc.xgmi.num_physical_nodes && 827 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) { 828 /* Vega20+XGMI caches PTEs in TC and TLB. Add a 829 * heavy-weight TLB flush (type 2), which flushes 830 * both. Due to a race condition with concurrent 831 * memory accesses using the same TLB cache line, we 832 * still need a second TLB flush after this. 833 */ 834 inv_req = gmc_v9_0_get_invalidate_req(vmid, 2); 835 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); 836 } else if (flush_type == 2 && 837 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) && 838 adev->rev_id == 0) { 839 inv_req = gmc_v9_0_get_invalidate_req(vmid, 0); 840 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); 841 } else { 842 inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); 843 inv_req2 = 0; 844 } 845 846 /* This is necessary for a HW workaround under SRIOV as well 847 * as GFXOFF under bare metal 848 */ 849 if (adev->gfx.kiq[0].ring.sched.ready && 850 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && 851 down_read_trylock(&adev->reset_domain->sem)) { 852 uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; 853 uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; 854 855 amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 856 1 << vmid); 857 up_read(&adev->reset_domain->sem); 858 return; 859 } 860 861 spin_lock(&adev->gmc.invalidate_lock); 862 863 /* 864 * It may lose gpuvm invalidate acknowldege state across power-gating 865 * off cycle, add semaphore acquire before invalidation and semaphore 866 * release after invalidation to avoid entering power gated state 867 * to WA the Issue 868 */ 869 870 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 871 if (use_semaphore) { 872 for (j = 0; j < adev->usec_timeout; j++) { 873 /* a read return value of 1 means semaphore acquire */ 874 if (vmhub >= AMDGPU_MMHUB0(0)) 875 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng); 876 else 877 tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng); 878 if (tmp & 0x1) 879 break; 880 udelay(1); 881 } 882 883 if (j >= adev->usec_timeout) 884 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); 885 } 886 887 do { 888 if (vmhub >= AMDGPU_MMHUB0(0)) 889 WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); 890 else 891 WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); 892 893 /* 894 * Issue a dummy read to wait for the ACK register to 895 * be cleared to avoid a false ACK due to the new fast 896 * GRBM interface. 897 */ 898 if ((vmhub == AMDGPU_GFXHUB(0)) && 899 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) 900 RREG32_NO_KIQ(hub->vm_inv_eng0_req + 901 hub->eng_distance * eng); 902 903 for (j = 0; j < adev->usec_timeout; j++) { 904 if (vmhub >= AMDGPU_MMHUB0(0)) 905 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng); 906 else 907 tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng); 908 if (tmp & (1 << vmid)) 909 break; 910 udelay(1); 911 } 912 913 inv_req = inv_req2; 914 inv_req2 = 0; 915 } while (inv_req); 916 917 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 918 if (use_semaphore) { 919 /* 920 * add semaphore release after invalidation, 921 * write with 0 means semaphore release 922 */ 923 if (vmhub >= AMDGPU_MMHUB0(0)) 924 WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); 925 else 926 WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); 927 } 928 929 spin_unlock(&adev->gmc.invalidate_lock); 930 931 if (j < adev->usec_timeout) 932 return; 933 934 DRM_ERROR("Timeout waiting for VM flush ACK!\n"); 935 } 936 937 /** 938 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid 939 * 940 * @adev: amdgpu_device pointer 941 * @pasid: pasid to be flush 942 * @flush_type: the flush type 943 * @all_hub: flush all hubs 944 * 945 * Flush the TLB for the requested pasid. 946 */ 947 static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, 948 uint16_t pasid, uint32_t flush_type, 949 bool all_hub, uint32_t inst) 950 { 951 int vmid, i; 952 signed long r; 953 uint32_t seq; 954 uint16_t queried_pasid; 955 bool ret; 956 u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout; 957 struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring; 958 struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst]; 959 960 if (amdgpu_in_reset(adev)) 961 return -EIO; 962 963 if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) { 964 /* Vega20+XGMI caches PTEs in TC and TLB. Add a 965 * heavy-weight TLB flush (type 2), which flushes 966 * both. Due to a race condition with concurrent 967 * memory accesses using the same TLB cache line, we 968 * still need a second TLB flush after this. 969 */ 970 bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes && 971 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)); 972 /* 2 dwords flush + 8 dwords fence */ 973 unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8; 974 975 if (vega20_xgmi_wa) 976 ndw += kiq->pmf->invalidate_tlbs_size; 977 978 spin_lock(&adev->gfx.kiq[inst].ring_lock); 979 /* 2 dwords flush + 8 dwords fence */ 980 amdgpu_ring_alloc(ring, ndw); 981 if (vega20_xgmi_wa) 982 kiq->pmf->kiq_invalidate_tlbs(ring, 983 pasid, 2, all_hub); 984 985 if (flush_type == 2 && 986 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) && 987 adev->rev_id == 0) 988 kiq->pmf->kiq_invalidate_tlbs(ring, 989 pasid, 0, all_hub); 990 991 kiq->pmf->kiq_invalidate_tlbs(ring, 992 pasid, flush_type, all_hub); 993 r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); 994 if (r) { 995 amdgpu_ring_undo(ring); 996 spin_unlock(&adev->gfx.kiq[inst].ring_lock); 997 up_read(&adev->reset_domain->sem); 998 return -ETIME; 999 } 1000 1001 amdgpu_ring_commit(ring); 1002 spin_unlock(&adev->gfx.kiq[inst].ring_lock); 1003 r = amdgpu_fence_wait_polling(ring, seq, usec_timeout); 1004 if (r < 1) { 1005 dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); 1006 up_read(&adev->reset_domain->sem); 1007 return -ETIME; 1008 } 1009 up_read(&adev->reset_domain->sem); 1010 return 0; 1011 } 1012 1013 for (vmid = 1; vmid < 16; vmid++) { 1014 1015 ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid, 1016 &queried_pasid); 1017 if (ret && queried_pasid == pasid) { 1018 if (all_hub) { 1019 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) 1020 gmc_v9_0_flush_gpu_tlb(adev, vmid, 1021 i, flush_type); 1022 } else { 1023 gmc_v9_0_flush_gpu_tlb(adev, vmid, 1024 AMDGPU_GFXHUB(0), flush_type); 1025 } 1026 break; 1027 } 1028 } 1029 1030 return 0; 1031 1032 } 1033 1034 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, 1035 unsigned vmid, uint64_t pd_addr) 1036 { 1037 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); 1038 struct amdgpu_device *adev = ring->adev; 1039 struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; 1040 uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); 1041 unsigned eng = ring->vm_inv_eng; 1042 1043 /* 1044 * It may lose gpuvm invalidate acknowldege state across power-gating 1045 * off cycle, add semaphore acquire before invalidation and semaphore 1046 * release after invalidation to avoid entering power gated state 1047 * to WA the Issue 1048 */ 1049 1050 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 1051 if (use_semaphore) 1052 /* a read return value of 1 means semaphore acuqire */ 1053 amdgpu_ring_emit_reg_wait(ring, 1054 hub->vm_inv_eng0_sem + 1055 hub->eng_distance * eng, 0x1, 0x1); 1056 1057 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + 1058 (hub->ctx_addr_distance * vmid), 1059 lower_32_bits(pd_addr)); 1060 1061 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + 1062 (hub->ctx_addr_distance * vmid), 1063 upper_32_bits(pd_addr)); 1064 1065 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + 1066 hub->eng_distance * eng, 1067 hub->vm_inv_eng0_ack + 1068 hub->eng_distance * eng, 1069 req, 1 << vmid); 1070 1071 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 1072 if (use_semaphore) 1073 /* 1074 * add semaphore release after invalidation, 1075 * write with 0 means semaphore release 1076 */ 1077 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + 1078 hub->eng_distance * eng, 0); 1079 1080 return pd_addr; 1081 } 1082 1083 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, 1084 unsigned pasid) 1085 { 1086 struct amdgpu_device *adev = ring->adev; 1087 uint32_t reg; 1088 1089 /* Do nothing because there's no lut register for mmhub1. */ 1090 if (ring->vm_hub == AMDGPU_MMHUB1(0)) 1091 return; 1092 1093 if (ring->vm_hub == AMDGPU_GFXHUB(0)) 1094 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; 1095 else 1096 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; 1097 1098 amdgpu_ring_emit_wreg(ring, reg, pasid); 1099 } 1100 1101 /* 1102 * PTE format on VEGA 10: 1103 * 63:59 reserved 1104 * 58:57 mtype 1105 * 56 F 1106 * 55 L 1107 * 54 P 1108 * 53 SW 1109 * 52 T 1110 * 50:48 reserved 1111 * 47:12 4k physical page base address 1112 * 11:7 fragment 1113 * 6 write 1114 * 5 read 1115 * 4 exe 1116 * 3 Z 1117 * 2 snooped 1118 * 1 system 1119 * 0 valid 1120 * 1121 * PDE format on VEGA 10: 1122 * 63:59 block fragment size 1123 * 58:55 reserved 1124 * 54 P 1125 * 53:48 reserved 1126 * 47:6 physical base address of PD or PTE 1127 * 5:3 reserved 1128 * 2 C 1129 * 1 system 1130 * 0 valid 1131 */ 1132 1133 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) 1134 1135 { 1136 switch (flags) { 1137 case AMDGPU_VM_MTYPE_DEFAULT: 1138 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); 1139 case AMDGPU_VM_MTYPE_NC: 1140 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); 1141 case AMDGPU_VM_MTYPE_WC: 1142 return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC); 1143 case AMDGPU_VM_MTYPE_RW: 1144 return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW); 1145 case AMDGPU_VM_MTYPE_CC: 1146 return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); 1147 case AMDGPU_VM_MTYPE_UC: 1148 return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC); 1149 default: 1150 return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); 1151 } 1152 } 1153 1154 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, 1155 uint64_t *addr, uint64_t *flags) 1156 { 1157 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) 1158 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); 1159 BUG_ON(*addr & 0xFFFF00000000003FULL); 1160 1161 if (!adev->gmc.translate_further) 1162 return; 1163 1164 if (level == AMDGPU_VM_PDB1) { 1165 /* Set the block fragment size */ 1166 if (!(*flags & AMDGPU_PDE_PTE)) 1167 *flags |= AMDGPU_PDE_BFS(0x9); 1168 1169 } else if (level == AMDGPU_VM_PDB0) { 1170 if (*flags & AMDGPU_PDE_PTE) { 1171 *flags &= ~AMDGPU_PDE_PTE; 1172 if (!(*flags & AMDGPU_PTE_VALID)) 1173 *addr |= 1 << PAGE_SHIFT; 1174 } else { 1175 *flags |= AMDGPU_PTE_TF; 1176 } 1177 } 1178 } 1179 1180 static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, 1181 struct amdgpu_bo *bo, 1182 struct amdgpu_bo_va_mapping *mapping, 1183 uint64_t *flags) 1184 { 1185 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); 1186 bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM; 1187 bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT; 1188 bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; 1189 /* TODO: memory partitions struct amdgpu_vm *vm = mapping->bo_va->base.vm;*/ 1190 unsigned int mtype_local, mtype; 1191 bool snoop = false; 1192 bool is_local; 1193 1194 switch (adev->ip_versions[GC_HWIP][0]) { 1195 case IP_VERSION(9, 4, 1): 1196 case IP_VERSION(9, 4, 2): 1197 if (is_vram) { 1198 if (bo_adev == adev) { 1199 if (uncached) 1200 mtype = MTYPE_UC; 1201 else if (coherent) 1202 mtype = MTYPE_CC; 1203 else 1204 mtype = MTYPE_RW; 1205 /* FIXME: is this still needed? Or does 1206 * amdgpu_ttm_tt_pde_flags already handle this? 1207 */ 1208 if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || 1209 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) && 1210 adev->gmc.xgmi.connected_to_cpu) 1211 snoop = true; 1212 } else { 1213 if (uncached || coherent) 1214 mtype = MTYPE_UC; 1215 else 1216 mtype = MTYPE_NC; 1217 if (mapping->bo_va->is_xgmi) 1218 snoop = true; 1219 } 1220 } else { 1221 if (uncached || coherent) 1222 mtype = MTYPE_UC; 1223 else 1224 mtype = MTYPE_NC; 1225 /* FIXME: is this still needed? Or does 1226 * amdgpu_ttm_tt_pde_flags already handle this? 1227 */ 1228 snoop = true; 1229 } 1230 break; 1231 case IP_VERSION(9, 4, 3): 1232 /* Only local VRAM BOs or system memory on non-NUMA APUs 1233 * can be assumed to be local in their entirety. Choose 1234 * MTYPE_NC as safe fallback for all system memory BOs on 1235 * NUMA systems. Their MTYPE can be overridden per-page in 1236 * gmc_v9_0_override_vm_pte_flags. 1237 */ 1238 mtype_local = amdgpu_use_mtype_cc_wa ? MTYPE_CC : MTYPE_RW; 1239 is_local = (!is_vram && (adev->flags & AMD_IS_APU) && 1240 num_possible_nodes() <= 1) || 1241 (is_vram && adev == bo_adev /* TODO: memory partitions && 1242 bo->mem_id == vm->mem_id*/); 1243 snoop = true; 1244 if (uncached) { 1245 mtype = MTYPE_UC; 1246 } else if (adev->flags & AMD_IS_APU) { 1247 mtype = is_local ? mtype_local : MTYPE_NC; 1248 } else { 1249 /* dGPU */ 1250 if (is_local) 1251 mtype = mtype_local; 1252 else if (is_vram) 1253 mtype = MTYPE_NC; 1254 else 1255 mtype = MTYPE_UC; 1256 } 1257 1258 break; 1259 default: 1260 if (uncached || coherent) 1261 mtype = MTYPE_UC; 1262 else 1263 mtype = MTYPE_NC; 1264 1265 /* FIXME: is this still needed? Or does 1266 * amdgpu_ttm_tt_pde_flags already handle this? 1267 */ 1268 if (!is_vram) 1269 snoop = true; 1270 } 1271 1272 if (mtype != MTYPE_NC) 1273 *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | 1274 AMDGPU_PTE_MTYPE_VG10(mtype); 1275 *flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; 1276 } 1277 1278 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, 1279 struct amdgpu_bo_va_mapping *mapping, 1280 uint64_t *flags) 1281 { 1282 struct amdgpu_bo *bo = mapping->bo_va->base.bo; 1283 1284 *flags &= ~AMDGPU_PTE_EXECUTABLE; 1285 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; 1286 1287 *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; 1288 *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK; 1289 1290 if (mapping->flags & AMDGPU_PTE_PRT) { 1291 *flags |= AMDGPU_PTE_PRT; 1292 *flags &= ~AMDGPU_PTE_VALID; 1293 } 1294 1295 if (bo && bo->tbo.resource) 1296 gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo, 1297 mapping, flags); 1298 } 1299 1300 static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) 1301 { 1302 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); 1303 unsigned size; 1304 1305 /* TODO move to DC so GMC doesn't need to hard-code DCN registers */ 1306 1307 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { 1308 size = AMDGPU_VBIOS_VGA_ALLOCATION; 1309 } else { 1310 u32 viewport; 1311 1312 switch (adev->ip_versions[DCE_HWIP][0]) { 1313 case IP_VERSION(1, 0, 0): 1314 case IP_VERSION(1, 0, 1): 1315 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); 1316 size = (REG_GET_FIELD(viewport, 1317 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * 1318 REG_GET_FIELD(viewport, 1319 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * 1320 4); 1321 break; 1322 case IP_VERSION(2, 1, 0): 1323 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2); 1324 size = (REG_GET_FIELD(viewport, 1325 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * 1326 REG_GET_FIELD(viewport, 1327 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * 1328 4); 1329 break; 1330 default: 1331 viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); 1332 size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * 1333 REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) * 1334 4); 1335 break; 1336 } 1337 } 1338 1339 return size; 1340 } 1341 1342 static enum amdgpu_memory_partition 1343 gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) 1344 { 1345 enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; 1346 1347 if (adev->nbio.funcs->get_memory_partition_mode) 1348 mode = adev->nbio.funcs->get_memory_partition_mode(adev, 1349 supp_modes); 1350 1351 return mode; 1352 } 1353 1354 static enum amdgpu_memory_partition 1355 gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) 1356 { 1357 if (amdgpu_sriov_vf(adev)) 1358 return AMDGPU_NPS1_PARTITION_MODE; 1359 1360 return gmc_v9_0_get_memory_partition(adev, NULL); 1361 } 1362 1363 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { 1364 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, 1365 .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, 1366 .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, 1367 .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, 1368 .map_mtype = gmc_v9_0_map_mtype, 1369 .get_vm_pde = gmc_v9_0_get_vm_pde, 1370 .get_vm_pte = gmc_v9_0_get_vm_pte, 1371 .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, 1372 .query_mem_partition_mode = &gmc_v9_0_query_memory_partition, 1373 }; 1374 1375 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) 1376 { 1377 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; 1378 } 1379 1380 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) 1381 { 1382 switch (adev->ip_versions[UMC_HWIP][0]) { 1383 case IP_VERSION(6, 0, 0): 1384 adev->umc.funcs = &umc_v6_0_funcs; 1385 break; 1386 case IP_VERSION(6, 1, 1): 1387 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; 1388 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; 1389 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; 1390 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; 1391 adev->umc.retire_unit = 1; 1392 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; 1393 adev->umc.ras = &umc_v6_1_ras; 1394 break; 1395 case IP_VERSION(6, 1, 2): 1396 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; 1397 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; 1398 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; 1399 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; 1400 adev->umc.retire_unit = 1; 1401 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; 1402 adev->umc.ras = &umc_v6_1_ras; 1403 break; 1404 case IP_VERSION(6, 7, 0): 1405 adev->umc.max_ras_err_cnt_per_query = 1406 UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL; 1407 adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; 1408 adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; 1409 adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET; 1410 adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2); 1411 if (!adev->gmc.xgmi.connected_to_cpu) 1412 adev->umc.ras = &umc_v6_7_ras; 1413 if (1 & adev->smuio.funcs->get_die_id(adev)) 1414 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0]; 1415 else 1416 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0]; 1417 break; 1418 default: 1419 break; 1420 } 1421 } 1422 1423 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) 1424 { 1425 switch (adev->ip_versions[MMHUB_HWIP][0]) { 1426 case IP_VERSION(9, 4, 1): 1427 adev->mmhub.funcs = &mmhub_v9_4_funcs; 1428 break; 1429 case IP_VERSION(9, 4, 2): 1430 adev->mmhub.funcs = &mmhub_v1_7_funcs; 1431 break; 1432 case IP_VERSION(1, 8, 0): 1433 adev->mmhub.funcs = &mmhub_v1_8_funcs; 1434 break; 1435 default: 1436 adev->mmhub.funcs = &mmhub_v1_0_funcs; 1437 break; 1438 } 1439 } 1440 1441 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) 1442 { 1443 switch (adev->ip_versions[MMHUB_HWIP][0]) { 1444 case IP_VERSION(9, 4, 0): 1445 adev->mmhub.ras = &mmhub_v1_0_ras; 1446 break; 1447 case IP_VERSION(9, 4, 1): 1448 adev->mmhub.ras = &mmhub_v9_4_ras; 1449 break; 1450 case IP_VERSION(9, 4, 2): 1451 adev->mmhub.ras = &mmhub_v1_7_ras; 1452 break; 1453 case IP_VERSION(1, 8, 0): 1454 adev->mmhub.ras = &mmhub_v1_8_ras; 1455 break; 1456 default: 1457 /* mmhub ras is not available */ 1458 break; 1459 } 1460 } 1461 1462 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) 1463 { 1464 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 1465 adev->gfxhub.funcs = &gfxhub_v1_2_funcs; 1466 else 1467 adev->gfxhub.funcs = &gfxhub_v1_0_funcs; 1468 } 1469 1470 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) 1471 { 1472 adev->hdp.ras = &hdp_v4_0_ras; 1473 } 1474 1475 static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev) 1476 { 1477 struct amdgpu_mca *mca = &adev->mca; 1478 1479 /* is UMC the right IP to check for MCA? Maybe DF? */ 1480 switch (adev->ip_versions[UMC_HWIP][0]) { 1481 case IP_VERSION(6, 7, 0): 1482 if (!adev->gmc.xgmi.connected_to_cpu) { 1483 mca->mp0.ras = &mca_v3_0_mp0_ras; 1484 mca->mp1.ras = &mca_v3_0_mp1_ras; 1485 mca->mpio.ras = &mca_v3_0_mpio_ras; 1486 } 1487 break; 1488 default: 1489 break; 1490 } 1491 } 1492 1493 static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev) 1494 { 1495 if (!adev->gmc.xgmi.connected_to_cpu) 1496 adev->gmc.xgmi.ras = &xgmi_ras; 1497 } 1498 1499 static int gmc_v9_0_early_init(void *handle) 1500 { 1501 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1502 1503 /* 1504 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined 1505 * in their IP discovery tables 1506 */ 1507 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) || 1508 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || 1509 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 1510 adev->gmc.xgmi.supported = true; 1511 1512 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { 1513 adev->gmc.xgmi.supported = true; 1514 adev->gmc.xgmi.connected_to_cpu = 1515 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); 1516 } 1517 1518 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { 1519 enum amdgpu_pkg_type pkg_type = 1520 adev->smuio.funcs->get_pkg_type(adev); 1521 /* On GFXIP 9.4.3. APU, there is no physical VRAM domain present 1522 * and the APU, can be in used two possible modes: 1523 * - carveout mode 1524 * - native APU mode 1525 * "is_app_apu" can be used to identify the APU in the native 1526 * mode. 1527 */ 1528 adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU && 1529 !pci_resource_len(adev->pdev, 0)); 1530 } 1531 1532 gmc_v9_0_set_gmc_funcs(adev); 1533 gmc_v9_0_set_irq_funcs(adev); 1534 gmc_v9_0_set_umc_funcs(adev); 1535 gmc_v9_0_set_mmhub_funcs(adev); 1536 gmc_v9_0_set_mmhub_ras_funcs(adev); 1537 gmc_v9_0_set_gfxhub_funcs(adev); 1538 gmc_v9_0_set_hdp_ras_funcs(adev); 1539 gmc_v9_0_set_mca_ras_funcs(adev); 1540 gmc_v9_0_set_xgmi_ras_funcs(adev); 1541 1542 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; 1543 adev->gmc.shared_aperture_end = 1544 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; 1545 adev->gmc.private_aperture_start = 0x1000000000000000ULL; 1546 adev->gmc.private_aperture_end = 1547 adev->gmc.private_aperture_start + (4ULL << 30) - 1; 1548 1549 return 0; 1550 } 1551 1552 static int gmc_v9_0_late_init(void *handle) 1553 { 1554 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1555 int r; 1556 1557 r = amdgpu_gmc_allocate_vm_inv_eng(adev); 1558 if (r) 1559 return r; 1560 1561 /* 1562 * Workaround performance drop issue with VBIOS enables partial 1563 * writes, while disables HBM ECC for vega10. 1564 */ 1565 if (!amdgpu_sriov_vf(adev) && 1566 (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) { 1567 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { 1568 if (adev->df.funcs && 1569 adev->df.funcs->enable_ecc_force_par_wr_rmw) 1570 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); 1571 } 1572 } 1573 1574 if (!amdgpu_persistent_edc_harvesting_supported(adev)) { 1575 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && 1576 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) 1577 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); 1578 1579 if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops && 1580 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count) 1581 adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev); 1582 } 1583 1584 r = amdgpu_gmc_ras_late_init(adev); 1585 if (r) 1586 return r; 1587 1588 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); 1589 } 1590 1591 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, 1592 struct amdgpu_gmc *mc) 1593 { 1594 u64 base = adev->mmhub.funcs->get_fb_location(adev); 1595 1596 /* add the xgmi offset of the physical node */ 1597 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; 1598 if (adev->gmc.xgmi.connected_to_cpu) { 1599 amdgpu_gmc_sysvm_location(adev, mc); 1600 } else { 1601 amdgpu_gmc_vram_location(adev, mc, base); 1602 amdgpu_gmc_gart_location(adev, mc); 1603 amdgpu_gmc_agp_location(adev, mc); 1604 } 1605 /* base offset of vram pages */ 1606 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); 1607 1608 /* XXX: add the xgmi offset of the physical node? */ 1609 adev->vm_manager.vram_base_offset += 1610 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; 1611 } 1612 1613 /** 1614 * gmc_v9_0_mc_init - initialize the memory controller driver params 1615 * 1616 * @adev: amdgpu_device pointer 1617 * 1618 * Look up the amount of vram, vram width, and decide how to place 1619 * vram and gart within the GPU's physical address space. 1620 * Returns 0 for success. 1621 */ 1622 static int gmc_v9_0_mc_init(struct amdgpu_device *adev) 1623 { 1624 int r; 1625 1626 /* size in MB on si */ 1627 if (!adev->gmc.is_app_apu) { 1628 adev->gmc.mc_vram_size = 1629 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; 1630 } else { 1631 DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n"); 1632 adev->gmc.mc_vram_size = 0; 1633 } 1634 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; 1635 1636 if (!(adev->flags & AMD_IS_APU) && 1637 !adev->gmc.xgmi.connected_to_cpu) { 1638 r = amdgpu_device_resize_fb_bar(adev); 1639 if (r) 1640 return r; 1641 } 1642 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); 1643 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); 1644 1645 #ifdef CONFIG_X86_64 1646 /* 1647 * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi 1648 * interface can use VRAM through here as it appears system reserved 1649 * memory in host address space. 1650 * 1651 * For APUs, VRAM is just the stolen system memory and can be accessed 1652 * directly. 1653 * 1654 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR. 1655 */ 1656 1657 /* check whether both host-gpu and gpu-gpu xgmi links exist */ 1658 if ((!amdgpu_sriov_vf(adev) && 1659 (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) || 1660 (adev->gmc.xgmi.supported && 1661 adev->gmc.xgmi.connected_to_cpu)) { 1662 adev->gmc.aper_base = 1663 adev->gfxhub.funcs->get_mc_fb_offset(adev) + 1664 adev->gmc.xgmi.physical_node_id * 1665 adev->gmc.xgmi.node_segment_size; 1666 adev->gmc.aper_size = adev->gmc.real_vram_size; 1667 } 1668 1669 #endif 1670 adev->gmc.visible_vram_size = adev->gmc.aper_size; 1671 1672 /* set the gart size */ 1673 if (amdgpu_gart_size == -1) { 1674 switch (adev->ip_versions[GC_HWIP][0]) { 1675 case IP_VERSION(9, 0, 1): /* all engines support GPUVM */ 1676 case IP_VERSION(9, 2, 1): /* all engines support GPUVM */ 1677 case IP_VERSION(9, 4, 0): 1678 case IP_VERSION(9, 4, 1): 1679 case IP_VERSION(9, 4, 2): 1680 case IP_VERSION(9, 4, 3): 1681 default: 1682 adev->gmc.gart_size = 512ULL << 20; 1683 break; 1684 case IP_VERSION(9, 1, 0): /* DCE SG support */ 1685 case IP_VERSION(9, 2, 2): /* DCE SG support */ 1686 case IP_VERSION(9, 3, 0): 1687 adev->gmc.gart_size = 1024ULL << 20; 1688 break; 1689 } 1690 } else { 1691 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; 1692 } 1693 1694 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; 1695 1696 gmc_v9_0_vram_gtt_location(adev, &adev->gmc); 1697 1698 return 0; 1699 } 1700 1701 static int gmc_v9_0_gart_init(struct amdgpu_device *adev) 1702 { 1703 int r; 1704 1705 if (adev->gart.bo) { 1706 WARN(1, "VEGA10 PCIE GART already initialized\n"); 1707 return 0; 1708 } 1709 1710 if (adev->gmc.xgmi.connected_to_cpu) { 1711 adev->gmc.vmid0_page_table_depth = 1; 1712 adev->gmc.vmid0_page_table_block_size = 12; 1713 } else { 1714 adev->gmc.vmid0_page_table_depth = 0; 1715 adev->gmc.vmid0_page_table_block_size = 0; 1716 } 1717 1718 /* Initialize common gart structure */ 1719 r = amdgpu_gart_init(adev); 1720 if (r) 1721 return r; 1722 adev->gart.table_size = adev->gart.num_gpu_pages * 8; 1723 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) | 1724 AMDGPU_PTE_EXECUTABLE; 1725 1726 if (!adev->gmc.real_vram_size) { 1727 dev_info(adev->dev, "Put GART in system memory for APU\n"); 1728 r = amdgpu_gart_table_ram_alloc(adev); 1729 if (r) 1730 dev_err(adev->dev, "Failed to allocate GART in system memory\n"); 1731 } else { 1732 r = amdgpu_gart_table_vram_alloc(adev); 1733 if (r) 1734 return r; 1735 1736 if (adev->gmc.xgmi.connected_to_cpu) 1737 r = amdgpu_gmc_pdb0_alloc(adev); 1738 } 1739 1740 return r; 1741 } 1742 1743 /** 1744 * gmc_v9_0_save_registers - saves regs 1745 * 1746 * @adev: amdgpu_device pointer 1747 * 1748 * This saves potential register values that should be 1749 * restored upon resume 1750 */ 1751 static void gmc_v9_0_save_registers(struct amdgpu_device *adev) 1752 { 1753 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || 1754 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) 1755 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); 1756 } 1757 1758 static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev) 1759 { 1760 enum amdgpu_memory_partition mode; 1761 u32 supp_modes; 1762 bool valid; 1763 1764 mode = gmc_v9_0_get_memory_partition(adev, &supp_modes); 1765 1766 /* Mode detected by hardware not present in supported modes */ 1767 if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && 1768 !(BIT(mode - 1) & supp_modes)) 1769 return false; 1770 1771 switch (mode) { 1772 case UNKNOWN_MEMORY_PARTITION_MODE: 1773 case AMDGPU_NPS1_PARTITION_MODE: 1774 valid = (adev->gmc.num_mem_partitions == 1); 1775 break; 1776 case AMDGPU_NPS2_PARTITION_MODE: 1777 valid = (adev->gmc.num_mem_partitions == 2); 1778 break; 1779 case AMDGPU_NPS4_PARTITION_MODE: 1780 valid = (adev->gmc.num_mem_partitions == 3 || 1781 adev->gmc.num_mem_partitions == 4); 1782 break; 1783 default: 1784 valid = false; 1785 } 1786 1787 return valid; 1788 } 1789 1790 static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid) 1791 { 1792 int i; 1793 1794 /* Check if node with id 'nid' is present in 'node_ids' array */ 1795 for (i = 0; i < num_ids; ++i) 1796 if (node_ids[i] == nid) 1797 return true; 1798 1799 return false; 1800 } 1801 1802 static void 1803 gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev, 1804 struct amdgpu_mem_partition_info *mem_ranges) 1805 { 1806 int num_ranges = 0, ret, mem_groups; 1807 struct amdgpu_numa_info numa_info; 1808 int node_ids[MAX_MEM_RANGES]; 1809 int num_xcc, xcc_id; 1810 uint32_t xcc_mask; 1811 1812 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 1813 xcc_mask = (1U << num_xcc) - 1; 1814 mem_groups = hweight32(adev->aid_mask); 1815 1816 for_each_inst(xcc_id, xcc_mask) { 1817 ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); 1818 if (ret) 1819 continue; 1820 1821 if (numa_info.nid == NUMA_NO_NODE) { 1822 mem_ranges[0].size = numa_info.size; 1823 mem_ranges[0].numa.node = numa_info.nid; 1824 num_ranges = 1; 1825 break; 1826 } 1827 1828 if (gmc_v9_0_is_node_present(node_ids, num_ranges, 1829 numa_info.nid)) 1830 continue; 1831 1832 node_ids[num_ranges] = numa_info.nid; 1833 mem_ranges[num_ranges].numa.node = numa_info.nid; 1834 mem_ranges[num_ranges].size = numa_info.size; 1835 ++num_ranges; 1836 } 1837 1838 adev->gmc.num_mem_partitions = num_ranges; 1839 1840 /* If there is only partition, don't use entire size */ 1841 if (adev->gmc.num_mem_partitions == 1) 1842 mem_ranges[0].size = 1843 (mem_ranges[0].size * (mem_groups - 1) / mem_groups); 1844 } 1845 1846 static void 1847 gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, 1848 struct amdgpu_mem_partition_info *mem_ranges) 1849 { 1850 enum amdgpu_memory_partition mode; 1851 u32 start_addr = 0, size; 1852 int i; 1853 1854 mode = gmc_v9_0_query_memory_partition(adev); 1855 1856 switch (mode) { 1857 case UNKNOWN_MEMORY_PARTITION_MODE: 1858 case AMDGPU_NPS1_PARTITION_MODE: 1859 adev->gmc.num_mem_partitions = 1; 1860 break; 1861 case AMDGPU_NPS2_PARTITION_MODE: 1862 adev->gmc.num_mem_partitions = 2; 1863 break; 1864 case AMDGPU_NPS4_PARTITION_MODE: 1865 if (adev->flags & AMD_IS_APU) 1866 adev->gmc.num_mem_partitions = 3; 1867 else 1868 adev->gmc.num_mem_partitions = 4; 1869 break; 1870 default: 1871 adev->gmc.num_mem_partitions = 1; 1872 break; 1873 } 1874 1875 size = (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) / 1876 adev->gmc.num_mem_partitions; 1877 1878 for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { 1879 mem_ranges[i].range.fpfn = start_addr; 1880 mem_ranges[i].size = ((u64)size << AMDGPU_GPU_PAGE_SHIFT); 1881 mem_ranges[i].range.lpfn = start_addr + size - 1; 1882 start_addr += size; 1883 } 1884 1885 /* Adjust the last one */ 1886 mem_ranges[adev->gmc.num_mem_partitions - 1].range.lpfn = 1887 (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; 1888 mem_ranges[adev->gmc.num_mem_partitions - 1].size = 1889 adev->gmc.real_vram_size - 1890 ((u64)mem_ranges[adev->gmc.num_mem_partitions - 1].range.fpfn 1891 << AMDGPU_GPU_PAGE_SHIFT); 1892 } 1893 1894 static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) 1895 { 1896 bool valid; 1897 1898 adev->gmc.mem_partitions = kzalloc( 1899 MAX_MEM_RANGES * sizeof(struct amdgpu_mem_partition_info), 1900 GFP_KERNEL); 1901 1902 if (!adev->gmc.mem_partitions) 1903 return -ENOMEM; 1904 1905 /* TODO : Get the range from PSP/Discovery for dGPU */ 1906 if (adev->gmc.is_app_apu) 1907 gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); 1908 else 1909 gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); 1910 1911 if (amdgpu_sriov_vf(adev)) 1912 valid = true; 1913 else 1914 valid = gmc_v9_0_validate_partition_info(adev); 1915 if (!valid) { 1916 /* TODO: handle invalid case */ 1917 dev_WARN(adev->dev, 1918 "Mem ranges not matching with hardware config"); 1919 } 1920 1921 return 0; 1922 } 1923 1924 static int gmc_v9_0_sw_init(void *handle) 1925 { 1926 int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits; 1927 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1928 unsigned long inst_mask = adev->aid_mask; 1929 1930 adev->gfxhub.funcs->init(adev); 1931 1932 adev->mmhub.funcs->init(adev); 1933 1934 spin_lock_init(&adev->gmc.invalidate_lock); 1935 1936 r = amdgpu_atomfirmware_get_vram_info(adev, 1937 &vram_width, &vram_type, &vram_vendor); 1938 if (amdgpu_sriov_vf(adev)) 1939 /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, 1940 * and DF related registers is not readable, seems hardcord is the 1941 * only way to set the correct vram_width 1942 */ 1943 adev->gmc.vram_width = 2048; 1944 else if (amdgpu_emu_mode != 1) 1945 adev->gmc.vram_width = vram_width; 1946 1947 if (!adev->gmc.vram_width) { 1948 int chansize, numchan; 1949 1950 /* hbm memory channel size */ 1951 if (adev->flags & AMD_IS_APU) 1952 chansize = 64; 1953 else 1954 chansize = 128; 1955 if (adev->df.funcs && 1956 adev->df.funcs->get_hbm_channel_number) { 1957 numchan = adev->df.funcs->get_hbm_channel_number(adev); 1958 adev->gmc.vram_width = numchan * chansize; 1959 } 1960 } 1961 1962 adev->gmc.vram_type = vram_type; 1963 adev->gmc.vram_vendor = vram_vendor; 1964 switch (adev->ip_versions[GC_HWIP][0]) { 1965 case IP_VERSION(9, 1, 0): 1966 case IP_VERSION(9, 2, 2): 1967 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); 1968 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); 1969 1970 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { 1971 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 1972 } else { 1973 /* vm_size is 128TB + 512GB for legacy 3-level page support */ 1974 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48); 1975 adev->gmc.translate_further = 1976 adev->vm_manager.num_level > 1; 1977 } 1978 break; 1979 case IP_VERSION(9, 0, 1): 1980 case IP_VERSION(9, 2, 1): 1981 case IP_VERSION(9, 4, 0): 1982 case IP_VERSION(9, 3, 0): 1983 case IP_VERSION(9, 4, 2): 1984 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); 1985 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); 1986 1987 /* 1988 * To fulfill 4-level page support, 1989 * vm size is 256TB (48bit), maximum size of Vega10, 1990 * block size 512 (9bit) 1991 */ 1992 /* sriov restrict max_pfn below AMDGPU_GMC_HOLE */ 1993 if (amdgpu_sriov_vf(adev)) 1994 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47); 1995 else 1996 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 1997 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) 1998 adev->gmc.translate_further = adev->vm_manager.num_level > 1; 1999 break; 2000 case IP_VERSION(9, 4, 1): 2001 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); 2002 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); 2003 set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask); 2004 2005 /* Keep the vm size same with Vega20 */ 2006 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 2007 adev->gmc.translate_further = adev->vm_manager.num_level > 1; 2008 break; 2009 case IP_VERSION(9, 4, 3): 2010 bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), 2011 NUM_XCC(adev->gfx.xcc_mask)); 2012 2013 inst_mask <<= AMDGPU_MMHUB0(0); 2014 bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32); 2015 2016 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 2017 break; 2018 default: 2019 break; 2020 } 2021 2022 /* This interrupt is VMC page fault.*/ 2023 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, 2024 &adev->gmc.vm_fault); 2025 if (r) 2026 return r; 2027 2028 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { 2029 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, 2030 &adev->gmc.vm_fault); 2031 if (r) 2032 return r; 2033 } 2034 2035 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, 2036 &adev->gmc.vm_fault); 2037 2038 if (r) 2039 return r; 2040 2041 if (!amdgpu_sriov_vf(adev) && 2042 !adev->gmc.xgmi.connected_to_cpu) { 2043 /* interrupt sent to DF. */ 2044 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, 2045 &adev->gmc.ecc_irq); 2046 if (r) 2047 return r; 2048 } 2049 2050 /* Set the internal MC address mask 2051 * This is the max address of the GPU's 2052 * internal address space. 2053 */ 2054 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ 2055 2056 dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44; 2057 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); 2058 if (r) { 2059 printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); 2060 return r; 2061 } 2062 adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits); 2063 2064 r = gmc_v9_0_mc_init(adev); 2065 if (r) 2066 return r; 2067 2068 amdgpu_gmc_get_vbios_allocations(adev); 2069 2070 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { 2071 r = gmc_v9_0_init_mem_ranges(adev); 2072 if (r) 2073 return r; 2074 } 2075 2076 /* Memory manager */ 2077 r = amdgpu_bo_init(adev); 2078 if (r) 2079 return r; 2080 2081 r = gmc_v9_0_gart_init(adev); 2082 if (r) 2083 return r; 2084 2085 /* 2086 * number of VMs 2087 * VMID 0 is reserved for System 2088 * amdgpu graphics/compute will use VMIDs 1..n-1 2089 * amdkfd will use VMIDs n..15 2090 * 2091 * The first KFD VMID is 8 for GPUs with graphics, 3 for 2092 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs 2093 * for video processing. 2094 */ 2095 adev->vm_manager.first_kfd_vmid = 2096 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || 2097 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || 2098 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8; 2099 2100 amdgpu_vm_manager_init(adev); 2101 2102 gmc_v9_0_save_registers(adev); 2103 2104 r = amdgpu_gmc_ras_sw_init(adev); 2105 if (r) 2106 return r; 2107 2108 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 2109 amdgpu_gmc_sysfs_init(adev); 2110 2111 return 0; 2112 } 2113 2114 static int gmc_v9_0_sw_fini(void *handle) 2115 { 2116 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2117 2118 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 2119 amdgpu_gmc_sysfs_fini(adev); 2120 adev->gmc.num_mem_partitions = 0; 2121 kfree(adev->gmc.mem_partitions); 2122 2123 amdgpu_gmc_ras_fini(adev); 2124 amdgpu_gem_force_release(adev); 2125 amdgpu_vm_manager_fini(adev); 2126 if (!adev->gmc.real_vram_size) { 2127 dev_info(adev->dev, "Put GART in system memory for APU free\n"); 2128 amdgpu_gart_table_ram_free(adev); 2129 } else { 2130 amdgpu_gart_table_vram_free(adev); 2131 } 2132 amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); 2133 amdgpu_bo_fini(adev); 2134 2135 return 0; 2136 } 2137 2138 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) 2139 { 2140 2141 switch (adev->ip_versions[MMHUB_HWIP][0]) { 2142 case IP_VERSION(9, 0, 0): 2143 if (amdgpu_sriov_vf(adev)) 2144 break; 2145 fallthrough; 2146 case IP_VERSION(9, 4, 0): 2147 soc15_program_register_sequence(adev, 2148 golden_settings_mmhub_1_0_0, 2149 ARRAY_SIZE(golden_settings_mmhub_1_0_0)); 2150 soc15_program_register_sequence(adev, 2151 golden_settings_athub_1_0_0, 2152 ARRAY_SIZE(golden_settings_athub_1_0_0)); 2153 break; 2154 case IP_VERSION(9, 1, 0): 2155 case IP_VERSION(9, 2, 0): 2156 /* TODO for renoir */ 2157 soc15_program_register_sequence(adev, 2158 golden_settings_athub_1_0_0, 2159 ARRAY_SIZE(golden_settings_athub_1_0_0)); 2160 break; 2161 default: 2162 break; 2163 } 2164 } 2165 2166 /** 2167 * gmc_v9_0_restore_registers - restores regs 2168 * 2169 * @adev: amdgpu_device pointer 2170 * 2171 * This restores register values, saved at suspend. 2172 */ 2173 void gmc_v9_0_restore_registers(struct amdgpu_device *adev) 2174 { 2175 if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || 2176 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) { 2177 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); 2178 WARN_ON(adev->gmc.sdpif_register != 2179 RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0)); 2180 } 2181 } 2182 2183 /** 2184 * gmc_v9_0_gart_enable - gart enable 2185 * 2186 * @adev: amdgpu_device pointer 2187 */ 2188 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) 2189 { 2190 int r; 2191 2192 if (adev->gmc.xgmi.connected_to_cpu) 2193 amdgpu_gmc_init_pdb0(adev); 2194 2195 if (adev->gart.bo == NULL) { 2196 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); 2197 return -EINVAL; 2198 } 2199 2200 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); 2201 2202 if (!adev->in_s0ix) { 2203 r = adev->gfxhub.funcs->gart_enable(adev); 2204 if (r) 2205 return r; 2206 } 2207 2208 r = adev->mmhub.funcs->gart_enable(adev); 2209 if (r) 2210 return r; 2211 2212 DRM_INFO("PCIE GART of %uM enabled.\n", 2213 (unsigned)(adev->gmc.gart_size >> 20)); 2214 if (adev->gmc.pdb0_bo) 2215 DRM_INFO("PDB0 located at 0x%016llX\n", 2216 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); 2217 DRM_INFO("PTB located at 0x%016llX\n", 2218 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); 2219 2220 return 0; 2221 } 2222 2223 static int gmc_v9_0_hw_init(void *handle) 2224 { 2225 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2226 bool value; 2227 int i, r; 2228 2229 /* The sequence of these two function calls matters.*/ 2230 gmc_v9_0_init_golden_registers(adev); 2231 2232 if (adev->mode_info.num_crtc) { 2233 /* Lockout access through VGA aperture*/ 2234 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); 2235 /* disable VGA render */ 2236 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); 2237 } 2238 2239 if (adev->mmhub.funcs->update_power_gating) 2240 adev->mmhub.funcs->update_power_gating(adev, true); 2241 2242 adev->hdp.funcs->init_registers(adev); 2243 2244 /* After HDP is initialized, flush HDP.*/ 2245 adev->hdp.funcs->flush_hdp(adev, NULL); 2246 2247 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 2248 value = false; 2249 else 2250 value = true; 2251 2252 if (!amdgpu_sriov_vf(adev)) { 2253 if (!adev->in_s0ix) 2254 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 2255 adev->mmhub.funcs->set_fault_enable_default(adev, value); 2256 } 2257 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { 2258 if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0))) 2259 continue; 2260 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); 2261 } 2262 2263 if (adev->umc.funcs && adev->umc.funcs->init_registers) 2264 adev->umc.funcs->init_registers(adev); 2265 2266 r = gmc_v9_0_gart_enable(adev); 2267 if (r) 2268 return r; 2269 2270 if (amdgpu_emu_mode == 1) 2271 return amdgpu_gmc_vram_checking(adev); 2272 else 2273 return r; 2274 } 2275 2276 /** 2277 * gmc_v9_0_gart_disable - gart disable 2278 * 2279 * @adev: amdgpu_device pointer 2280 * 2281 * This disables all VM page table. 2282 */ 2283 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) 2284 { 2285 if (!adev->in_s0ix) 2286 adev->gfxhub.funcs->gart_disable(adev); 2287 adev->mmhub.funcs->gart_disable(adev); 2288 } 2289 2290 static int gmc_v9_0_hw_fini(void *handle) 2291 { 2292 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2293 2294 gmc_v9_0_gart_disable(adev); 2295 2296 if (amdgpu_sriov_vf(adev)) { 2297 /* full access mode, so don't touch any GMC register */ 2298 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); 2299 return 0; 2300 } 2301 2302 /* 2303 * Pair the operations did in gmc_v9_0_hw_init and thus maintain 2304 * a correct cached state for GMC. Otherwise, the "gate" again 2305 * operation on S3 resuming will fail due to wrong cached state. 2306 */ 2307 if (adev->mmhub.funcs->update_power_gating) 2308 adev->mmhub.funcs->update_power_gating(adev, false); 2309 2310 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); 2311 2312 return 0; 2313 } 2314 2315 static int gmc_v9_0_suspend(void *handle) 2316 { 2317 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2318 2319 return gmc_v9_0_hw_fini(adev); 2320 } 2321 2322 static int gmc_v9_0_resume(void *handle) 2323 { 2324 int r; 2325 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2326 2327 r = gmc_v9_0_hw_init(adev); 2328 if (r) 2329 return r; 2330 2331 amdgpu_vmid_reset_all(adev); 2332 2333 return 0; 2334 } 2335 2336 static bool gmc_v9_0_is_idle(void *handle) 2337 { 2338 /* MC is always ready in GMC v9.*/ 2339 return true; 2340 } 2341 2342 static int gmc_v9_0_wait_for_idle(void *handle) 2343 { 2344 /* There is no need to wait for MC idle in GMC v9.*/ 2345 return 0; 2346 } 2347 2348 static int gmc_v9_0_soft_reset(void *handle) 2349 { 2350 /* XXX for emulation.*/ 2351 return 0; 2352 } 2353 2354 static int gmc_v9_0_set_clockgating_state(void *handle, 2355 enum amd_clockgating_state state) 2356 { 2357 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2358 2359 adev->mmhub.funcs->set_clockgating(adev, state); 2360 2361 athub_v1_0_set_clockgating(adev, state); 2362 2363 return 0; 2364 } 2365 2366 static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags) 2367 { 2368 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2369 2370 adev->mmhub.funcs->get_clockgating(adev, flags); 2371 2372 athub_v1_0_get_clockgating(adev, flags); 2373 } 2374 2375 static int gmc_v9_0_set_powergating_state(void *handle, 2376 enum amd_powergating_state state) 2377 { 2378 return 0; 2379 } 2380 2381 const struct amd_ip_funcs gmc_v9_0_ip_funcs = { 2382 .name = "gmc_v9_0", 2383 .early_init = gmc_v9_0_early_init, 2384 .late_init = gmc_v9_0_late_init, 2385 .sw_init = gmc_v9_0_sw_init, 2386 .sw_fini = gmc_v9_0_sw_fini, 2387 .hw_init = gmc_v9_0_hw_init, 2388 .hw_fini = gmc_v9_0_hw_fini, 2389 .suspend = gmc_v9_0_suspend, 2390 .resume = gmc_v9_0_resume, 2391 .is_idle = gmc_v9_0_is_idle, 2392 .wait_for_idle = gmc_v9_0_wait_for_idle, 2393 .soft_reset = gmc_v9_0_soft_reset, 2394 .set_clockgating_state = gmc_v9_0_set_clockgating_state, 2395 .set_powergating_state = gmc_v9_0_set_powergating_state, 2396 .get_clockgating_state = gmc_v9_0_get_clockgating_state, 2397 }; 2398 2399 const struct amdgpu_ip_block_version gmc_v9_0_ip_block = 2400 { 2401 .type = AMD_IP_BLOCK_TYPE_GMC, 2402 .major = 9, 2403 .minor = 0, 2404 .rev = 0, 2405 .funcs = &gmc_v9_0_ip_funcs, 2406 }; 2407