/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK   0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK  0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS 8

static const u32 golden_settings_vega10_hdp[] =
{
        0xf64, 0x0fffffff, 0x00000000,
        0xf65, 0x0fffffff, 0x00000000,
        0xf66, 0x0fffffff, 0x00000000,
        0xf67, 0x0fffffff, 0x00000000,
        0xf68, 0x0fffffff, 0x00000000,
        0xf6a, 0x0fffffff, 0x00000000,
        0xf6b, 0x0fffffff, 0x00000000,
        0xf6c, 0x0fffffff, 0x00000000,
        0xf6d, 0x0fffffff, 0x00000000,
        0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
        SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
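/*
 * The three tables below spell out raw UMC ECC register addresses rather
 * than header-provided offsets. Judging purely from the values, each group
 * of four entries steps by 0x800 (one channel instance) and consecutive
 * groups step by 0x40000 (one UMC instance), giving 8 UMC instances x 4
 * channels = 32 registers per table; the mask registers sit at ctrl + 0x20
 * and the status registers at ctrl + 0x2. This layout is inferred from the
 * offsets themselves, not taken from a register header.
 */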
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
        (0x000143c0 + 0x00000000),
        (0x000143c0 + 0x00000800),
        (0x000143c0 + 0x00001000),
        (0x000143c0 + 0x00001800),
        (0x000543c0 + 0x00000000),
        (0x000543c0 + 0x00000800),
        (0x000543c0 + 0x00001000),
        (0x000543c0 + 0x00001800),
        (0x000943c0 + 0x00000000),
        (0x000943c0 + 0x00000800),
        (0x000943c0 + 0x00001000),
        (0x000943c0 + 0x00001800),
        (0x000d43c0 + 0x00000000),
        (0x000d43c0 + 0x00000800),
        (0x000d43c0 + 0x00001000),
        (0x000d43c0 + 0x00001800),
        (0x001143c0 + 0x00000000),
        (0x001143c0 + 0x00000800),
        (0x001143c0 + 0x00001000),
        (0x001143c0 + 0x00001800),
        (0x001543c0 + 0x00000000),
        (0x001543c0 + 0x00000800),
        (0x001543c0 + 0x00001000),
        (0x001543c0 + 0x00001800),
        (0x001943c0 + 0x00000000),
        (0x001943c0 + 0x00000800),
        (0x001943c0 + 0x00001000),
        (0x001943c0 + 0x00001800),
        (0x001d43c0 + 0x00000000),
        (0x001d43c0 + 0x00000800),
        (0x001d43c0 + 0x00001000),
        (0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
        (0x000143e0 + 0x00000000),
        (0x000143e0 + 0x00000800),
        (0x000143e0 + 0x00001000),
        (0x000143e0 + 0x00001800),
        (0x000543e0 + 0x00000000),
        (0x000543e0 + 0x00000800),
        (0x000543e0 + 0x00001000),
        (0x000543e0 + 0x00001800),
        (0x000943e0 + 0x00000000),
        (0x000943e0 + 0x00000800),
        (0x000943e0 + 0x00001000),
        (0x000943e0 + 0x00001800),
        (0x000d43e0 + 0x00000000),
        (0x000d43e0 + 0x00000800),
        (0x000d43e0 + 0x00001000),
        (0x000d43e0 + 0x00001800),
        (0x001143e0 + 0x00000000),
        (0x001143e0 + 0x00000800),
        (0x001143e0 + 0x00001000),
        (0x001143e0 + 0x00001800),
        (0x001543e0 + 0x00000000),
        (0x001543e0 + 0x00000800),
        (0x001543e0 + 0x00001000),
        (0x001543e0 + 0x00001800),
        (0x001943e0 + 0x00000000),
        (0x001943e0 + 0x00000800),
        (0x001943e0 + 0x00001000),
        (0x001943e0 + 0x00001800),
        (0x001d43e0 + 0x00000000),
        (0x001d43e0 + 0x00000800),
        (0x001d43e0 + 0x00001000),
        (0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
        (0x000143c2 + 0x00000000),
        (0x000143c2 + 0x00000800),
        (0x000143c2 + 0x00001000),
        (0x000143c2 + 0x00001800),
        (0x000543c2 + 0x00000000),
        (0x000543c2 + 0x00000800),
        (0x000543c2 + 0x00001000),
        (0x000543c2 + 0x00001800),
        (0x000943c2 + 0x00000000),
        (0x000943c2 + 0x00000800),
        (0x000943c2 + 0x00001000),
        (0x000943c2 + 0x00001800),
        (0x000d43c2 + 0x00000000),
        (0x000d43c2 + 0x00000800),
        (0x000d43c2 + 0x00001000),
        (0x000d43c2 + 0x00001800),
        (0x001143c2 + 0x00000000),
        (0x001143c2 + 0x00000800),
        (0x001143c2 + 0x00001000),
        (0x001143c2 + 0x00001800),
        (0x001543c2 + 0x00000000),
        (0x001543c2 + 0x00000800),
        (0x001543c2 + 0x00001000),
        (0x001543c2 + 0x00001800),
        (0x001943c2 + 0x00000000),
        (0x001943c2 + 0x00000800),
        (0x001943c2 + 0x00001000),
        (0x001943c2 + 0x00001800),
        (0x001d43c2 + 0x00000000),
        (0x001d43c2 + 0x00000800),
        (0x001d43c2 + 0x00001000),
        (0x001d43c2 + 0x00001800),
};

static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
                struct amdgpu_irq_src *src,
                unsigned type,
                enum amdgpu_interrupt_state state)
{
        u32 bits, i, tmp, reg;

        /* Devices newer than VEGA10/12 have these programming
         * sequences performed by the PSP bootloader.
         */
        if (adev->asic_type >= CHIP_VEGA20)
                return 0;

        bits = 0x7f;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];
                        tmp = RREG32(reg);
                        tmp &= ~bits;
                        WREG32(reg, tmp);
                }
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
                        tmp = RREG32(reg);
                        tmp &= ~bits;
                        WREG32(reg, tmp);
                }
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_addrs[i];
                        tmp = RREG32(reg);
                        tmp |= bits;
                        WREG32(reg, tmp);
                }
                for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
                        reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
                        tmp = RREG32(reg);
                        tmp |= bits;
                        WREG32(reg, tmp);
                }
                break;
        default:
                break;
        }

        return 0;
}
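/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: the interrupt source this state change applies to
 * @type: interrupt type (unused here)
 * @state: interrupt state to program
 *
 * Sets or clears the protection fault interrupt enable bits in the
 * VM_CONTEXT*_CNTL registers of every VM hub; the inner loop walks the 16
 * per-context control registers starting at vm_context0_cntl.
 */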
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                struct amdgpu_irq_src *src,
                unsigned type,
                enum amdgpu_interrupt_state state)
{
        struct amdgpu_vmhub *hub;
        u32 tmp, reg, bits, i, j;

        bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                for (j = 0; j < adev->num_vmhubs; j++) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;
                                tmp = RREG32(reg);
                                tmp &= ~bits;
                                WREG32(reg, tmp);
                        }
                }
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                for (j = 0; j < adev->num_vmhubs; j++) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;
                                tmp = RREG32(reg);
                                tmp |= bits;
                                WREG32(reg, tmp);
                        }
                }
                break;
        default:
                break;
        }

        return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                                struct amdgpu_irq_src *source,
                                struct amdgpu_iv_entry *entry)
{
        struct amdgpu_vmhub *hub;
        bool retry_fault = !!(entry->src_data[1] & 0x80);
        uint32_t status = 0;
        u64 addr;
        char hub_name[10];

        /* reassemble the faulting page address from the IV entry */
        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
                                                    entry->timestamp))
                return 1; /* This also prevents sending it to KFD */

        if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
                snprintf(hub_name, sizeof(hub_name), "mmhub0");
                hub = &adev->vmhub[AMDGPU_MMHUB_0];
        } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
                snprintf(hub_name, sizeof(hub_name), "mmhub1");
                hub = &adev->vmhub[AMDGPU_MMHUB_1];
        } else {
                snprintf(hub_name, sizeof(hub_name), "gfxhub0");
                hub = &adev->vmhub[AMDGPU_GFXHUB_0];
        }

        /* If it's the first fault for this address, process it normally */
        if (retry_fault && !in_interrupt() &&
            amdgpu_vm_handle_fault(adev, entry->pasid, addr))
                return 1; /* This also prevents sending it to KFD */

        if (!amdgpu_sriov_vf(adev)) {
                /*
                 * Issue a dummy read to wait for the status register to
                 * be updated to avoid reading an incorrect value due to
                 * the new fast GRBM interface.
                 */
                if (entry->vmid_src == AMDGPU_GFXHUB_0)
                        RREG32(hub->vm_l2_pro_fault_status);

                status = RREG32(hub->vm_l2_pro_fault_status);
                /* write bit 0 to clear the retained fault status */
                WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
        }

        if (printk_ratelimit()) {
                struct amdgpu_task_info task_info;

                memset(&task_info, 0, sizeof(struct amdgpu_task_info));
                amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

                dev_err(adev->dev,
                        "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
                        "pasid:%u, for process %s pid %d thread %s pid %d)\n",
                        hub_name, retry_fault ? "retry" : "no-retry",
                        entry->src_id, entry->ring_id, entry->vmid,
                        entry->pasid, task_info.process_name, task_info.tgid,
                        task_info.task_name, task_info.pid);
                dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
                        addr, entry->client_id);
                if (!amdgpu_sriov_vf(adev)) {
                        dev_err(adev->dev,
                                "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
                                status);
                        dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
                                REG_GET_FIELD(status,
                                VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
                        dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
                                REG_GET_FIELD(status,
                                VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
                        dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
                                REG_GET_FIELD(status,
                                VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
                        dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
                                REG_GET_FIELD(status,
                                VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
                        dev_err(adev->dev, "\t RW: 0x%lx\n",
                                REG_GET_FIELD(status,
                                VM_L2_PROTECTION_FAULT_STATUS, RW));
                }
        }

        return 0;
}
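/*
 * Interrupt source dispatch tables: the .set hook programs the hardware
 * interrupt enable state and .process handles the IV ring entries retired
 * for the source.
 */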
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
        .set = gmc_v9_0_vm_fault_interrupt_state,
        .process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
        .set = gmc_v9_0_ecc_interrupt_state,
        .process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

        if (!amdgpu_sriov_vf(adev)) {
                adev->gmc.ecc_irq.num_types = 1;
                adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
        }
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
                                        uint32_t flush_type)
{
        u32 req = 0;

        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            PER_VMID_INVALIDATE_REQ, 1 << vmid);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
        req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
                            CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

        return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * The invalidation semaphore is used only on the MMHUBs and only on bare
 * metal, and is skipped on Raven device 0x15d8 with rev_id below 0x8,
 * which reportedly has problems with it.
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
                                        uint32_t vmhub)
{
        return ((vmhub == AMDGPU_MMHUB_0 ||
                 vmhub == AMDGPU_MMHUB_1) &&
                (!amdgpu_sriov_vf(adev)) &&
                (!(adev->asic_type == CHIP_RAVEN &&
                   adev->rev_id < 0x8 &&
                   adev->pdev->device == 0x15d8)));
}
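/*
 * Worked example for gmc_v9_0_get_invalidate_req() above (a reading aid,
 * not an extra code path): with vmid 4 and flush_type 0, the returned word
 * has PER_VMID_INVALIDATE_REQ = BIT(4), FLUSH_TYPE = 0 and all of the
 * L2 PTE/PDE0/PDE1/PDE2 and L1 PTE invalidate bits set, so a single write
 * of it to VM_INVALIDATE_ENG*_REQ flushes every translation level cached
 * for that VMID.
 */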
/*
 * GART
 * VMID 0 is used for the physical GPU addresses as seen by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using the given flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                        uint32_t vmhub, uint32_t flush_type)
{
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
        const unsigned eng = 17;
        u32 j, inv_req, tmp;
        struct amdgpu_vmhub *hub;

        BUG_ON(vmhub >= adev->num_vmhubs);

        hub = &adev->vmhub[vmhub];
        /*
         * Keep the request in its own variable; the semaphore poll below
         * reuses tmp and must not clobber the request value.
         */
        inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);

        /* This is necessary for a HW workaround under SRIOV as well
         * as GFXOFF under bare metal
         */
        if (adev->gfx.kiq.ring.sched.ready &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
            !adev->in_gpu_reset) {
                uint32_t req = hub->vm_inv_eng0_req + eng;
                uint32_t ack = hub->vm_inv_eng0_ack + eng;

                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
                                                   1 << vmid);
                return;
        }

        spin_lock(&adev->gmc.invalidate_lock);

        /*
         * The GPU may lose the gpuvm invalidate acknowledge state across
         * power-gating cycles, so acquire a semaphore before the
         * invalidation and release it afterwards to avoid entering a
         * power-gated state in between, as a workaround for the issue.
         */

        /* TODO: the semaphore path for the GFXHUB is still being debugged */
        if (use_semaphore) {
                for (j = 0; j < adev->usec_timeout; j++) {
                        /* a read return value of 1 means semaphore acquired */
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
                        if (tmp & 0x1)
                                break;
                        udelay(1);
                }

                if (j >= adev->usec_timeout)
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }

        WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);

        /*
         * Issue a dummy read to wait for the ACK register to be cleared
         * to avoid a false ACK due to the new fast GRBM interface.
         */
        if (vmhub == AMDGPU_GFXHUB_0)
                RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

        for (j = 0; j < adev->usec_timeout; j++) {
                tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
                if (tmp & (1 << vmid))
                        break;
                udelay(1);
        }

        /* TODO: the semaphore path for the GFXHUB is still being debugged */
        if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

        spin_unlock(&adev->gmc.invalidate_lock);

        if (j < adev->usec_timeout)
                return;

        DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            unsigned vmid, uint64_t pd_addr)
{
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
        uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;

        /*
         * The GPU may lose the gpuvm invalidate acknowledge state across
         * power-gating cycles, so acquire a semaphore before the
         * invalidation and release it afterwards to avoid entering a
         * power-gated state in between, as a workaround for the issue.
         */

        /* TODO: the semaphore path for the GFXHUB is still being debugged */
        if (use_semaphore)
                /* a read return value of 1 means semaphore acquired */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem + eng, 0x1, 0x1);

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
                              upper_32_bits(pd_addr));

        amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
                                            hub->vm_inv_eng0_ack + eng,
                                            req, 1 << vmid);

        /* TODO: the semaphore path for the GFXHUB is still being debugged */
        if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);

        return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
                                        unsigned pasid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;

        /* Do nothing because there's no lut register for mmhub1. */
        if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
                return;

        if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
        else
                reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

        amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
        switch (flags) {
        case AMDGPU_VM_MTYPE_DEFAULT:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_NC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_WC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
        case AMDGPU_VM_MTYPE_RW:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
        case AMDGPU_VM_MTYPE_CC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
        case AMDGPU_VM_MTYPE_UC:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
        default:
                return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
        }
}
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                uint64_t *addr, uint64_t *flags)
{
        if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
                *addr = adev->vm_manager.vram_base_offset + *addr -
                        adev->gmc.vram_start;
        /* the address must be 64-byte aligned and fit into 48 bits */
        BUG_ON(*addr & 0xFFFF00000000003FULL);

        if (!adev->gmc.translate_further)
                return;

        if (level == AMDGPU_VM_PDB1) {
                /* Set the block fragment size */
                if (!(*flags & AMDGPU_PDE_PTE))
                        *flags |= AMDGPU_PDE_BFS(0x9);

        } else if (level == AMDGPU_VM_PDB0) {
                if (*flags & AMDGPU_PDE_PTE)
                        *flags &= ~AMDGPU_PDE_PTE;
                else
                        *flags |= AMDGPU_PTE_TF;
        }
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
                                struct amdgpu_bo_va_mapping *mapping,
                                uint64_t *flags)
{
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

        *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
        *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags &= ~AMDGPU_PTE_VALID;
        }

        if (adev->asic_type == CHIP_ARCTURUS &&
            !(*flags & AMDGPU_PTE_SYSTEM) &&
            mapping->bo_va->is_xgmi)
                *flags |= AMDGPU_PTE_SNOOPED;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
        .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
        .map_mtype = gmc_v9_0_map_mtype,
        .get_vm_pde = gmc_v9_0_get_vm_pde,
        .get_vm_pte = gmc_v9_0_get_vm_pte
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
        adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                adev->umc.funcs = &umc_v6_0_funcs;
                break;
        case CHIP_VEGA20:
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.funcs = &umc_v6_1_funcs;
                break;
        case CHIP_ARCTURUS:
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
                adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
                adev->umc.funcs = &umc_v6_1_funcs;
                break;
        default:
                break;
        }
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA20:
                adev->mmhub.funcs = &mmhub_v1_0_funcs;
                break;
        case CHIP_ARCTURUS:
                adev->mmhub.funcs = &mmhub_v9_4_funcs;
                break;
        default:
                break;
        }
}
static int gmc_v9_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v9_0_set_gmc_funcs(adev);
        gmc_v9_0_set_irq_funcs(adev);
        gmc_v9_0_set_umc_funcs(adev);
        gmc_v9_0_set_mmhub_funcs(adev);

        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;

        return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
        /*
         * TODO:
         * Currently there is a bug where some memory client outside
         * of the driver writes to the first 8M of VRAM on S3 resume.
         * This overrides GART, which by default gets placed in the first 8M,
         * and causes VM_FAULTS once GTT is accessed.
         * Keep the stolen memory reservation until this is solved.
         * Also check the related code in gmc_v9_0_get_vbios_fb_size and
         * gmc_v9_0_late_init.
         */
        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_RAVEN:
        case CHIP_ARCTURUS:
        case CHIP_RENOIR:
                return true;
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        default:
                return false;
        }
}

static int gmc_v9_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        if (!gmc_v9_0_keep_stolen_memory(adev))
                amdgpu_bo_late_init(adev);

        r = amdgpu_gmc_allocate_vm_inv_eng(adev);
        if (r)
                return r;
        /* Check if ecc is available */
        if (!amdgpu_sriov_vf(adev)) {
                switch (adev->asic_type) {
                case CHIP_VEGA10:
                case CHIP_VEGA20:
                case CHIP_ARCTURUS:
                        r = amdgpu_atomfirmware_mem_ecc_supported(adev);
                        if (!r) {
                                DRM_INFO("ECC is not present.\n");
                                if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                        adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
                        } else {
                                DRM_INFO("ECC is active.\n");
                        }

                        r = amdgpu_atomfirmware_sram_ecc_supported(adev);
                        if (!r) {
                                DRM_INFO("SRAM ECC is not present.\n");
                        } else {
                                DRM_INFO("SRAM ECC is active.\n");
                        }
                        break;
                default:
                        break;
                }
        }

        r = amdgpu_gmc_ras_late_init(adev);
        if (r)
                return r;

        return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
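/*
 * Worked example for the XGMI adjustment below (illustrative numbers only):
 * with a node_segment_size of 32 GB, physical node 0 keeps its base
 * unchanged while node 1 has both its FB location and vram_base_offset
 * shifted up by 32 GB, so each node in a hive addresses its own VRAM
 * segment within the shared physical address space.
 */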
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
                                        struct amdgpu_gmc *mc)
{
        u64 base = 0;

        if (adev->asic_type == CHIP_ARCTURUS)
                base = mmhub_v9_4_get_fb_location(adev);
        else if (!amdgpu_sriov_vf(adev))
                base = mmhub_v1_0_get_fb_location(adev);

        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
        amdgpu_gmc_vram_location(adev, mc, base);
        amdgpu_gmc_gart_location(adev, mc);
        amdgpu_gmc_agp_location(adev, mc);
        /* base offset of vram pages */
        adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

        /* XXX: add the xgmi offset of the physical node? */
        adev->vm_manager.vram_base_offset +=
                adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
        int r;

        /* get_memsize() reports the VRAM size in MB */
        adev->gmc.mc_vram_size =
                adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
        if (adev->flags & AMD_IS_APU) {
                adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
#endif
        /* In case the PCI BAR is larger than the actual amount of vram */
        adev->gmc.visible_vram_size = adev->gmc.aper_size;
        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->asic_type) {
                case CHIP_VEGA10:  /* all engines support GPUVM */
                case CHIP_VEGA12:  /* all engines support GPUVM */
                case CHIP_VEGA20:
                case CHIP_ARCTURUS:
                default:
                        adev->gmc.gart_size = 512ULL << 20;
                        break;
                case CHIP_RAVEN:   /* DCE SG support */
                case CHIP_RENOIR:
                        adev->gmc.gart_size = 1024ULL << 20;
                        break;
                }
        } else {
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
        }

        gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

        return 0;
}
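/*
 * Sizing note for the GART table allocated below (arithmetic derived from
 * the code, assuming 4 KB GART pages): table_size = num_gpu_pages * 8 bytes
 * per entry, so the default 512 MB GART needs (512 MB / 4 KB) * 8 = 1 MB of
 * VRAM for its page table, and the 1 GB GART used on Raven/Renoir needs 2 MB.
 */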
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo) {
                WARN(1, "VEGA10 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);
        if (r)
                return r;
        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
                                    AMDGPU_PTE_EXECUTABLE;
        return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
        u32 d1vga_control;
        unsigned size;

        /*
         * TODO Remove once GART corruption is resolved
         * Check related code in gmc_v9_0_sw_fini
         */
        if (gmc_v9_0_keep_stolen_memory(adev))
                return 9 * 1024 * 1024;

        d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
        } else {
                u32 viewport;

                switch (adev->asic_type) {
                case CHIP_RAVEN:
                case CHIP_RENOIR:
                        viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
                        size = (REG_GET_FIELD(viewport,
                                              HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
                                REG_GET_FIELD(viewport,
                                              HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
                                4);
                        break;
                case CHIP_VEGA10:
                case CHIP_VEGA12:
                case CHIP_VEGA20:
                default:
                        viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
                        size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                                REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                                4);
                        break;
                }
        }
        /* return 0 if the pre-OS buffer uses up most of vram */
        if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
                return 0;

        return size;
}
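/*
 * Example for the viewport-based estimate above (illustrative only): a
 * 3840x2160 pre-OS framebuffer at 4 bytes per pixel works out to roughly
 * 31.6 MB, which stays reserved so the image set up by the vBIOS is not
 * overwritten before the driver takes over the display.
 */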
static int gmc_v9_0_sw_init(void *handle)
{
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gfxhub_v1_0_init(adev);
        if (adev->asic_type == CHIP_ARCTURUS)
                mmhub_v9_4_init(adev);
        else
                mmhub_v1_0_init(adev);

        spin_lock_init(&adev->gmc.invalidate_lock);

        r = amdgpu_atomfirmware_get_vram_info(adev,
                &vram_width, &vram_type, &vram_vendor);
        if (amdgpu_sriov_vf(adev))
                /*
                 * For Vega10 SR-IOV, vram_width can't be read from ATOM as
                 * on Raven, and the DF-related registers are not readable;
                 * hardcoding seems to be the only way to set the correct
                 * vram_width.
                 */
                adev->gmc.vram_width = 2048;
        else if (amdgpu_emu_mode != 1)
                adev->gmc.vram_width = vram_width;

        if (!adev->gmc.vram_width) {
                int chansize, numchan;

                /* hbm memory channel size */
                if (adev->flags & AMD_IS_APU)
                        chansize = 64;
                else
                        chansize = 128;

                numchan = adev->df.funcs->get_hbm_channel_number(adev);
                adev->gmc.vram_width = numchan * chansize;
        }

        adev->gmc.vram_type = vram_type;
        adev->gmc.vram_vendor = vram_vendor;
        switch (adev->asic_type) {
        case CHIP_RAVEN:
                adev->num_vmhubs = 2;

                if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                } else {
                        /* vm_size is 128TB + 512GB for legacy 3-level page support */
                        amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
                        adev->gmc.translate_further =
                                adev->vm_manager.num_level > 1;
                }
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RENOIR:
                adev->num_vmhubs = 2;

                /*
                 * To fulfill 4-level page support,
                 * vm size is 256TB (48bit), maximum size of Vega10,
                 * block size 512 (9bit)
                 */
                /* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
                if (amdgpu_sriov_vf(adev))
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
                else
                        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                break;
        case CHIP_ARCTURUS:
                adev->num_vmhubs = 3;

                /* Keep the vm size same with Vega20 */
                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                break;
        default:
                break;
        }

        /* This interrupt is VMC page fault. */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
                              &adev->gmc.vm_fault);
        if (r)
                return r;

        if (adev->asic_type == CHIP_ARCTURUS) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
                                      &adev->gmc.vm_fault);
                if (r)
                        return r;
        }

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
                              &adev->gmc.vm_fault);
        if (r)
                return r;

        if (!amdgpu_sriov_vf(adev)) {
                /* interrupt sent to DF. */
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
                                      &adev->gmc.ecc_irq);
                if (r)
                        return r;
        }

        /* Set the internal MC address mask
         * This is the max address of the GPU's
         * internal address space.
         */
        adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
        if (r) {
                printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(44);

        if (adev->gmc.xgmi.supported) {
                r = gfxhub_v1_1_get_xgmi_info(adev);
                if (r)
                        return r;
        }

        r = gmc_v9_0_mc_init(adev);
        if (r)
                return r;

        adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

        /* Memory manager */
        r = amdgpu_bo_init(adev);
        if (r)
                return r;

        r = gmc_v9_0_gart_init(adev);
        if (r)
                return r;

        /*
         * number of VMs
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1-7
         * amdkfd will use VMIDs 8-15
         */
        adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
        adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
        adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

        amdgpu_vm_manager_init(adev);

        return 0;
}
static int gmc_v9_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        void *stolen_vga_buf;

        amdgpu_gmc_ras_fini(adev);
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);

        if (gmc_v9_0_keep_stolen_memory(adev))
                amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

        amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
        amdgpu_gart_fini(adev);

        return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                if (amdgpu_sriov_vf(adev))
                        break;
                /* fall through */
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_mmhub_1_0_0,
                                                ARRAY_SIZE(golden_settings_mmhub_1_0_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_athub_1_0_0,
                                                ARRAY_SIZE(golden_settings_athub_1_0_0));
                break;
        case CHIP_VEGA12:
                break;
        case CHIP_RAVEN:
                /* TODO for renoir */
                soc15_program_register_sequence(adev,
                                                golden_settings_athub_1_0_0,
                                                ARRAY_SIZE(golden_settings_athub_1_0_0));
                break;
        default:
                break;
        }
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = amdgpu_gart_table_vram_pin(adev);
        if (r)
                return r;

        r = gfxhub_v1_0_gart_enable(adev);
        if (r)
                return r;

        if (adev->asic_type == CHIP_ARCTURUS)
                r = mmhub_v9_4_gart_enable(adev);
        else
                r = mmhub_v1_0_gart_enable(adev);
        if (r)
                return r;

        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
        adev->gart.ready = true;
        return 0;
}
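/**
 * gmc_v9_0_hw_init - hw init callback
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Programs the golden registers, locks out the VGA paths where a display
 * block is present, initializes HDP, sets the default fault behavior,
 * flushes the TLBs and enables the GART.
 */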
static int gmc_v9_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool value;
        int r, i;
        u32 tmp;

        /* The sequence of these two function calls matters. */
        gmc_v9_0_init_golden_registers(adev);

        if (adev->mode_info.num_crtc) {
                if (adev->asic_type != CHIP_ARCTURUS) {
                        /* Lockout access through VGA aperture */
                        WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

                        /* disable VGA render */
                        WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
                }
        }

        amdgpu_device_program_register_sequence(adev,
                                                golden_settings_vega10_hdp,
                                                ARRAY_SIZE(golden_settings_vega10_hdp));

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                /* TODO for renoir */
                mmhub_v1_0_update_power_gating(adev, true);
                break;
        case CHIP_ARCTURUS:
                WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
                break;
        default:
                break;
        }

        WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

        tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

        WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
        WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

        /* After HDP is initialized, flush HDP. */
        adev->nbio.funcs->hdp_flush(adev, NULL);

        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                value = false;
        else
                value = true;

        if (!amdgpu_sriov_vf(adev)) {
                gfxhub_v1_0_set_fault_enable_default(adev, value);
                if (adev->asic_type == CHIP_ARCTURUS)
                        mmhub_v9_4_set_fault_enable_default(adev, value);
                else
                        mmhub_v1_0_set_fault_enable_default(adev, value);
        }
        for (i = 0; i < adev->num_vmhubs; ++i)
                gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

        if (adev->umc.funcs && adev->umc.funcs->init_registers)
                adev->umc.funcs->init_registers(adev);

        r = gmc_v9_0_gart_enable(adev);

        return r;
}
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
        gfxhub_v1_0_gart_disable(adev);
        if (adev->asic_type == CHIP_ARCTURUS)
                mmhub_v9_4_gart_disable(adev);
        else
                mmhub_v1_0_gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
                return 0;
        }

        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v9_0_gart_disable(adev);

        return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v9_0_hw_init(adev);
        if (r)
                return r;

        amdgpu_vmid_reset_all(adev);

        return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
        /* MC is always ready in GMC v9. */
        return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
        /* There is no need to wait for MC idle in GMC v9. */
        return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
        /* XXX for emulation. */
        return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
                                        enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->asic_type == CHIP_ARCTURUS)
                mmhub_v9_4_set_clockgating(adev, state);
        else
                mmhub_v1_0_set_clockgating(adev, state);

        athub_v1_0_set_clockgating(adev, state);

        return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->asic_type == CHIP_ARCTURUS)
                mmhub_v9_4_get_clockgating(adev, flags);
        else
                mmhub_v1_0_get_clockgating(adev, flags);

        athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
                                        enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
        .name = "gmc_v9_0",
        .early_init = gmc_v9_0_early_init,
        .late_init = gmc_v9_0_late_init,
        .sw_init = gmc_v9_0_sw_init,
        .sw_fini = gmc_v9_0_sw_fini,
        .hw_init = gmc_v9_0_hw_init,
        .hw_fini = gmc_v9_0_hw_fini,
        .suspend = gmc_v9_0_suspend,
        .resume = gmc_v9_0_resume,
        .is_idle = gmc_v9_0_is_idle,
        .wait_for_idle = gmc_v9_0_wait_for_idle,
        .soft_reset = gmc_v9_0_soft_reset,
        .set_clockgating_state = gmc_v9_0_set_clockgating_state,
        .set_powergating_state = gmc_v9_0_set_powergating_state,
        .get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 9,
        .minor = 0,
        .rev = 0,
        .funcs = &gmc_v9_0_ip_funcs,
};