/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS	8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
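/*
 * The three tables above enumerate the same per-channel UMC register for
 * every channel: reading the addresses off, each UMC instance contributes
 * four channels at a 0x800 register stride, and the eight instances are
 * spaced 0x40000 apart, giving 32 entries per table.
 */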
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}
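/*
 * The fault address arrives split across the IV ring entry: src_data[0]
 * carries bits 43:12 of the page-aligned address, the low nibble of
 * src_data[1] carries bits 47:44, and bit 7 of src_data[1] distinguishes
 * a retry fault from a no-retry fault.
 */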
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	adev->gmc.ecc_irq.num_types = 1;
	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
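/*
 * A request built above targets only the VMIDs selected in the per-VMID
 * mask (1 << vmid here) but drops every cached translation level at once:
 * L2 PTEs, all three PDE levels, and L1 PTEs.
 */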
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	const unsigned eng = 17;
	u32 j, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    !adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
						   1 << vmid);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle, so acquire a semaphore before the invalidation
	 * and release it afterwards to avoid entering a power-gated state
	 * in between, as a workaround for the issue.
	 */

	/* TODO: the semaphore needs more debugging before it can be used for GFXHUB as well */
	if (vmhub == AMDGPU_MMHUB_0 ||
	    vmhub == AMDGPU_MMHUB_1) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	/* TODO: the semaphore needs more debugging before it can be used for GFXHUB as well */
	if (vmhub == AMDGPU_MMHUB_0 ||
	    vmhub == AMDGPU_MMHUB_1)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
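/*
 * The MMIO path above always uses invalidation engine 17 for its
 * driver-initiated flushes; rings flushing through the command-stream
 * path below each use their own engine (ring->vm_inv_eng), handed out
 * from the free-engine bitmaps in gmc_v9_0_allocate_vm_inv_eng(), which
 * are expected to keep engine 17 out of circulation.
 */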
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle, so acquire a semaphore before the invalidation
	 * and release it afterwards to avoid entering a power-gated state
	 * in between, as a workaround for the issue.
	 */

	/* TODO: the semaphore needs more debugging before it can be used for GFXHUB as well */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	/* TODO: the semaphore needs more debugging before it can be used for GFXHUB as well */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
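/*
 * Worked example of the PTE layout above (illustrative values): a valid,
 * snooped system page at physical address 0x12345000 with read, write and
 * execute permission encodes as
 *   0x12345000 | BIT(6) | BIT(5) | BIT(4) | BIT(2) | BIT(1) | BIT(0)
 *   = 0x12345077.
 */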
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}
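/*
 * Ring-buffer TLB flushes need a dedicated invalidation engine per ring.
 * The per-hub bitmaps below (defined in gmc_v9_0.h) track which engines
 * are still free; ffs() hands out the lowest free one, and the chosen
 * index is stored in ring->vm_inv_eng for the emit path above.
 */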
static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
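/*
 * In an XGMI hive the per-node offset (physical_node_id *
 * node_segment_size) is applied twice above: once to the FB base so that
 * this node's VRAM lands in its own segment of the hive-wide address
 * space, and once to vram_base_offset so that the addresses generated in
 * gmc_v9_0_get_vm_pde() point into the same segment.
 */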
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
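/*
 * Example of the sizing above (illustrative numbers): a pre-OS console
 * scanning out 3840x2160 at 32bpp reports a viewport of 3840 * 2160 * 4
 * bytes, so roughly 32 MB of VRAM would stay reserved for it (see also
 * gmc_v9_0_late_init(), which releases the stolen reservation when it is
 * no longer needed).
 */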
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/*
		 * For Vega10 SR-IOV, vram_width can't be read from ATOM as
		 * on RAVEN, and the DF-related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the VM size the same as Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}
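	/*
	 * Worked example of the amdgpu_vm_adjust_size() arguments above
	 * (illustrative): a vm_size of 256 * 1024 GB = 2^48 bytes matches
	 * the 48-bit address width, the 9-bit block size groups 512 4K
	 * pages into 2 MB fragments, and the max level of 3 yields the
	 * 4-level page-table walk the comment above refers to.
	 */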
	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			      &adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}
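/*
 * Hardware init below proceeds in a fixed order: program golden
 * registers, lock out the VGA aperture, set up HDP, pick the fault-stop
 * policy, flush every hub's TLB, initialize the UMC registers, and
 * finally enable the GART.
 */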
static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;
	u32 tmp;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		if (adev->asic_type != CHIP_ARCTURUS) {
			/* Lock out access through the VGA aperture */
			WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

			/* disable VGA render */
			WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		}
	}

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	case CHIP_ARCTURUS:
		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
		break;
	default:
		break;
	}

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP. */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_fault_enable_default(adev, value);
	else
		mmhub_v1_0_set_fault_enable_default(adev, value);

	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
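/*
 * Teardown mirrors bring-up: the fault and ECC interrupts are released
 * before the GART is disabled, and under SR-IOV (where the host retains
 * full access) the GMC registers are left untouched entirely.
 */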
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_clockgating(adev, state);
	else
		mmhub_v1_0_set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_get_clockgating(adev, flags);
	else
		mmhub_v1_0_get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};