/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"

#include "soc15_common.h"
#include "soc15.h"
#include "navi10_sdma_pkt_open.h"
#include "nbio_v2_3.h"
#include "sdma_v5_0.h"

MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");

MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");

MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");

#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20

static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);

static const struct soc15_reg_golden golden_settings_sdma_5[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
};

static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
};

static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};

static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};

static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
	u32 base;

	if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
	    internal_offset <= SDMA0_HYP_DEC_REG_END) {
		base = adev->reg_offset[GC_HWIP][0][1];
		if (instance == 1)
			internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
	} else {
		base = adev->reg_offset[GC_HWIP][0][0];
		if (instance == 1)
			internal_offset += SDMA1_REG_OFFSET;
	}

	return base + internal_offset;
}
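
/*
 * Illustrative example (added for clarity, not in the original source): for a
 * regular (non-HYP_DEC) register such as mmSDMA0_GFX_RB_CNTL on instance 1,
 * the helper above returns the first GC segment base
 * (reg_offset[GC_HWIP][0][0]) plus the internal offset plus the 0x600
 * per-instance stride (SDMA1_REG_OFFSET); registers in the HYP_DEC range use
 * the second GC segment and a 0x20 per-instance stride instead.
 */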

static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_5,
						(const u32)ARRAY_SIZE(golden_settings_sdma_5));
		soc15_program_register_sequence(adev,
						golden_settings_sdma_nv10,
						(const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
		break;
	case CHIP_NAVI14:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_5,
						(const u32)ARRAY_SIZE(golden_settings_sdma_5));
		soc15_program_register_sequence(adev,
						golden_settings_sdma_nv14,
						(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
		break;
	case CHIP_NAVI12:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_5,
						(const u32)ARRAY_SIZE(golden_settings_sdma_5));
		soc15_program_register_sequence(adev,
						golden_settings_sdma_nv12,
						(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
		break;
	default:
		break;
	}
}

/**
 * sdma_v5_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */

// emulation only, won't work on a real chip
// real navi10 chips need to load the firmware through PSP
static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
		DRM_DEBUG("psp_load == '%s'\n",
			  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 1);
	ret = ring->wptr & ring->buf_mask; /* this is the offset we need to patch later */
	amdgpu_ring_write(ring, 0x55aa55aa); /* insert a dummy value here and patch it later */

	return ret;
}

static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
					   unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur > offset)
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}

/**
 * sdma_v5_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (NAVI10+).
 */
static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 *rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);

	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
	return ((*rptr) >> 2);
}

/**
 * sdma_v5_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (NAVI10+).
 */
static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 *wptr = NULL;
	uint64_t local_wptr = 0;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
		*wptr = (*wptr) >> 2;
		DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
	} else {
		u32 lowbit, highbit;

		wptr = &local_wptr;
		lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
		highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;

		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
			  ring->me, highbit, lowbit);
		*wptr = highbit;
		*wptr = (*wptr) << 32;
		*wptr |= lowbit;
	}

	return *wptr;
}

/**
 * sdma_v5_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (NAVI10+).
 */
static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	DRM_DEBUG("Setting write pointer\n");
	if (ring->use_doorbell) {
		DRM_DEBUG("Using doorbell -- "
			  "wptr_offs == 0x%08x "
			  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
			  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
			  ring->wptr_offs,
			  lower_32_bits(ring->wptr << 2),
			  upper_32_bits(ring->wptr << 2));
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
		adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
			  ring->doorbell_index, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		DRM_DEBUG("Not using doorbell -- "
			  "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
			  "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
			  ring->me,
			  lower_32_bits(ring->wptr << 2),
			  ring->me,
			  upper_32_bits(ring->wptr << 2));
		WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
		       lower_32_bits(ring->wptr << 2));
		WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
		       upper_32_bits(ring->wptr << 2));
	}
}
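
/*
 * Note added for clarity (not in the original source): the ring rptr/wptr are
 * tracked in dwords, while the hardware registers, writeback slots and the
 * doorbell hold a byte offset, hence the "<< 2" when committing and the
 * ">> 2" when reading back in the three helpers above.
 */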

static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (NAVI10).
 */
static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);

	/* An IB packet must end on an 8 DW boundary--the next dword
	 * must be on an 8-dword boundary. Our IB packet below is 6
	 * dwords long, thus add x number of NOPs, such that, in
	 * modular arithmetic,
	 * wptr + 6 + x = 8k, k >= 0, which in C is,
	 * (wptr + 6 + x) % 8 = 0.
	 * The expression below is a solution of x.
	 */
	sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}
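
/*
 * Worked example for the padding rule above (added for clarity, not in the
 * original source): with wptr % 8 == 5, x = (2 - 5) & 7 = 5, so five NOPs are
 * emitted and 5 + 5 + 6 = 16 is again a multiple of 8.
 */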

/**
 * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask = 0;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	if (ring->me == 0)
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
	else
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence is written to
 * @seq: fence sequence number to write
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (NAVI10).
 */
static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
			  SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
	/* zero in first two bits */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
				  SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
		/* zero in first two bits */
		BUG_ON(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* Interrupts don't work reliably on the GFX10.1 model yet; use the fallback path instead */
	if ((flags & AMDGPU_FENCE_FLAG_INT) && adev->pdev->device != 0x50) {
		/* generate an interrupt */
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
		amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
	}
}
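
/*
 * Sizing note (added for clarity, not in the original source): a 32-bit fence
 * is 4 dwords (header, addr lo/hi, seq lo), a 64-bit fence adds a second
 * 4-dword FENCE packet for the high dword, and the optional TRAP adds 2 more,
 * which is why emit_frame_size in sdma_v5_0_ring_funcs budgets 10 dwords per
 * fence.
 */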

/**
 * sdma_v5_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (NAVI10).
 */
static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
		ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
	}

	sdma0->sched.ready = false;
	sdma1->sched.ready = false;
}

/**
 * sdma_v5_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (NAVI10).
 */
static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v5_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (NAVI10).
 */
static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
				  "clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
		if (enable && amdgpu_sdma_phase_quantum) {
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
			       phase_quantum);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
			       phase_quantum);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
			       phase_quantum);
		}
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
	}

}

/**
 * sdma_v5_0_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (NAVI10).
 */
static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v5_0_gfx_stop(adev);
		sdma_v5_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
	}
}

/**
 * sdma_v5_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	u32 doorbell_offset;
	u32 temp;
	u32 wptr_poll_cntl;
	u64 wptr_gpu_addr;
	int i, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);

		/* setup the wptr shadow polling */
		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
		       lower_32_bits(wptr_gpu_addr));
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
		       upper_32_bits(wptr_gpu_addr));
		wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i,
							 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
		wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
					       SDMA0_GFX_RB_WPTR_POLL_CNTL,
					       F32_POLL_ENABLE, 1);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
		       wptr_poll_cntl);

		/* set the wb address whether it's enabled or not */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);

		ring->wptr = 0;

		/* before programming wptr to a smaller value, set minor_ptr_update first */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);

		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
		}

		doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
		doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
					OFFSET, ring->doorbell_index);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);

		adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
						      ring->doorbell_index, 20);

		if (amdgpu_sriov_vf(adev))
			sdma_v5_0_ring_set_wptr(ring);

		/* set minor_ptr_update to 0 after wptr is programmed */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);

		/* set utc l1 enable flag always to 1 */
		temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);

		/* enable MCBP */
		temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);

		/* Set up RESP_MODE to non-copy addresses */
		temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
		temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
		temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);

		/* program default cache read and write policy */
		temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
		/* clean read policy and write policy bits */
		temp &= 0xFF0FFF;
		temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);

		if (!amdgpu_sriov_vf(adev)) {
			/* unhalt engine */
			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
		}

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);

		ring->sched.ready = true;

		if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
			sdma_v5_0_ctx_switch_enable(adev, true);
			sdma_v5_0_enable(adev, true);
		}

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * sdma_v5_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * sdma_v5_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v5_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;

		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);

		for (j = 0; j < fw_size; j++) {
			if (amdgpu_emu_mode == 1 && j % 500 == 0)
				msleep(1);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
		}

		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
	}

	return 0;
}
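
/*
 * Note added for clarity (not in the original source): this direct-load path
 * (AMDGPU_FW_LOAD_DIRECT) writes the ucode image dword by dword through
 * SDMA0_UCODE_DATA after resetting SDMA0_UCODE_ADDR; production Navi parts
 * instead load the SDMA firmware through PSP, as noted above in
 * sdma_v5_0_init_microcode().
 */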

/**
 * sdma_v5_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_start(struct amdgpu_device *adev)
{
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		sdma_v5_0_ctx_switch_enable(adev, false);
		sdma_v5_0_enable(adev, false);

		/* set RB registers */
		r = sdma_v5_0_gfx_resume(adev);
		return r;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		r = sdma_v5_0_load_microcode(adev);
		if (r)
			return r;

		/* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
		if (amdgpu_emu_mode == 1 && adev->pdev->device == 0x4d)
			msleep(1000);
	}

	/* unhalt the MEs */
	sdma_v5_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v5_0_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v5_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v5_0_rlc_resume(adev);

	return r;
}

/**
 * sdma_v5_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_device_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	amdgpu_device_wb_free(adev, index);

	return r;
}

/**
 * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (NAVI10).
 * Returns 0 on success, error on failure.
 */
static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (NAVI10).
 */
static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);

}

/**
 * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: value to write into the first pe
 * @count: number of page entries to update
 * @incr: increase next value by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (NAVI10).
 */
static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (NAVI10).
 */
static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}
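
/*
 * Sizing example (added for clarity, not in the original source): each PTE is
 * 8 bytes, so copying 512 contiguous PTEs with sdma_v5_0_vm_copy_pte() above
 * moves 512 * 8 = 4096 bytes with a single 7-dword COPY_LINEAR packet,
 * matching copy_pte_num_dw = 7 in sdma_v5_0_vm_pte_funcs below.
 */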

/**
 * sdma_v5_0_ring_pad_ib - pad the IB
 *
 * @ring: amdgpu ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a boundary multiple of 8.
 */
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 0x7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
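
/*
 * Worked example for the padding above (added for clarity, not in the
 * original source): an IB with length_dw == 13 gets pad_count = (-13) & 7 = 3
 * NOPs, bringing it to 16 dwords, the next multiple of 8.
 */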

/**
 * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (NAVI10).
 */
static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}


/**
 * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (NAVI10).
 */
static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}

static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	/* wait for a cycle to reset vm_inv_eng*_ack */
	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

static int sdma_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	sdma_v5_0_set_ring_funcs(adev);
	sdma_v5_0_set_buffer_funcs(adev);
	sdma_v5_0_set_vm_pte_funcs(adev);
	sdma_v5_0_set_irq_funcs(adev);

	return 0;
}


static int sdma_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
			      SDMA0_5_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
			      SDMA1_5_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	r = sdma_v5_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;

		DRM_INFO("use_doorbell being set to: [%s]\n",
			 ring->use_doorbell ? "true" : "false");

		ring->doorbell_index = (i == 0) ?
			(adev->doorbell_index.sdma_engine[0] << 1) // get DWORD offset
			: (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v5_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int sdma_v5_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v5_0_init_golden_registers(adev);

	r = sdma_v5_0_start(adev);

	return r;
}

static int sdma_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	sdma_v5_0_ctx_switch_enable(adev, false);
	sdma_v5_0_enable(adev, false);

	return 0;
}

static int sdma_v5_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v5_0_hw_fini(adev);
}

static int sdma_v5_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v5_0_hw_init(adev);
}

static bool sdma_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));

		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			return false;
	}

	return true;
}

static int sdma_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 sdma0, sdma1;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
		sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));

		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v5_0_soft_reset(void *handle)
{
	/* todo */

	return 0;
}

static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	u32 index = 0;
	u64 sdma_gfx_preempt;

	amdgpu_sdma_get_index_from_ring(ring, &index);
	if (index == 0)
		sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
	else
		sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				  ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(sdma_gfx_preempt, 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(sdma_gfx_preempt, 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
		sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
		sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);

	sdma_cntl = RREG32(reg_offset);
	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
				  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
	WREG32(reg_offset, sdma_cntl);

	return 0;
}

static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: SDMA trap\n");
	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	return 0;
}

static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
			/* Enable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		} else {
			/* Disable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		}
	}
}

static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
			/* Enable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);

		} else {
			/* Disable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);

		}
	}
}

static int sdma_v5_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		sdma_v5_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v5_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static int sdma_v5_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
	.name = "sdma_v5_0",
	.early_init = sdma_v5_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v5_0_sw_init,
	.sw_fini = sdma_v5_0_sw_fini,
	.hw_init = sdma_v5_0_hw_init,
	.hw_fini = sdma_v5_0_hw_fini,
	.suspend = sdma_v5_0_suspend,
	.resume = sdma_v5_0_resume,
	.is_idle = sdma_v5_0_is_idle,
	.wait_for_idle = sdma_v5_0_wait_for_idle,
	.soft_reset = sdma_v5_0_soft_reset,
	.set_clockgating_state = sdma_v5_0_set_clockgating_state,
	.set_powergating_state = sdma_v5_0_set_powergating_state,
	.get_clockgating_state = sdma_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = sdma_v5_0_ring_get_rptr,
	.get_wptr = sdma_v5_0_ring_get_wptr,
	.set_wptr = sdma_v5_0_ring_set_wptr,
	.emit_frame_size =
		5 + /* sdma_v5_0_ring_init_cond_exec */
		6 + /* sdma_v5_0_ring_emit_hdp_flush */
		3 + /* hdp_invalidate */
		6 + /* sdma_v5_0_ring_emit_pipeline_sync */
		/* sdma_v5_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
		10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
	.emit_ib = sdma_v5_0_ring_emit_ib,
	.emit_fence = sdma_v5_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
	.test_ring = sdma_v5_0_ring_test_ring,
	.test_ib = sdma_v5_0_ring_test_ib,
	.insert_nop = sdma_v5_0_ring_insert_nop,
	.pad_ib = sdma_v5_0_ring_pad_ib,
	.emit_wreg = sdma_v5_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
	.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
	.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
	.preempt_ib = sdma_v5_0_ring_preempt_ib,
};

static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
	.set = sdma_v5_0_set_trap_irq_state,
	.process = sdma_v5_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
	.process = sdma_v5_0_process_illegal_inst_irq,
};

static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (NAVI10).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (NAVI10).
 */
static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v5_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
};

static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v5_0_vm_copy_pte,
	.write_pte = sdma_v5_0_vm_write_pte,
	.set_pte_pde = sdma_v5_0_vm_set_pte_pde,
};

static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->vm_manager.vm_pte_scheds[i] =
				&adev->sdma.instance[i].ring.sched;
		}
		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v5_0_ip_funcs,
};