/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"

#include "soc15_common.h"
#include "soc15.h"
#include "navi10_sdma_pkt_open.h"
#include "nbio_v2_3.h"
#include "sdma_v5_0.h"

MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");

MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");

MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");

#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20

static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);

static const struct soc15_reg_golden golden_settings_sdma_5[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
};

static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};

static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
};

static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};
static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};

static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
	u32 base;

	if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
	    internal_offset <= SDMA0_HYP_DEC_REG_END) {
		base = adev->reg_offset[GC_HWIP][0][1];
		if (instance == 1)
			internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
	} else {
		base = adev->reg_offset[GC_HWIP][0][0];
		if (instance == 1)
			internal_offset += SDMA1_REG_OFFSET;
	}

	return base + internal_offset;
}

static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_5,
						(const u32)ARRAY_SIZE(golden_settings_sdma_5));
		soc15_program_register_sequence(adev,
						golden_settings_sdma_nv10,
						(const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
		break;
	case CHIP_NAVI14:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_5,
						(const u32)ARRAY_SIZE(golden_settings_sdma_5));
		soc15_program_register_sequence(adev,
						golden_settings_sdma_nv14,
						(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
		break;
	case CHIP_NAVI12:
		if (amdgpu_sriov_vf(adev))
			soc15_program_register_sequence(adev,
							golden_settings_sdma_5_sriov,
							(const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
		else
			soc15_program_register_sequence(adev,
							golden_settings_sdma_5,
							(const u32)ARRAY_SIZE(golden_settings_sdma_5));
		soc15_program_register_sequence(adev,
						golden_settings_sdma_nv12,
						(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
		break;
	default:
		break;
	}
}

/**
 * sdma_v5_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */

// emulation only, won't work on a real chip
// navi10 real chips need to use the PSP to load firmware
static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
		DRM_DEBUG("psp_load == '%s'\n",
			  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 1);
	ret = ring->wptr & ring->buf_mask; /* this is the offset we need to patch later */
	amdgpu_ring_write(ring, 0x55aa55aa); /* insert a dummy here and patch it later */

	return ret;
}

static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
					   unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur > offset)
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}
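/*
 * Worked example for the COND_EXE patching above (a sketch with made-up
 * offsets): if the 0x55aa55aa dummy sits at ring offset 0x20 and wptr is
 * 0x30 when we patch, then cur = 0x2f and the dummy is replaced with
 * 0x2f - 0x20 = 0xf, the number of dwords the engine should skip when the
 * condition value at cond_exe_gpu_addr is not met; the else branch covers
 * the case where wptr has wrapped around the ring.
 */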
/**
 * sdma_v5_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (NAVI10+).
 */
static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 *rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);

	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
	return ((*rptr) >> 2); /* the write-back value is in bytes; return dwords */
}

/**
 * sdma_v5_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (NAVI10+).
 */
static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 *wptr = NULL;
	uint64_t local_wptr = 0;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
		*wptr = (*wptr) >> 2;
		DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
	} else {
		u32 lowbit, highbit;

		wptr = &local_wptr;
		lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
		highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;

		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
			  ring->me, highbit, lowbit);
		*wptr = highbit;
		*wptr = (*wptr) << 32;
		*wptr |= lowbit;
	}

	return *wptr;
}

/**
 * sdma_v5_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (NAVI10+).
 */
static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	DRM_DEBUG("Setting write pointer\n");
	if (ring->use_doorbell) {
		DRM_DEBUG("Using doorbell -- "
			  "wptr_offs == 0x%08x "
			  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
			  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
			  ring->wptr_offs,
			  lower_32_bits(ring->wptr << 2),
			  upper_32_bits(ring->wptr << 2));
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
		adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
			  ring->doorbell_index, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		DRM_DEBUG("Not using doorbell -- "
			  "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
			  "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
			  ring->me,
			  lower_32_bits(ring->wptr << 2),
			  ring->me,
			  upper_32_bits(ring->wptr << 2));
		WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
		       lower_32_bits(ring->wptr << 2));
		WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
		       upper_32_bits(ring->wptr << 2));
	}
}

static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
					  SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}
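/*
 * Note on the burst NOP above: when burst_nop is set, a count of e.g. 4
 * emits one NOP header with COUNT = 3 followed by three plain NOP dwords,
 * so the engine consumes all four dwords as a single packet rather than
 * four separate ones.
 */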
/**
 * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (NAVI10).
 */
static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);

	/* An IB packet must end on an 8-DW boundary -- the next dword
	 * must be on an 8-dword boundary. Our IB packet below is 6
	 * dwords long, so add x NOPs such that, in modular arithmetic,
	 * wptr + 6 + x = 8k, k >= 0, which in C is
	 * (wptr + 6 + x) % 8 = 0.
	 * The expression below is a solution of x:
	 * x = (-(wptr + 6)) & 7 = (2 - wptr) & 7, since -6 = 2 (mod 8).
	 */
	sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}

/**
 * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask = 0;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	if (ring->me == 0)
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
	else
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
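/*
 * A note on the POLL_REGMEM packet above: with the HDP_FLUSH bit set it
 * triggers the flush through the request register and then polls the
 * "done" register until (value & mask) == reference, retrying up to
 * 0xfff times at the programmed interval; FUNC(3) selects "equal".
 */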
/**
 * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence seq number to
 * @seq: fence seq number
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (NAVI10).
 */
static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
			  SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Uncached (UC) */
	/* the low two address bits must be zero */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
				  SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
		/* the low two address bits must be zero */
		BUG_ON(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* Interrupts don't work correctly on the GFX10.1 model yet,
	 * so skip the trap packet there as a fallback.
	 */
	if ((flags & AMDGPU_FENCE_FLAG_INT) && adev->pdev->device != 0x50) {
		/* generate an interrupt */
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
		amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
	}
}

/**
 * sdma_v5_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (NAVI10).
 */
static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
		ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
	}
}

/**
 * sdma_v5_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (NAVI10).
 */
static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}
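/*
 * Phase-quantum encoding used below, illustrated with a made-up field
 * width: the loop halves amdgpu_sdma_phase_quantum (rounding up) until it
 * fits in the VALUE field, counting the shifts in UNIT. If VALUE were
 * 8 bits wide, a quantum of 1000 would encode as value = 250, unit = 2
 * (250 << 2 = 1000).
 */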
/**
 * sdma_v5_0_ctx_switch_enable - enable/disable async dma engine context switching
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (NAVI10).
 */
static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl = 0, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
				  "clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!amdgpu_sriov_vf(adev)) {
			f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
						 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
		}

		if (enable && amdgpu_sdma_phase_quantum) {
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
			       phase_quantum);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
			       phase_quantum);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
			       phase_quantum);
		}
		if (!amdgpu_sriov_vf(adev))
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
	}
}

/**
 * sdma_v5_0_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (NAVI10).
 */
static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v5_0_gfx_stop(adev);
		sdma_v5_0_rlc_stop(adev);
	}

	if (amdgpu_sriov_vf(adev))
		return;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
	}
}
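/*
 * Bring-up order performed by sdma_v5_0_gfx_resume() below, as read from
 * its register writes: program the ring size, reset rptr/wptr, set up wptr
 * shadow polling and rptr write-back, program the ring base, configure the
 * doorbell, then unhalt the engine and finally enable the RB and IBs.
 */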
/**
 * sdma_v5_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	u32 doorbell_offset;
	u32 temp;
	u32 wptr_poll_cntl;
	u64 wptr_gpu_addr;
	int i, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		if (!amdgpu_sriov_vf(adev))
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);

		/* setup the wptr shadow polling */
		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
		       lower_32_bits(wptr_gpu_addr));
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
		       upper_32_bits(wptr_gpu_addr));
		wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i,
							mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
		wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
					       SDMA0_GFX_RB_WPTR_POLL_CNTL,
					       F32_POLL_ENABLE, 1);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
		       wptr_poll_cntl);

		/* set the wb address whether it's enabled or not */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);

		ring->wptr = 0;

		/* before programming wptr to a smaller value, minor_ptr_update must be set first */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);

		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
		}

		doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
		doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
							OFFSET, ring->doorbell_index);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);

		adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
						      ring->doorbell_index, 20);

		if (amdgpu_sriov_vf(adev))
			sdma_v5_0_ring_set_wptr(ring);

		/* set minor_ptr_update back to 0 after wptr is programmed */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);

		if (!amdgpu_sriov_vf(adev)) {
			/* set utc l1 enable flag always to 1 */
			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);

			/* enable MCBP */
			temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);

			/* Set up RESP_MODE to non-copy addresses */
			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);

			/* program default cache read and write policy */
			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
			/* clean read policy and write policy bits */
			temp &= 0xFF0FFF;
			temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
		}

		if (!amdgpu_sriov_vf(adev)) {
			/* unhalt engine */
			temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
		}

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);

		ring->sched.ready = true;

		if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
			sdma_v5_0_ctx_switch_enable(adev, true);
			sdma_v5_0_enable(adev, true);
		}

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * sdma_v5_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * sdma_v5_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v5_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;

		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);

		for (j = 0; j < fw_size; j++) {
			if (amdgpu_emu_mode == 1 && j % 500 == 0)
				msleep(1);
			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
		}

		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * sdma_v5_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_start(struct amdgpu_device *adev)
{
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		sdma_v5_0_ctx_switch_enable(adev, false);
		sdma_v5_0_enable(adev, false);

		/* set RB registers */
		r = sdma_v5_0_gfx_resume(adev);
		return r;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		r = sdma_v5_0_load_microcode(adev);
		if (r)
			return r;

		/* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
		if (amdgpu_emu_mode == 1 && adev->pdev->device == 0x4d)
			msleep(1000);
	}

	/* unhalt the MEs */
	sdma_v5_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v5_0_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v5_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v5_0_rlc_resume(adev);

	return r;
}
/**
 * sdma_v5_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (NAVI10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_device_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	amdgpu_device_wb_free(adev, index);

	return r;
}

/**
 * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies
 *
 * Test a simple IB in the DMA ring (NAVI10).
 * Returns 0 on success, error on failure.
 */
static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}
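/*
 * The three helpers below are the sDMA back-ends for amdgpu's VM
 * page-table updates: copy_pte DMA-copies ready-made entries out of the
 * GART, write_pte writes CPU-generated entries directly, and set_pte_pde
 * has the engine generate entries itself for physically contiguous (vram)
 * ranges.
 */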
/**
 * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (NAVI10).
 */
static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: data value to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (NAVI10).
 */
static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (NAVI10).
 */
static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}
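/*
 * Padding arithmetic used by sdma_v5_0_ring_pad_ib() below, illustrated:
 * pad_count = (-length_dw) & 7 is the distance to the next multiple of 8
 * dwords, e.g. length_dw == 13 gives (-13) & 7 == 3, so three NOP dwords
 * round the IB up to 16 dwords.
 */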
/**
 * sdma_v5_0_ring_pad_ib - pad the IB
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 0x7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (NAVI10).
 */
static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (NAVI10).
 */
static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}

static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	/* wait for a cycle to reset vm_inv_eng*_ack */
	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

static int sdma_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	sdma_v5_0_set_ring_funcs(adev);
	sdma_v5_0_set_buffer_funcs(adev);
	sdma_v5_0_set_vm_pte_funcs(adev);
	sdma_v5_0_set_irq_funcs(adev);

	return 0;
}

static int sdma_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
			      SDMA0_5_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
			      SDMA1_5_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	r = sdma_v5_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;

		DRM_INFO("use_doorbell being set to: [%s]\n",
			 ring->use_doorbell ? "true" : "false");

		ring->doorbell_index = (i == 0) ?
			(adev->doorbell_index.sdma_engine[0] << 1) /* get DWORD offset */
			: (adev->doorbell_index.sdma_engine[1] << 1); /* get DWORD offset */

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v5_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int sdma_v5_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v5_0_init_golden_registers(adev);

	r = sdma_v5_0_start(adev);

	return r;
}

static int sdma_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	sdma_v5_0_ctx_switch_enable(adev, false);
	sdma_v5_0_enable(adev, false);

	return 0;
}

static int sdma_v5_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v5_0_hw_fini(adev);
}

static int sdma_v5_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v5_0_hw_init(adev);
}

static bool sdma_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));

		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			return false;
	}

	return true;
}

static int sdma_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 sdma0, sdma1;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
		sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));

		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v5_0_soft_reset(void *handle)
{
	/* todo */

	return 0;
}

static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	u32 index = 0;
	u64 sdma_gfx_preempt;

	amdgpu_sdma_get_index_from_ring(ring, &index);
	if (index == 0)
		sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
	else
		sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				  ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(sdma_gfx_preempt, 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(sdma_gfx_preempt, 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	if (!amdgpu_sriov_vf(adev)) {
		u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
			sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
			sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);

		sdma_cntl = RREG32(reg_offset);
		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
					  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		WREG32(reg_offset, sdma_cntl);
	}

	return 0;
}

static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: SDMA trap\n");
	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	return 0;
}

static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
			/* Enable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		} else {
			/* Disable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		}
	}
}

static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
			/* Enable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);

		} else {
			/* Disable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);

		}
	}
}

static int sdma_v5_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		sdma_v5_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v5_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static int sdma_v5_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
	.name = "sdma_v5_0",
	.early_init = sdma_v5_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v5_0_sw_init,
	.sw_fini = sdma_v5_0_sw_fini,
	.hw_init = sdma_v5_0_hw_init,
	.hw_fini = sdma_v5_0_hw_fini,
	.suspend = sdma_v5_0_suspend,
	.resume = sdma_v5_0_resume,
	.is_idle = sdma_v5_0_is_idle,
	.wait_for_idle = sdma_v5_0_wait_for_idle,
	.soft_reset = sdma_v5_0_soft_reset,
	.set_clockgating_state = sdma_v5_0_set_clockgating_state,
	.set_powergating_state = sdma_v5_0_set_powergating_state,
	.get_clockgating_state = sdma_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = sdma_v5_0_ring_get_rptr,
	.get_wptr = sdma_v5_0_ring_get_wptr,
	.set_wptr = sdma_v5_0_ring_set_wptr,
	.emit_frame_size =
		5 + /* sdma_v5_0_ring_init_cond_exec */
		6 + /* sdma_v5_0_ring_emit_hdp_flush */
		3 + /* hdp_invalidate */
		6 + /* sdma_v5_0_ring_emit_pipeline_sync */
		/* sdma_v5_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
		10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
	.emit_ib = sdma_v5_0_ring_emit_ib,
	.emit_fence = sdma_v5_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
	.test_ring = sdma_v5_0_ring_test_ring,
	.test_ib = sdma_v5_0_ring_test_ib,
	.insert_nop = sdma_v5_0_ring_insert_nop,
	.pad_ib = sdma_v5_0_ring_pad_ib,
	.emit_wreg = sdma_v5_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
	.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
	.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
	.preempt_ib = sdma_v5_0_ring_preempt_ib,
};

static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
	.set = sdma_v5_0_set_trap_irq_state,
	.process = sdma_v5_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
	.process = sdma_v5_0_process_illegal_inst_irq,
};

static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (NAVI10).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
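/*
 * Note: copy_num_dw (7) and fill_num_dw (5) in sdma_v5_0_buffer_funcs below
 * match the dword counts emitted by sdma_v5_0_emit_copy_buffer() and
 * sdma_v5_0_emit_fill_buffer(), and the 0x400000-byte maxima bound what a
 * single COPY_LINEAR/CONST_FILL packet is allowed to transfer here.
 */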
/**
 * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (NAVI10).
 */
static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v5_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
};

static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v5_0_vm_copy_pte,
	.write_pte = sdma_v5_0_vm_write_pte,
	.set_pte_pde = sdma_v5_0_vm_set_pte_pde,
};

static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->vm_manager.vm_pte_scheds[i] =
				&adev->sdma.instance[i].ring.sched;
		}
		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v5_0_ip_funcs,
};