/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "iceland_sdma_pkt_open.h"

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_iceland_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

/**
 * sdma_v2_4_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	default: BUG();
	}

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);

		if (adev->firmware.smu_load) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}

out:
	if (err) {
		printk(KERN_ERR
		       "sdma_v2_4: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
			release_firmware(adev->sdma[i].fw);
			adev->sdma[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v2_4_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;

	return rptr;
}

/**
 * sdma_v2_4_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;

	return wptr;
}

/**
 * sdma_v2_4_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
}

/**
 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib)
{
	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
	u32 next_rptr = ring->wptr + 5;

	while ((next_rptr & 7) != 2)
		next_rptr++;

	next_rptr += 6;

	/* write next_rptr to the ring's next_rptr writeback location */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on a 8 DW boundary */
	while ((ring->wptr & 7) != 2)
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);

}

/**
 * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring == &ring->adev->sdma[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address where the fence sequence number is written
 * @seq: fence sequence number to write
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @semaphore: amdgpu semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (VI).
 */
static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
					  struct amdgpu_semaphore *semaphore,
					  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 sig = emit_wait ? 0 : 1;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) |
			  SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig));
	amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8);
	amdgpu_ring_write(ring, upper_32_bits(addr));

	return true;
}

/**
 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v2_4_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (enable == false) {
		sdma_v2_4_gfx_stop(adev);
		sdma_v2_4_rlc_stop(adev);
	}

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		ring = &adev->sdma[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v2_4_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;
	bool smc_loads_fw = false; /* XXX fix me */

	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
		return -EINVAL;

	/* halt the MEs */
	sdma_v2_4_enable(adev, false);

	if (smc_loads_fw) {
		/* XXX query SMC for fw load complete */
	} else {
		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
			hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
			amdgpu_ucode_print_sdma_hdr(&hdr->header);
			fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
			fw_data = (const __le32 *)
				(adev->sdma[i].fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
			for (j = 0; j < fw_size; j++)
				WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
			WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
		}
	}

	return 0;
}

/**
 * sdma_v2_4_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
	int r;

	if (!adev->firmware.smu_load) {
		r = sdma_v2_4_load_microcode(adev);
		if (r)
			return r;
	} else {
		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
								 AMDGPU_UCODE_ID_SDMA0);
		if (r)
			return -EINVAL;
		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
								 AMDGPU_UCODE_ID_SDMA1);
		if (r)
			return -EINVAL;
	}

	/* unhalt the MEs */
	sdma_v2_4_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v2_4_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v2_4_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v2_4_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_lock(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err1;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err1;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
		goto err1;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
				   uint64_t pe,
				   uint64_t addr, unsigned count,
				   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & AMDGPU_PTE_SYSTEM) {
				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & AMDGPU_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

/**
 * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VMID to flush
 * @pd_addr: page directory base address for the VMID
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static int sdma_v2_4_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v2_4_set_ring_funcs(adev);
	sdma_v2_4_set_buffer_funcs(adev);
	sdma_v2_4_set_vm_pte_funcs(adev);
	sdma_v2_4_set_irq_funcs(adev);

	return 0;
}

static int sdma_v2_4_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v2_4_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	ring = &adev->sdma[0].ring;
	ring->ring_obj = NULL;
	ring->use_doorbell = false;

	ring = &adev->sdma[1].ring;
	ring->ring_obj = NULL;
	ring->use_doorbell = false;

	ring = &adev->sdma[0].ring;
	sprintf(ring->name, "sdma0");
	r = amdgpu_ring_init(adev, ring, 256 * 1024,
			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
			     AMDGPU_RING_TYPE_SDMA);
	if (r)
		return r;

	ring = &adev->sdma[1].ring;
	sprintf(ring->name, "sdma1");
	r = amdgpu_ring_init(adev, ring, 256 * 1024,
			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
			     AMDGPU_RING_TYPE_SDMA);
	if (r)
		return r;

	return r;
}

static int sdma_v2_4_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_ring_fini(&adev->sdma[0].ring);
	amdgpu_ring_fini(&adev->sdma[1].ring);

	return 0;
}

static int sdma_v2_4_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v2_4_init_golden_registers(adev);

	r = sdma_v2_4_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v2_4_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v2_4_enable(adev, false);

	return 0;
}

static int sdma_v2_4_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v2_4_hw_fini(adev);
}

static int sdma_v2_4_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v2_4_hw_init(adev);
}

static bool sdma_v2_4_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v2_4_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void sdma_v2_4_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VI SDMA registers\n");
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			dev_info(adev->dev, "  VM %d:\n", j);
			dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
				 i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
			dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
				 i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}
}

static int sdma_v2_4_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		sdma_v2_4_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		sdma_v2_4_print_status((void *)adev);
	}

	return 0;
}

static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int sdma_v2_4_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	/* XXX handled via the smc on VI */
	return 0;
}

static int sdma_v2_4_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
	.early_init = sdma_v2_4_early_init,
	.late_init = NULL,
	.sw_init = sdma_v2_4_sw_init,
	.sw_fini = sdma_v2_4_sw_fini,
	.hw_init = sdma_v2_4_hw_init,
	.hw_fini = sdma_v2_4_hw_fini,
	.suspend = sdma_v2_4_suspend,
	.resume = sdma_v2_4_resume,
	.is_idle = sdma_v2_4_is_idle,
	.wait_for_idle = sdma_v2_4_wait_for_idle,
	.soft_reset = sdma_v2_4_soft_reset,
	.print_status = sdma_v2_4_print_status,
	.set_clockgating_state = sdma_v2_4_set_clockgating_state,
	.set_powergating_state = sdma_v2_4_set_powergating_state,
};

/**
 * sdma_v2_4_ring_is_lockup - Check if the DMA engine is locked up
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (VI).
 * Returns true if the engine appears to be locked up, false if not.
 */
static bool sdma_v2_4_ring_is_lockup(struct amdgpu_ring *ring)
{

	if (sdma_v2_4_is_idle(ring->adev)) {
		amdgpu_ring_lockup_update(ring);
		return false;
	}
	return amdgpu_ring_test_lockup(ring);
}

static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
	.get_rptr = sdma_v2_4_ring_get_rptr,
	.get_wptr = sdma_v2_4_ring_get_wptr,
	.set_wptr = sdma_v2_4_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = sdma_v2_4_ring_emit_ib,
	.emit_fence = sdma_v2_4_ring_emit_fence,
	.emit_semaphore = sdma_v2_4_ring_emit_semaphore,
	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
	.test_ring = sdma_v2_4_ring_test_ring,
	.test_ib = sdma_v2_4_ring_test_ib,
	.is_lockup = sdma_v2_4_ring_is_lockup,
};

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs;
	adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
	.set = sdma_v2_4_set_trap_irq_state,
	.process = sdma_v2_4_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
	.process = sdma_v2_4_process_illegal_inst_irq,
};

static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
	adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
}

/**
 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with the copy packet
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
	amdgpu_ring_write(ring, src_data);
	amdgpu_ring_write(ring, byte_count);
}

static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 7,
	.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
};

static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
	.copy_pte = sdma_v2_4_vm_copy_pte,
	.write_pte = sdma_v2_4_vm_write_pte,
	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
	.pad_ib = sdma_v2_4_vm_pad_ib,
};

static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
	}
}