/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "iceland_sdma_pkt_open.h"

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_iceland_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
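 *
 * Note that this file only drives the gfx ring on each engine;
 * the compute (RLC) queues are left untouched (see the XXX todos
 * in sdma_v2_4_rlc_stop() and sdma_v2_4_rlc_resume() below).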
 */

static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

/**
 * sdma_v2_4_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.smu_load) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}

out:
	if (err) {
		printk(KERN_ERR
		       "sdma_v2_4: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v2_4_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;

	return rptr;
}

/**
 * sdma_v2_4_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
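	/* the hardware RB pointer registers are byte based while the
	 * driver tracks ring positions in dwords, hence the shifts
	 * by two here and in set_wptr below
	 */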
	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;

	return wptr;
}

/**
 * sdma_v2_4_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
}

static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->nop |
					  SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->nop);
}

/**
 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 * @vm_id: VM id to run the IB under
 * @ctx_switch: whether this submission switches contexts
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib,
				   unsigned vm_id, bool ctx_switch)
{
	u32 vmid = vm_id & 0xf;
	u32 next_rptr = ring->wptr + 5;

	while ((next_rptr & 7) != 2)
		next_rptr++;

	next_rptr += 6;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on a 8 DW boundary */
	sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

/**
 * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
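 * The POLL_REGMEM packet requests the flush via GPU_HDP_FLUSH_REQ
 * and then polls GPU_HDP_FLUSH_DONE until this engine's bit is set.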
 */
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmHDP_DEBUG0);
	amdgpu_ring_write(ring, 1);
}

/**
 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence seq number to
 * @seq: fence seq number
 * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
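 * The rings are stopped by clearing RB_ENABLE and IB_ENABLE; if
 * one of these rings backs the TTM buffer moves, the accessible
 * VRAM size is dropped back to the CPU-visible aperture first.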
 */
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v2_4_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v2_4_gfx_stop(adev);
		sdma_v2_4_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
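 * For each instance this programs the rptr writeback address and
 * the ring base and size, then sets RB_ENABLE and IB_ENABLE and
 * runs a ring test before declaring the ring ready.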
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v2_4_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
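 * The ucode is streamed a dword at a time through the
 * SDMA0_UCODE_ADDR/SDMA0_UCODE_DATA register pair while the
 * engines are halted; the fw version is written to UCODE_ADDR
 * at the end.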
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v2_4_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * sdma_v2_4_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
	int r;

	if (!adev->firmware.smu_load) {
		r = sdma_v2_4_load_microcode(adev);
		if (r)
			return r;
	} else {
		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
						AMDGPU_UCODE_ID_SDMA0);
		if (r)
			return -EINVAL;
		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
						AMDGPU_UCODE_ID_SDMA1);
		if (r)
			return -EINVAL;
	}

	/* unhalt the MEs */
	sdma_v2_4_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v2_4_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v2_4_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v2_4_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
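 * A writeback dword is seeded with 0xCAFEDEAD and the engine is
 * asked to overwrite it with 0xDEADBEEF; the CPU then polls until
 * the new value appears or the timeout expires.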
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
	if (r)
		goto err1;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err1;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
		goto err1;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	/* drop our single reference to the fence; the duplicate
	 * fence_put() that used to follow amdgpu_ib_free() here was
	 * a double put and has been removed
	 */
	fence_put(f);
	amdgpu_ib_free(adev, &ib, NULL);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pages_addr: optional DMA addresses to use for mapping
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
				   const dma_addr_t *pages_addr, uint64_t pe,
				   uint64_t addr, unsigned count,
				   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			value = amdgpu_vm_map_gart(pages_addr, addr);
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
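 * The GEN_PTEPDE packet has the engine generate the entries
 * itself: it writes the initial value, adds incr and repeats
 * count times, so it is only usable for physically contiguous
 * (vram) mappings.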
 */
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

/**
 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VI).
 */
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xfffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM context id to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
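 * VM contexts 0-7 and 8-15 keep their page table base addresses
 * in separate register banks. The flush is requested by writing
 * 1 << vm_id to VM_INVALIDATE_REQUEST; a POLL_REGMEM read of the
 * register follows so the flush lands before later packets.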
 */
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static int sdma_v2_4_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = SDMA_MAX_INSTANCE;

	sdma_v2_4_set_ring_funcs(adev);
	sdma_v2_4_set_buffer_funcs(adev);
	sdma_v2_4_set_vm_pte_funcs(adev);
	sdma_v2_4_set_irq_funcs(adev);

	return 0;
}

static int sdma_v2_4_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v2_4_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
				     AMDGPU_RING_TYPE_SDMA);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v2_4_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int sdma_v2_4_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v2_4_init_golden_registers(adev);

	r = sdma_v2_4_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v2_4_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v2_4_enable(adev, false);

	return 0;
}

static int sdma_v2_4_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v2_4_hw_fini(adev);
}

static int sdma_v2_4_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v2_4_hw_init(adev);
}

static bool sdma_v2_4_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v2_4_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v2_4_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int sdma_v2_4_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	/* XXX handled via the smc on VI */
	return 0;
}

static int sdma_v2_4_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
	.name = "sdma_v2_4",
	.early_init = sdma_v2_4_early_init,
	.late_init = NULL,
	.sw_init = sdma_v2_4_sw_init,
	.sw_fini = sdma_v2_4_sw_fini,
	.hw_init = sdma_v2_4_hw_init,
	.hw_fini = sdma_v2_4_hw_fini,
	.suspend = sdma_v2_4_suspend,
	.resume = sdma_v2_4_resume,
	.is_idle = sdma_v2_4_is_idle,
	.wait_for_idle = sdma_v2_4_wait_for_idle,
	.soft_reset = sdma_v2_4_soft_reset,
	.set_clockgating_state = sdma_v2_4_set_clockgating_state,
	.set_powergating_state = sdma_v2_4_set_powergating_state,
};

static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
	.get_rptr = sdma_v2_4_ring_get_rptr,
	.get_wptr = sdma_v2_4_ring_get_wptr,
	.set_wptr = sdma_v2_4_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = sdma_v2_4_ring_emit_ib,
	.emit_fence = sdma_v2_4_ring_emit_fence,
	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
	.emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
	.test_ring = sdma_v2_4_ring_test_ring,
	.test_ib = sdma_v2_4_ring_test_ib,
	.insert_nop = sdma_v2_4_ring_insert_nop,
	.pad_ib = sdma_v2_4_ring_pad_ib,
};

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
	.set = sdma_v2_4_set_trap_irq_state,
	.process = sdma_v2_4_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
	.process = sdma_v2_4_process_illegal_inst_irq,
};

static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
}

/**
 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
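 * The CONST_FILL packet repeats the 32-bit src_data pattern
 * across byte_count bytes of the destination.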
 */
static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 7,
	.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
};

static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
	.copy_pte = sdma_v2_4_vm_copy_pte,
	.write_pte = sdma_v2_4_vm_write_pte,
	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};

static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}