/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
MODULE_FIRMWARE("radeon/kabini_sdma.bin");
MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
MODULE_FIRMWARE("radeon/mullins_sdma.bin");
MODULE_FIRMWARE("radeon/mullins_sdma1.bin");

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default: BUG();
	}

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma[i].fw);
	}
out:
	if (err) {
		printk(KERN_ERR
		       "cik_sdma: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
			release_firmware(adev->sdma[i].fw);
			adev->sdma[i].fw = NULL;
		}
	}
	return err;
}

/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;

	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}

/**
 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	u32 extra_bits = (ib->vm ?
			  ib->vm->ids[ring->idx].id : 0) & 0xf;
	u32 next_rptr = ring->wptr + 5;

	while ((next_rptr & 7) != 4)
		next_rptr++;

	next_rptr += 4;
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on an 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, ib->length_dw);

}

/**
 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ring == &ring->adev->sdma[0].ring)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address where the fence sequence number is written
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}

/**
 * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @semaphore: amdgpu semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
					 struct amdgpu_semaphore *semaphore,
					 bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	amdgpu_ring_write(ring, addr & 0xfffffff8);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);

	return true;
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
	u32 rb_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
	u32 me_cntl;
	int i;

	if (enable == false) {
		cik_sdma_gfx_stop(adev);
		cik_sdma_rlc_stop(adev);
	}

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
		else
			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
	}
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		ring = &adev->sdma[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
			/* XXX SDMA RLC - todo */
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		/* enable DMA RB */
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);

		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
		return -EINVAL;

	/* halt the MEs */
	cik_sdma_enable(adev, false);

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		fw_data = (const __le32 *)
			(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
	}

	return 0;
}

/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
	int r;

	r = cik_sdma_load_microcode(adev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(adev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to
 * memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_lock(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r) {
		amdgpu_wb_free(adev, index);
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r) {
		amdgpu_ib_free(adev, &ib);
		amdgpu_wb_free(adev, index);
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = amdgpu_fence_wait(ib.fence, false);
	if (r) {
		amdgpu_ib_free(adev, &ib);
		amdgpu_wb_free(adev, index);
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ib.fence->ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	amdgpu_ib_free(adev, &ib);
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
				 uint64_t pe, uint64_t src,
				 unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & AMDGPU_PTE_SYSTEM) {
				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & AMDGPU_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
				    uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs until its size is a multiple of 8 dwords.
 */
static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM ID of the page tables to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
	}
}

static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	} else {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	}
}

static int cik_sdma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_set_ring_funcs(adev);
	cik_sdma_set_irq_funcs(adev);
	cik_sdma_set_buffer_funcs(adev);
	cik_sdma_set_vm_pte_funcs(adev);

	return 0;
}

static int cik_sdma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = cik_sdma_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
	if (r)
		return r;

	ring = &adev->sdma[0].ring;
	ring->ring_obj = NULL;

	ring = &adev->sdma[1].ring;
	ring->ring_obj = NULL;

	ring = &adev->sdma[0].ring;
	sprintf(ring->name, "sdma0");
	r = amdgpu_ring_init(adev, ring, 256 * 1024,
			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
			     AMDGPU_RING_TYPE_SDMA);
	if (r)
		return r;

	ring = &adev->sdma[1].ring;
	sprintf(ring->name, "sdma1");
	r = amdgpu_ring_init(adev, ring, 256 * 1024,
			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
			     AMDGPU_RING_TYPE_SDMA);
	if (r)
		return r;

	return r;
}

static int cik_sdma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_ring_fini(&adev->sdma[0].ring);
	amdgpu_ring_fini(&adev->sdma[1].ring);

	return 0;
}

static int cik_sdma_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = cik_sdma_start(adev);
	if (r)
		return r;

	return r;
}

static int cik_sdma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_enable(adev, false);

	return 0;
}

static int cik_sdma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_fini(adev);
}

static int cik_sdma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_init(adev);
}

static bool cik_sdma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int cik_sdma_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void cik_sdma_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "CIK SDMA registers\n");
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			dev_info(adev->dev, "  VM %d:\n", j);
			dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
				 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
			dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
				 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}
}

static int cik_sdma_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		cik_sdma_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		cik_sdma_print_status((void *)adev);
	}

	return 0;
}

static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}

	return 0;
}

static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int cik_sdma_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	cik_enable_sdma_mgcg(adev, gate);
	cik_enable_sdma_mgls(adev, gate);

	return 0;
}

static int cik_sdma_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs cik_sdma_ip_funcs = {
	.early_init = cik_sdma_early_init,
	.late_init = NULL,
	.sw_init = cik_sdma_sw_init,
	.sw_fini = cik_sdma_sw_fini,
	.hw_init = cik_sdma_hw_init,
	.hw_fini = cik_sdma_hw_fini,
	.suspend = cik_sdma_suspend,
	.resume = cik_sdma_resume,
	.is_idle = cik_sdma_is_idle,
	.wait_for_idle = cik_sdma_wait_for_idle,
	.soft_reset = cik_sdma_soft_reset,
	.print_status = cik_sdma_print_status,
	.set_clockgating_state = cik_sdma_set_clockgating_state,
	.set_powergating_state = cik_sdma_set_powergating_state,
};

/**
 * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring)
{
	if (cik_sdma_is_idle(ring->adev)) {
		amdgpu_ring_lockup_update(ring);
		return false;
	}
	return amdgpu_ring_test_lockup(ring);
}

static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.get_rptr = cik_sdma_ring_get_rptr,
	.get_wptr = cik_sdma_ring_get_wptr,
	.set_wptr = cik_sdma_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = cik_sdma_ring_emit_ib,
	.emit_fence = cik_sdma_ring_emit_fence,
	.emit_semaphore = cik_sdma_ring_emit_semaphore,
	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
	.test_ring = cik_sdma_ring_test_ring,
	.test_ib = cik_sdma_ring_test_ib,
	.is_lockup = cik_sdma_ring_is_lockup,
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
	adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
	.set = cik_sdma_set_trap_irq_state,
	.process = cik_sdma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
	.process = cik_sdma_process_illegal_inst_irq,
};

static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
	adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}

/**
 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (CIK).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
				      uint64_t src_offset,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, byte_count);
	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
	amdgpu_ring_write(ring, lower_32_bits(src_offset));
	amdgpu_ring_write(ring, upper_32_bits(src_offset));
	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
}

/**
 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (CIK).
 */
static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring,
				      uint32_t src_data,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
	amdgpu_ring_write(ring, src_data);
	amdgpu_ring_write(ring, byte_count);
}

static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = cik_sdma_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
};

static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
	.copy_pte = cik_sdma_vm_copy_pte,
	.write_pte = cik_sdma_vm_write_pte,
	.set_pte_pde = cik_sdma_vm_set_pte_pde,
	.pad_ib = cik_sdma_vm_pad_ib,
};

static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
	}
}