/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
MODULE_FIRMWARE("radeon/kabini_sdma.bin");
MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
MODULE_FIRMWARE("radeon/mullins_sdma.bin");
MODULE_FIRMWARE("radeon/mullins_sdma1.bin");

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
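
/*
 * All sDMA packets below are built with the SDMA_PACKET() macro from
 * cikd.h.  As an illustration (layout assumed from that header, not
 * restated authoritatively here), the 32-bit packet header packs the
 * opcode into bits 7:0, the sub-opcode into bits 15:8, and
 * packet-specific extra bits into bits 31:16, roughly:
 *
 *	header = ((extra & 0xFFFF) << 16) | ((sub_op & 0xFF) << 8) | (op & 0xFF)
 *
 * e.g. SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 * yields a "write linear data" header followed by packet-specific dwords.
 */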
/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default: BUG();
	}

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma[i].fw);
		if (err)
			goto out;
	}
out:
	if (err) {
		printk(KERN_ERR
		       "cik_sdma: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
			release_firmware(adev->sdma[i].fw);
			adev->sdma[i].fw = NULL;
		}
	}
	return err;
}

/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;

	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
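
/*
 * Worked example for the rptr/wptr conversions above: the RB pointer
 * registers and the write-back slot hold a byte offset into the ring,
 * while the driver-side ring->wptr is a dword index.  Masking with
 * 0x3fffc keeps the offset dword aligned and inside the register field,
 * and the >> 2 / << 2 pair converts bytes to dwords and back, e.g. a
 * hardware byte offset of 0x40 corresponds to dword index 0x10.
 */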
/**
 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
	u32 next_rptr = ring->wptr + 5;

	while ((next_rptr & 7) != 4)
		next_rptr++;

	next_rptr += 4;
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on an 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ring == &ring->adev->sdma[0].ring)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence seq number to
 * @seq: fence seq number to write
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}
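
/*
 * Alignment arithmetic behind cik_sdma_ring_emit_ib(), for reference:
 * the INDIRECT_BUFFER packet is 4 dwords, so for it to end on an
 * 8-dword boundary it must start at wptr % 8 == 4.  E.g. starting at
 * wptr == 17: next_rptr = 17 + 5 = 22, rounded up to 28 (the next
 * 8n+4 slot), plus 4 = 32.  The 5-dword write packet then advances
 * wptr to 22, six NOPs pad it to 28, and the 4-dword IB packet ends
 * exactly at 32 == next_rptr.
 */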
/**
 * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @semaphore: amdgpu semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
					 struct amdgpu_semaphore *semaphore,
					 bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	amdgpu_ring_write(ring, addr & 0xfffffff8);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);

	return true;
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
	u32 rb_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
	u32 me_cntl;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(adev);
		cik_sdma_rlc_stop(adev);
	}

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
		else
			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
	}
}
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		ring = &adev->sdma[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
			/* XXX SDMA RLC - todo */
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		/* enable DMA RB */
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);

		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}
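
/*
 * Sizing example for the RB_CNTL programming above: the RB size field
 * (starting at bit 1, hence the << 1) holds log2 of the ring size in
 * dwords.  The gfx rings are created with 256 KiB ring buffers in
 * cik_sdma_sw_init() below, i.e. 65536 dwords, so rb_bufsz =
 * order_base_2(65536) = 16 and the initial rb_cntl value is 16 << 1 =
 * 0x20 before the enable/writeback bits are OR'ed in.
 */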
/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
		return -EINVAL;

	/* halt the MEs */
	cik_sdma_enable(adev, false);

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		fw_data = (const __le32 *)
			(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
	}

	return 0;
}

/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
	int r;

	r = cik_sdma_load_microcode(adev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(adev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}
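
/*
 * Note on the ucode upload loop above: UCODE_ADDR is reset to 0 and
 * the whole image is then streamed through the single UCODE_DATA
 * register; the hardware address presumably auto-increments on each
 * data write (assumed behavior, not spelled out in this file).
 * fw_size is in dwords (ucode_size_bytes / 4), so e.g. an 8 KiB image
 * results in 2048 UCODE_DATA writes.
 */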
/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_lock(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r) {
		amdgpu_wb_free(adev, index);
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r) {
		amdgpu_ib_free(adev, &ib);
		amdgpu_wb_free(adev, index);
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = amdgpu_fence_wait(ib.fence, false);
	if (r) {
		amdgpu_ib_free(adev, &ib);
		amdgpu_wb_free(adev, index);
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ib.fence->ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	amdgpu_ib_free(adev, &ib);
	amdgpu_wb_free(adev, index);
	return r;
}
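
/*
 * Both tests above emit the same 5-dword "write linear" packet, once
 * directly on the ring and once through an IB:
 *
 *	dw0: SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 *	dw1: lower 32 bits of the destination GPU address
 *	dw2: upper 32 bits of the destination GPU address
 *	dw3: number of data dwords that follow (1 here)
 *	dw4: payload (0xDEADBEEF)
 *
 * The write-back slot is pre-seeded with 0xCAFEDEAD, so polling for
 * 0xDEADBEEF distinguishes "engine executed the packet" from "memory
 * never written".
 */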
/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
				 uint64_t pe, uint64_t src,
				 unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
						       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
						       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & AMDGPU_PTE_SYSTEM) {
				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & AMDGPU_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = lower_32_bits(value);
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}
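
/*
 * Chunking arithmetic for cik_sdma_vm_copy_pte(), as a worked example:
 * each COPY packet moves at most 0x1FFFF8 bytes, i.e. 0x1FFFF8 / 8 =
 * 0x3FFFF = 262143 PTEs.  Updating count = 300000 entries therefore
 * emits two 7-dword packets: one covering 262143 entries and one
 * covering the remaining 37857.  cik_sdma_vm_write_pte() is limited
 * the same way through ndw (two dwords per PTE, at most 0xFFFFE data
 * dwords per WRITE packet).
 */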
/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
				    uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = lower_32_bits(value); /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}

/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords (CIK).
 */
static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM ID to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
	}
}
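
/*
 * Worked example for cik_sdma_vm_set_pte_pde(): a single
 * GENERATE_PTE_PDE packet is always 10 dwords, independent of how many
 * entries it generates (up to 0x7FFFF per packet).  Mapping 1 MiB of
 * contiguous VRAM with 4 KiB pages is count = 256 entries, so one
 * packet suffices; the engine writes 256 PTEs of (value | flags) with
 * addr stepped by incr = 4096 each time, and the driver advances pe by
 * 256 * 8 = 2048 bytes for the next iteration.
 */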
static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	} else {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	}
}

static int cik_sdma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_set_ring_funcs(adev);
	cik_sdma_set_irq_funcs(adev);
	cik_sdma_set_buffer_funcs(adev);
	cik_sdma_set_vm_pte_funcs(adev);

	return 0;
}

static int cik_sdma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = cik_sdma_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
	if (r)
		return r;

	ring = &adev->sdma[0].ring;
	ring->ring_obj = NULL;

	ring = &adev->sdma[1].ring;
	ring->ring_obj = NULL;

	ring = &adev->sdma[0].ring;
	sprintf(ring->name, "sdma0");
	r = amdgpu_ring_init(adev, ring, 256 * 1024,
			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
			     AMDGPU_RING_TYPE_SDMA);
	if (r)
		return r;

	ring = &adev->sdma[1].ring;
	sprintf(ring->name, "sdma1");
	r = amdgpu_ring_init(adev, ring, 256 * 1024,
			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
			     AMDGPU_RING_TYPE_SDMA);
	if (r)
		return r;

	return r;
}

static int cik_sdma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_ring_fini(&adev->sdma[0].ring);
	amdgpu_ring_fini(&adev->sdma[1].ring);

	return 0;
}

static int cik_sdma_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = cik_sdma_start(adev);
	if (r)
		return r;

	return r;
}

static int cik_sdma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_enable(adev, false);

	return 0;
}

static int cik_sdma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_fini(adev);
}

static int cik_sdma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_init(adev);
}
static bool cik_sdma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int cik_sdma_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void cik_sdma_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "CIK SDMA registers\n");
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			dev_info(adev->dev, "  VM %d:\n", j);
			dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
				 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
			dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
				 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}
}
static int cik_sdma_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		u32 f32_cntl = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		f32_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, f32_cntl);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		u32 f32_cntl = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		f32_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, f32_cntl);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		cik_sdma_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		cik_sdma_print_status((void *)adev);
	}

	return 0;
}

static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}

	return 0;
}

static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int cik_sdma_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	cik_enable_sdma_mgcg(adev, gate);
	cik_enable_sdma_mgls(adev, gate);

	return 0;
}
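
/*
 * IV ring_id decoding used in cik_sdma_process_trap_irq(), by example:
 * the low two bits select the SDMA instance and bits 3:2 select the
 * queue, so ring_id == 0x0 is SDMA0's gfx queue, 0x1 is SDMA1's gfx
 * queue, and e.g. 0x4 (instance 0, queue 1) would be a compute queue,
 * which is still unhandled above.
 */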
static int cik_sdma_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs cik_sdma_ip_funcs = {
	.early_init = cik_sdma_early_init,
	.late_init = NULL,
	.sw_init = cik_sdma_sw_init,
	.sw_fini = cik_sdma_sw_fini,
	.hw_init = cik_sdma_hw_init,
	.hw_fini = cik_sdma_hw_fini,
	.suspend = cik_sdma_suspend,
	.resume = cik_sdma_resume,
	.is_idle = cik_sdma_is_idle,
	.wait_for_idle = cik_sdma_wait_for_idle,
	.soft_reset = cik_sdma_soft_reset,
	.print_status = cik_sdma_print_status,
	.set_clockgating_state = cik_sdma_set_clockgating_state,
	.set_powergating_state = cik_sdma_set_powergating_state,
};

/**
 * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring)
{
	if (cik_sdma_is_idle(ring->adev)) {
		amdgpu_ring_lockup_update(ring);
		return false;
	}
	return amdgpu_ring_test_lockup(ring);
}

static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.get_rptr = cik_sdma_ring_get_rptr,
	.get_wptr = cik_sdma_ring_get_wptr,
	.set_wptr = cik_sdma_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = cik_sdma_ring_emit_ib,
	.emit_fence = cik_sdma_ring_emit_fence,
	.emit_semaphore = cik_sdma_ring_emit_semaphore,
	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
	.test_ring = cik_sdma_ring_test_ring,
	.test_ib = cik_sdma_ring_test_ib,
	.is_lockup = cik_sdma_ring_is_lockup,
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
	adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
	.set = cik_sdma_set_trap_irq_state,
	.process = cik_sdma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
	.process = cik_sdma_process_illegal_inst_irq,
};

static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
	adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}
/**
 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (CIK).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
				      uint64_t src_offset,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, byte_count);
	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
	amdgpu_ring_write(ring, lower_32_bits(src_offset));
	amdgpu_ring_write(ring, upper_32_bits(src_offset));
	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
}

/**
 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (CIK).
 */
static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring,
				      uint32_t src_data,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
	amdgpu_ring_write(ring, src_data);
	amdgpu_ring_write(ring, byte_count);
}

static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = cik_sdma_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
};

static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
	.copy_pte = cik_sdma_vm_copy_pte,
	.write_pte = cik_sdma_vm_write_pte,
	.set_pte_pde = cik_sdma_vm_set_pte_pde,
	.pad_ib = cik_sdma_vm_pad_ib,
};

static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
	}
}
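
/*
 * Sizing example for the buffer_funcs limits above: the buffer copy
 * path presumably splits a move into packets of at most copy_max_bytes
 * each, reserving copy_num_dw ring dwords per packet (the 7 writes in
 * cik_sdma_emit_copy_buffer()).  Copying a 4 MiB buffer would then
 * need three COPY packets - two full 0x1fffff-byte chunks cover
 * 4194302 bytes and a third moves the last 2 bytes - for 21 ring
 * dwords in total.
 */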