/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs >> 2];
}

static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_ib *ib,
				unsigned vmid, bool ctx_switch)
{
	/* The indirect buffer packet must end on an 8 DW boundary in the
	 * DMA ring. Pad as necessary with NOPs.
	 */
	while ((lower_32_bits(ring->wptr) & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
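/*
 * Editorial note (illustrative, not part of the original source): the
 * INDIRECT_BUFFER packet emitted above is 3 DWs long (header, address low,
 * length | address high), so padding until (wptr & 7) == 5 places it in DW
 * slots 5, 6 and 7 of an 8-DW group, i.e. the packet ends exactly on the
 * 8 DW boundary the DMA ring requires:
 *
 *   wptr % 8:  0  1  2  3  4   5  6  7
 *              NOP padding ... [IB packet]  -> next write starts aligned
 */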
/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address where the fence sequence number is written
 * @seq: fence sequence number to write
 * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}
	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

static void si_dma_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl;
	unsigned i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* disable the ring buffer for this instance */
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
		ring->sched.ready = false;
	}
}

static int si_dma_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
	int i, r;
	uint64_t rptr_addr;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		ring->sched.ready = true;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}
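/*
 * Editorial note (illustrative, not part of the original source): for a
 * hypothetical 256 KiB ring, the size programming in si_dma_start() works
 * out to
 *
 *   rb_bufsz = order_base_2(0x40000 / 4) = order_base_2(0x10000) = 16
 *   rb_cntl  = 16 << 1 = 0x20
 *
 * i.e. DMA_RB_CNTL carries log2 of the ring size in dwords in its size
 * field, before the write-back and enable bits are OR'd in.
 */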
/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout to wait for the IB test fence, in jiffies
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}
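/*
 * Editorial note (illustrative, not part of the original source): both tests
 * above use the same minimal 4-DW write packet to replace the 0xCAFEDEAD
 * marker in the write-back slot with 0xDEADBEEF:
 *
 *   DW0: DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1)  - write one dword
 *   DW1: lower_32_bits(gpu_addr)                   - destination addr, low bits
 *   DW2: upper_32_bits(gpu_addr) & 0xff            - destination addr, high bits
 *   DW3: 0xDEADBEEF                                - payload
 */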
/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint64_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dw.
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}
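/*
 * Editorial note (illustrative, not part of the original source):
 * si_dma_vm_set_pte_pde() caps each PTE_PDE packet at 0xFFFFE dwords,
 * i.e. 0xFFFFE / 2 = 524287 page entries per packet. A hypothetical
 * 1048576-entry update is therefore split into three packets covering
 * 524287 + 524287 + 2 entries, with pe advanced by ndw * 4 bytes and
 * addr by (ndw / 2) * incr between packets.
 */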
/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM id to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
				  uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
}

static int si_dma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);

	return 0;
}

static int si_dma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int si_dma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int si_dma_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dma_stop(adev);

	return 0;
}

static int si_dma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
		return false;

	return true;
}

static int si_dma_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
	return 0;
}

static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     unsigned type,
				     enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	if (entry->src_id == 224)
		amdgpu_fence_process(&adev->sdma.instance[0].ring);
	else
		amdgpu_fence_process(&adev->sdma.instance[1].ring);
	return 0;
}

static int si_dma_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	u32 orig, data, offset;
	int i;
	bool enable;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE) ? true : false;
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			data = 0xff000000;
			if (data != orig)
				WREG32(DMA_CLK_CTRL + offset, data);
		}
	}

	return 0;
}

static int si_dma_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(DMA_PGFSM_WRITE, 0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);

	return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
	.name = "si_dma",
	.early_init = si_dma_early_init,
	.late_init = NULL,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
	.emit_wreg = si_dma_ring_emit_wreg,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
}
/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to fill
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				    uint32_t src_data,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}

static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.copy_num_dw = 5,
	.emit_copy_buffer = si_dma_emit_copy_buffer,

	.fill_max_bytes = 0xffff8,
	.fill_num_dw = 4,
	.emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &si_dma_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte_num_dw = 5,
	.copy_pte = si_dma_vm_copy_pte,

	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	struct drm_gpu_scheduler *sched;
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		sched = &adev->sdma.instance[i].ring.sched;
		adev->vm_manager.vm_pte_rqs[i] =
			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	}
	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dma_ip_funcs,
};