/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
        DMA0_REGISTER_OFFSET,
        DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
        return ring->adev->wb.wb[ring->rptr_offs >> 2];
}

static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

        return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

        WREG32(DMA_RB_WPTR + sdma_offsets[me],
               (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
                                struct amdgpu_job *job,
                                struct amdgpu_ib *ib,
                                uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        /* The indirect buffer packet must end on an 8 DW boundary in the
         * DMA ring. Pad as necessary with NOPs.
         */
        while ((lower_32_bits(ring->wptr) & 7) != 5)
                amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
        amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
        amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
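
/* Note on the packet stream emitted below (explanatory comment, derived from
 * the code in this file): each SI DMA packet begins with a DMA_PACKET()
 * header dword, and GPU addresses are split into a 32-bit low dword followed
 * by the upper bits masked to 8 bits (the "& 0xff"/"& 0xFF" on
 * upper_32_bits() throughout), which suggests the engine decodes 40-bit
 * addresses. The ring write pointer is kept in dwords while the WPTR/RPTR
 * registers count bytes, hence the "<< 2" / ">> 2" conversions above.
 */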

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                   unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

        /* write the fence */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
        amdgpu_ring_write(ring, seq);
        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
                amdgpu_ring_write(ring, addr & 0xfffffffc);
                amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }
        /* generate an interrupt */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

static void si_dma_stop(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl;
        unsigned i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                /* disable the ring buffer for this instance */
                rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~DMA_RB_ENABLE;
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
        }
}

static int si_dma_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
        int i, r;
        uint64_t rptr_addr;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;

                WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
                WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
                rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
                WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

                /* set the rptr writeback address */
                rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

                WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
                WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

                rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

                WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

                /* enable DMA IBs */
                ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
                ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
                WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

                dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
                dma_cntl &= ~CTXEMPTY_INT_ENABLE;
                WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

                ring->wptr = 0;
                WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

                ring->sched.ready = true;

                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;

                if (adev->mman.buffer_funcs_ring == ring)
                        amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }

        return 0;
}
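
/* Both tests below follow the same pattern: seed a writeback slot with
 * 0xCAFEDEAD, ask the DMA engine to overwrite it with 0xDEADBEEF (directly
 * on the ring, or through an indirect buffer), then poll the slot to confirm
 * the engine actually executed the write.
 */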

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ring_alloc(ring, 4);
        if (r)
                goto error_free_wb;

        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

error_free_wb:
        amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        unsigned index;
        u32 tmp = 0;
        u64 gpu_addr;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256,
                          AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;

        ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
        ib.ptr[3] = 0xDEADBEEF;
        ib.length_dw = 4;
        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;

err1:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err0:
        amdgpu_device_wb_free(adev, index);
        return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
                               uint64_t pe, uint64_t src,
                               unsigned count)
{
        unsigned bytes = count * 8;

        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                              1, 0, 0, bytes);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
        ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
                                uint64_t value, unsigned count,
                                uint32_t incr)
{
        unsigned ndw = count * 2;

        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        for (; ndw > 0; ndw -= 2) {
                ib->ptr[ib->length_dw++] = lower_32_bits(value);
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                value += incr;
        }
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
                                  uint64_t pe,
                                  uint64_t addr, unsigned count,
                                  uint32_t incr, uint64_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                if (flags & AMDGPU_PTE_VALID)
                        value = addr;
                else
                        value = 0;

                /* for physically contiguous pages (vram) */
                ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
                ib->ptr[ib->length_dw++] = pe; /* dst addr */
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
                ib->ptr[ib->length_dw++] = upper_32_bits(flags);
                ib->ptr[ib->length_dw++] = value; /* value */
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                ib->ptr[ib->length_dw++] = incr; /* increment size */
                ib->ptr[ib->length_dw++] = 0;
                pe += ndw * 4;
                addr += (ndw / 2) * incr;
                count -= ndw / 2;
        }
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}
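
/* The 8-dword padding above mirrors the alignment handling in
 * si_dma_ring_emit_ib(): the INDIRECT_BUFFER packet has to end on an
 * 8-dword boundary, so IBs are padded with NOPs to a multiple of 8 dwords
 * and the ring itself is padded before the IB packet is written.
 */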

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        /* wait for idle */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
                          (1 << 27)); /* Poll memory */
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, seq); /* value */
        amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                      unsigned vmid, uint64_t pd_addr)
{
        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* wait for invalidate to complete */
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
        amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
        amdgpu_ring_write(ring, 0xff << 16); /* retry */
        amdgpu_ring_write(ring, 1 << vmid); /* mask */
        amdgpu_ring_write(ring, 0); /* value */
        amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
                                  uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        amdgpu_ring_write(ring, (0xf << 16) | reg);
        amdgpu_ring_write(ring, val);
}

static int si_dma_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->sdma.num_instances = 2;

        si_dma_set_ring_funcs(adev);
        si_dma_set_buffer_funcs(adev);
        si_dma_set_vm_pte_funcs(adev);
        si_dma_set_irq_funcs(adev);

        return 0;
}

static int si_dma_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* DMA0 trap event */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
                              &adev->sdma.trap_irq);
        if (r)
                return r;

        /* DMA1 trap event */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
                              &adev->sdma.trap_irq);
        if (r)
                return r;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                ring->ring_obj = NULL;
                ring->use_doorbell = false;
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
                                     AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }

        return r;
}

static int si_dma_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);

        return 0;
}

static int si_dma_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        si_dma_stop(adev);

        return 0;
}

static int si_dma_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(SRBM_STATUS2);

        if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
                return false;

        return true;
}

static int si_dma_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (si_dma_is_idle(handle))
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
        DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
        return 0;
}

static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
                                     struct amdgpu_irq_src *src,
                                     unsigned type,
                                     enum amdgpu_interrupt_state state)
{
        u32 sdma_cntl;

        switch (type) {
        case AMDGPU_SDMA_IRQ_INSTANCE0:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
                        sdma_cntl &= ~TRAP_ENABLE;
                        WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
                        sdma_cntl |= TRAP_ENABLE;
                        WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        case AMDGPU_SDMA_IRQ_INSTANCE1:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
                        sdma_cntl &= ~TRAP_ENABLE;
                        WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
                        sdma_cntl |= TRAP_ENABLE;
                        WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }
        return 0;
}

static int si_dma_process_trap_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        if (entry->src_id == 224)
                amdgpu_fence_process(&adev->sdma.instance[0].ring);
        else
                amdgpu_fence_process(&adev->sdma.instance[1].ring);
        return 0;
}

static int si_dma_set_clockgating_state(void *handle,
                                        enum amd_clockgating_state state)
{
        u32 orig, data, offset;
        int i;
        bool enable;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        enable = (state == AMD_CG_STATE_GATE);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        if (i == 0)
                                offset = DMA0_REGISTER_OFFSET;
                        else
                                offset = DMA1_REGISTER_OFFSET;
                        orig = data = RREG32(DMA_POWER_CNTL + offset);
                        data &= ~MEM_POWER_OVERRIDE;
                        if (data != orig)
                                WREG32(DMA_POWER_CNTL + offset, data);
                        WREG32(DMA_CLK_CTRL + offset, 0x00000100);
                }
        } else {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        if (i == 0)
                                offset = DMA0_REGISTER_OFFSET;
                        else
                                offset = DMA1_REGISTER_OFFSET;
                        orig = data = RREG32(DMA_POWER_CNTL + offset);
                        data |= MEM_POWER_OVERRIDE;
                        if (data != orig)
                                WREG32(DMA_POWER_CNTL + offset, data);

                        orig = data = RREG32(DMA_CLK_CTRL + offset);
                        data = 0xff000000;
                        if (data != orig)
                                WREG32(DMA_CLK_CTRL + offset, data);
                }
        }

        return 0;
}

static int si_dma_set_powergating_state(void *handle,
                                        enum amd_powergating_state state)
{
        u32 tmp;

        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32(DMA_PGFSM_WRITE, 0x00002000);
        WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

        for (tmp = 0; tmp < 5; tmp++)
                WREG32(DMA_PGFSM_WRITE, 0);

        return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
        .name = "si_dma",
        .early_init = si_dma_early_init,
        .late_init = NULL,
        .sw_init = si_dma_sw_init,
        .sw_fini = si_dma_sw_fini,
        .hw_init = si_dma_hw_init,
        .hw_fini = si_dma_hw_fini,
        .suspend = si_dma_suspend,
        .resume = si_dma_resume,
        .is_idle = si_dma_is_idle,
        .wait_for_idle = si_dma_wait_for_idle,
        .soft_reset = si_dma_soft_reset,
        .set_clockgating_state = si_dma_set_clockgating_state,
        .set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
        .type = AMDGPU_RING_TYPE_SDMA,
        .align_mask = 0xf,
        .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
        .support_64bit_ptrs = false,
        .get_rptr = si_dma_ring_get_rptr,
        .get_wptr = si_dma_ring_get_wptr,
        .set_wptr = si_dma_ring_set_wptr,
        .emit_frame_size =
                3 + 3 + /* hdp flush / invalidate */
                6 + /* si_dma_ring_emit_pipeline_sync */
                SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
                9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
        .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
        .emit_ib = si_dma_ring_emit_ib,
        .emit_fence = si_dma_ring_emit_fence,
        .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
        .emit_vm_flush = si_dma_ring_emit_vm_flush,
        .test_ring = si_dma_ring_test_ring,
        .test_ib = si_dma_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = si_dma_ring_pad_ib,
        .emit_wreg = si_dma_ring_emit_wreg,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
        .set = si_dma_set_trap_irq_state,
        .process = si_dma_process_trap_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
        adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: is this a secure operation
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
                                    uint64_t src_offset,
                                    uint64_t dst_offset,
                                    uint32_t byte_count,
                                    bool tmz)
{
        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                              1, 0, 0, byte_count);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
        ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
                                    uint32_t src_data,
                                    uint64_t dst_offset,
                                    uint32_t byte_count)
{
        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
                                              0, 0, 0, byte_count / 4);
        ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
        ib->ptr[ib->length_dw++] = src_data;
        ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}

static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
        .copy_max_bytes = 0xffff8,
        .copy_num_dw = 5, /* matches the 5 dwords emitted by si_dma_emit_copy_buffer */
        .emit_copy_buffer = si_dma_emit_copy_buffer,

        .fill_max_bytes = 0xffff8,
        .fill_num_dw = 4, /* matches the 4 dwords emitted by si_dma_emit_fill_buffer */
        .emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
        adev->mman.buffer_funcs = &si_dma_buffer_funcs;
        adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
        .copy_pte_num_dw = 5,
        .copy_pte = si_dma_vm_copy_pte,

        .write_pte = si_dma_vm_write_pte,
        .set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
        unsigned i;

        adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
        for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->vm_manager.vm_pte_scheds[i] =
                        &adev->sdma.instance[i].ring.sched;
        }
        adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &si_dma_ip_funcs,
};