/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(void *handle);

MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
MODULE_FIRMWARE("radeon/kabini_sdma.bin");
MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
MODULE_FIRMWARE("radeon/mullins_sdma.bin");
MODULE_FIRMWARE("radeon/mullins_sdma1.bin");

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);


static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
	}
out:
	if (err) {
		pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
					  SDMA_NOP_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

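/*
 * Note on the burst NOP above: firmware that reports feature_version >= 20
 * (see cik_sdma_load_microcode()) sets burst_nop, so the first padding
 * packet carries SDMA_NOP_COUNT(count - 1) and the engine consumes the
 * remaining padding dwords as part of that single NOP rather than as
 * individual NOP packets.
 */
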
/**
 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	u32 extra_bits = vm_id & 0xf;

	/* IB packet must end on a 8 DW boundary */
	cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, ib->length_dw);

}

/**
 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, mmHDP_DEBUG0);
	amdgpu_ring_write(ring, 1);
}

/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: fence sequence number
 * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}

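/*
 * cik_sdma_ring_emit_fence() above stores the sequence number with FENCE
 * packets: each FENCE packet writes a single dword, so a 64-bit seqno
 * (AMDGPU_FENCE_FLAG_64BIT) takes two packets, the low dword at addr and
 * the high dword at addr + 4, followed by a TRAP packet to raise the
 * completion interrupt.
 */
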
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
	u32 me_cntl;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(adev);
		cik_sdma_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
		else
			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
	}
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
			/* XXX SDMA RLC - todo */
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);

		/* enable DMA RB */
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);

		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;
	}

	cik_sdma_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	cik_sdma_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
	int r;

	r = cik_sdma_load_microcode(adev);
	if (r)
		return r;

	/* halt the engine before programming */
	cik_sdma_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(adev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
				SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
				 uint64_t pe, uint64_t src,
				 unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				  uint64_t value, unsigned count,
				  uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 */
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
				SDMA_NOP_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
					    SDMA_POLL_REG_MEM_EXTRA_OP(0) |
					    SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
					    SDMA_POLL_REG_MEM_EXTRA_M));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xfffffff); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}

/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VMID to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
	}
}

static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	} else {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	}
}

static int cik_sdma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = SDMA_MAX_INSTANCE;

	cik_sdma_set_ring_funcs(adev);
	cik_sdma_set_irq_funcs(adev);
	cik_sdma_set_buffer_funcs(adev);
	cik_sdma_set_vm_pte_funcs(adev);

	return 0;
}

static int cik_sdma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r, i;

	r = cik_sdma_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int cik_sdma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	cik_sdma_free_microcode(adev);
	return 0;
}

static int cik_sdma_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = cik_sdma_start(adev);
	if (r)
		return r;

	return r;
}

static int cik_sdma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_enable(adev, false);

	return 0;
}

static int cik_sdma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_fini(adev);
}

static int cik_sdma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_soft_reset(handle);

	return cik_sdma_hw_init(adev);
}

static bool cik_sdma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int cik_sdma_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int cik_sdma_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}

	return 0;
}

static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int cik_sdma_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	cik_enable_sdma_mgcg(adev, gate);
	cik_enable_sdma_mgls(adev, gate);

	return 0;
}

static int cik_sdma_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs cik_sdma_ip_funcs = {
	.name = "cik_sdma",
	.early_init = cik_sdma_early_init,
	.late_init = NULL,
	.sw_init = cik_sdma_sw_init,
	.sw_fini = cik_sdma_sw_fini,
	.hw_init = cik_sdma_hw_init,
	.hw_fini = cik_sdma_hw_fini,
	.suspend = cik_sdma_suspend,
	.resume = cik_sdma_resume,
	.is_idle = cik_sdma_is_idle,
	.wait_for_idle = cik_sdma_wait_for_idle,
	.soft_reset = cik_sdma_soft_reset,
	.set_clockgating_state = cik_sdma_set_clockgating_state,
	.set_powergating_state = cik_sdma_set_powergating_state,
};

static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = cik_sdma_ring_get_rptr,
	.get_wptr = cik_sdma_ring_get_wptr,
	.set_wptr = cik_sdma_ring_set_wptr,
	.emit_frame_size =
		6 + /* cik_sdma_ring_emit_hdp_flush */
		3 + /* cik_sdma_ring_emit_hdp_invalidate */
		6 + /* cik_sdma_ring_emit_pipeline_sync */
		12 + /* cik_sdma_ring_emit_vm_flush */
		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
	.emit_ib = cik_sdma_ring_emit_ib,
	.emit_fence = cik_sdma_ring_emit_fence,
	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
	.emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
	.test_ring = cik_sdma_ring_test_ring,
	.test_ib = cik_sdma_ring_test_ib,
	.insert_nop = cik_sdma_ring_insert_nop,
	.pad_ib = cik_sdma_ring_pad_ib,
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
	.set = cik_sdma_set_trap_irq_state,
	.process = cik_sdma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
	.process = cik_sdma_process_illegal_inst_irq,
};

static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}

/**
 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (CIK).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
				      uint64_t src_offset,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

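/*
 * The linear copy packet built above is 7 dwords and the constant fill
 * packet below is 5 dwords; these sizes (and the 0x1fffff-byte limit per
 * command) are what cik_sdma_buffer_funcs advertises via copy_num_dw,
 * fill_num_dw, copy_max_bytes and fill_max_bytes.
 */
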
/**
 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (CIK).
 */
static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
				      uint32_t src_data,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = cik_sdma_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
};

static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
	.copy_pte = cik_sdma_vm_copy_pte,
	.write_pte = cik_sdma_vm_write_pte,
	.set_pte_pde = cik_sdma_vm_set_pte_pde,
};

static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version cik_sdma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_sdma_ip_funcs,
};