/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(void *handle);

MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);


static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */

/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
	}
out:
	if (err) {
		pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
					  SDMA_NOP_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (CIK).
 */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 extra_bits = vmid & 0xf;

	/* IB packet must end on an 8 DW boundary */
	cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ring->me == 0)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence value is written to
 * @seq: fence sequence number to write
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
	}
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * cik_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (CIK).
 */
static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
				  "clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
		if (enable) {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
						 AUTO_CTXSW_ENABLE, 1);
			if (amdgpu_sdma_phase_quantum) {
				WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
				       phase_quantum);
				WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
				       phase_quantum);
			}
		} else {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
						 AUTO_CTXSW_ENABLE, 0);
		}

		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * cik_sdma_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
	u32 me_cntl;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(adev);
		cik_sdma_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
		else
			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
	}
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
			/* XXX SDMA RLC - todo */
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);

		/* enable DMA RB */
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);

		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->sched.ready = true;
	}

	cik_sdma_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	cik_sdma_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
	int r;

	r = cik_sdma_load_microcode(adev);
	if (r)
		return r;

	/* halt the engine before programming */
	cik_sdma_enable(adev, false);
	/* enable sdma ring preemption */
	cik_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(adev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
				SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
				 uint64_t pe, uint64_t src,
				 unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				  uint64_t value, unsigned count,
				  uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 */
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
				SDMA_NOP_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
					    SDMA_POLL_REG_MEM_EXTRA_OP(0) |
					    SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
					    SDMA_POLL_REG_MEM_EXTRA_M));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}

/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static void cik_sdma_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
	}
}

static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	} else {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);

		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	}
}

static int cik_sdma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = SDMA_MAX_INSTANCE;

	cik_sdma_set_ring_funcs(adev);
	cik_sdma_set_irq_funcs(adev);
	cik_sdma_set_buffer_funcs(adev);
	cik_sdma_set_vm_pte_funcs(adev);

	return 0;
}

static int cik_sdma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r, i;

	r = cik_sdma_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT);
		if (r)
			return r;
	}

	return r;
}

static int cik_sdma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	cik_sdma_free_microcode(adev);
	return 0;
}

static int cik_sdma_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = cik_sdma_start(adev);
	if (r)
		return r;

	return r;
}

static int cik_sdma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_ctx_switch_enable(adev, false);
	cik_sdma_enable(adev, false);

	return 0;
}

static int cik_sdma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_fini(adev);
}

static int cik_sdma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_soft_reset(handle);

	return cik_sdma_hw_init(adev);
}

static bool cik_sdma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int cik_sdma_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int cik_sdma_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}

	return 0;
}

static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     struct amdgpu_iv_entry *entry)
{
	u8 instance_id;

	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	instance_id = (entry->ring_id & 0x3) >> 0;
	drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
	return 0;
}

static int cik_sdma_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	cik_enable_sdma_mgcg(adev, gate);
	cik_enable_sdma_mgls(adev, gate);

	return 0;
}

static int cik_sdma_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs cik_sdma_ip_funcs = {
	.name = "cik_sdma",
	.early_init = cik_sdma_early_init,
	.late_init = NULL,
	.sw_init = cik_sdma_sw_init,
	.sw_fini = cik_sdma_sw_fini,
	.hw_init = cik_sdma_hw_init,
	.hw_fini = cik_sdma_hw_fini,
	.suspend = cik_sdma_suspend,
	.resume = cik_sdma_resume,
	.is_idle = cik_sdma_is_idle,
	.wait_for_idle = cik_sdma_wait_for_idle,
	.soft_reset = cik_sdma_soft_reset,
	.set_clockgating_state = cik_sdma_set_clockgating_state,
	.set_powergating_state = cik_sdma_set_powergating_state,
};

static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = cik_sdma_ring_get_rptr,
	.get_wptr = cik_sdma_ring_get_wptr,
	.set_wptr = cik_sdma_ring_set_wptr,
	.emit_frame_size =
		6 + /* cik_sdma_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* cik_sdma_ring_emit_pipeline_sync */
		CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* cik_sdma_ring_emit_vm_flush */
		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
	.emit_ib = cik_sdma_ring_emit_ib,
	.emit_fence = cik_sdma_ring_emit_fence,
	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
	.test_ring = cik_sdma_ring_test_ring,
	.test_ib = cik_sdma_ring_test_ib,
	.insert_nop = cik_sdma_ring_insert_nop,
	.pad_ib = cik_sdma_ring_pad_ib,
	.emit_wreg = cik_sdma_ring_emit_wreg,
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
	.set = cik_sdma_set_trap_irq_state,
	.process = cik_sdma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
	.process = cik_sdma_process_illegal_inst_irq,
};

static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}

/**
 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: unused
 *
 * Copy GPU buffers using the DMA engine (CIK).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
				      uint64_t src_offset,
				      uint64_t dst_offset,
				      uint32_t byte_count,
				      bool tmz)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (CIK).
 */
static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
				      uint32_t src_data,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = cik_sdma_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
};

static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = cik_sdma_vm_copy_pte,

	.write_pte = cik_sdma_vm_write_pte,
	.set_pte_pde = cik_sdma_vm_set_pte_pde,
};

static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version cik_sdma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_sdma_ip_funcs,
};