/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
#include "navi10_enum.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
#include "gfxhub_v2_1.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES,
	SAVE_WAVES
};

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
		      uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	nv_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			  uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static uint64_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
			queue_id;

	return 1ull << bit;
}
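
/*
 * Worked example for get_queue_mask() (illustrative only, values
 * assumed): with adev->gfx.mec.num_queue_per_pipe == 8, pipe_id == 1
 * and queue_id == 2 select bit 1 * 8 + 2 == 10, so the function
 * returns 1ull << 10 == 0x400. The mask is later truncated to 32 bits
 * when it is written to CP_PQ_WPTR_POLL_CNTL1 in hqd_load_v10_3().
 */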

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid,
					  uint32_t sh_mem_config,
					  uint32_t sh_mem_ape1_base,
					  uint32_t sh_mem_ape1_limit,
					  uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 and later, so the APE1 base/limit
	 * arguments are ignored here.
	 */

	unlock_srbm(kgd);
}

/* ATC is defeatured on Sienna_Cichlid */
static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;

	/* Also map vmid to pasid in the IH block; the IH_VMID_0_LUT is
	 * indexed by vmid.
	 */
	pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n",
		 vmid, pasid);
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, value);

	return 0;
}

static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
	       CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
	       CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
					unsigned int engine_id,
					unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base = 0;
	uint32_t sdma_rlc_reg_offset;

	switch (engine_id) {
	default:
		dev_warn(adev->dev,
			 "Invalid sdma engine id (%d), using engine id 0\n",
			 engine_id);
		fallthrough;
	case 0:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 1:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
				mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 2:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
				mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
		break;
	case 3:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
				mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
		break;
	}

	sdma_rlc_reg_offset = sdma_engine_reg_base
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
		 queue_id, sdma_rlc_reg_offset);

	return sdma_rlc_reg_offset;
}
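
/*
 * Worked example for get_sdma_rlc_reg_offset() (illustrative only):
 * the per-queue stride is (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL),
 * i.e. the size of one RLC queue's register window, and the base is the
 * chosen engine's register block rebased so that adding an
 * mmSDMA0_RLC0_* name yields that engine's RLC0 register. So, assuming
 * a uniform stride across queues as this code does,
 * get_sdma_rlc_reg_offset(adev, 1, 2) + mmSDMA0_RLC0_RB_CNTL addresses
 * SDMA1's RLC2_RB_CNTL register.
 */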

static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
	return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v10_sdma_mqd *)mqd;
}

static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			  uint32_t queue_id, uint32_t __user *wptr,
			  uint32_t wptr_shift, uint32_t wptr_mask,
			  struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_compute_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			 mec, pipe, queue_id);
		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM.
	 * Only the range up to CP_HQD_PQ_WPTR_HI is restored here; the WPTR
	 * registers may be overwritten below with a reconstructed 64-bit
	 * value when a user-mode wptr is supplied.
	 */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32(reg, mqd_hqd[reg - hqd_base]);

	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uint64_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uint64_t)wptr));
		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}
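
/*
 * Worked example for the WPTR reconstruction above (values assumed for
 * illustration): suppose the QUEUE_SIZE field is 9, so
 * queue_size = 2 << 9 = 0x400, the saved 64-bit WPTR is 0x420 and the
 * 32-bit RPTR is 0x30. The low bits of the guess come from the RPTR:
 * 0x30. Since (0x420 & 0x3ff) = 0x20 < 0x30, the WPTR must have wrapped
 * past the saved value, so add 0x400, giving 0x430. Adding the saved
 * WPTR's upper bits (0x420 & ~0x3ff = 0x400) yields a guessed WPTR of
 * 0x830, whose low bits equal the RPTR as required for the one-shot
 * poll to resume fetching from the right place.
 */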

static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
			      uint32_t pipe_id, uint32_t queue_id,
			      uint32_t doorbell_off)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	struct v10_compute_mqd *m;
	uint32_t mec, pipe;
	int r;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
		 mec, pipe, queue_id);

	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
			  PACKET3_MAP_QUEUES_PIPE(pipe) |
			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq.ring_lock);
	release_queue(kgd);

	return r;
}

static int hqd_dump_v10_3(struct kgd_dev *kgd,
			  uint32_t pipe_id, uint32_t queue_id,
			  uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
/* Each dump entry is a (byte offset, value) pair; (addr) << 2 turns the
 * dword register offset into a byte offset.
 */
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
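
/*
 * Minimal sketch (illustrative only, not part of this driver) of how a
 * hypothetical caller could walk the (offset, value) pairs produced by
 * hqd_dump_v10_3():
 *
 *	uint32_t (*dump)[2], n_regs, i;
 *
 *	if (!hqd_dump_v10_3(kgd, pipe_id, queue_id, &dump, &n_regs)) {
 *		for (i = 0; i < n_regs; i++)
 *			pr_debug("reg 0x%x = 0x%x\n",
 *				 dump[i][0], dump[i][1]);
 *		kfree(dump);
 *	}
 */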

static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
			       uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
						      m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
	       m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
	       m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
	       m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
	       m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
	       m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
	       m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
			       uint32_t engine_id, uint32_t queue_id,
			       uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19 + 6 + 7 + 10)

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
				  uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		    high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
						      m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
			     enum kfd_preempt_type reset_type,
			     unsigned int utimeout, uint32_t pipe_id,
			     uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v10_compute_mqd *m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue pipe %d queue %d preemption failed\n",
			       pipe_id, queue_id);
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
				  unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
						      m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
	       RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
	       SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

static int address_watch_disable_v10_3(struct kgd_dev *kgd)
{
	return 0;
}

static int address_watch_execute_v10_3(struct kgd_dev *kgd,
				       unsigned int watch_point_id,
				       uint32_t cntl_val,
				       uint32_t addr_hi,
				       uint32_t addr_lo)
{
	return 0;
}

static int wave_control_execute_v10_3(struct kgd_dev *kgd,
				      uint32_t gfx_index_val,
				      uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SA_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd,
					       unsigned int watch_point_id,
					       unsigned int reg_offset)
{
	return 0;
}

static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid,
						 uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/* SDMA is on gfxhub as well for Navi1* series */
	gfxhub_v2_1_setup_vm_pt_regs(adev, vmid, page_table_base);
}

#if 0
uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
				 uint32_t trap_debug_wave_launch_mode,
				 uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;
	uint32_t orig_wave_cntl_value;
	uint32_t orig_stall_vmid;

	mutex_lock(&adev->grbm_idx_mutex);

	orig_wave_cntl_value = RREG32(SOC15_REG_OFFSET(GC,
				0,
				mmSPI_GDBG_WAVE_CNTL));
	orig_stall_vmid = REG_GET_FIELD(orig_wave_cntl_value,
				SPI_GDBG_WAVE_CNTL,
				STALL_VMID);

	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	data = 0;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), orig_stall_vmid);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
					     uint32_t trap_override,
					     uint32_t trap_mask)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	data = 0;
	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
			EXCP_EN, trap_mask);
	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
			REPLACE, trap_override);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
				    uint8_t wave_launch_mode,
				    uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;
	bool is_stall_mode;
	bool is_mode_set;

	is_stall_mode = (wave_launch_mode == 4);
	is_mode_set = (wave_launch_mode != 0 && wave_launch_mode != 4);

	mutex_lock(&adev->grbm_idx_mutex);

	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
			VMID_MASK, is_mode_set ? 1 << vmid : 0);
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
			MODE, is_mode_set ? wave_launch_mode : 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
			STALL_VMID, is_stall_mode ? 1 << vmid : 0);
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
			STALL_RA, is_stall_mode ? 1 : 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/* get_iq_wait_times_v10_3: Returns the mmCP_IQ_WAIT_TIME1/2 values
 * (only TIME2 is read here). The wait counts described by these
 * registers are:
 * ib_offload_wait_time     -- Wait Count for Indirect Buffer Offloads.
 * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
 * wrm_offload_wait_time    -- Wait Count for WAIT_REG_MEM Offloads.
 * gws_wait_time            -- Wait Count for Global Wave Syncs.
 * que_sleep_wait_time      -- Wait Count for Dequeue Retry.
 * sch_wave_wait_time       -- Wait Count for Scheduling Wave Message.
 * sem_rearm_wait_time      -- Wait Count for Semaphore re-arm.
 * deq_retry_wait_time      -- Wait Count for Global Wave Syncs.
 */
void get_iq_wait_times_v10_3(struct kgd_dev *kgd,
			     uint32_t *wait_times)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
}

void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd,
					  uint32_t wait_times,
					  uint32_t grace_period,
					  uint32_t *reg_offset,
					  uint32_t *reg_data)
{
	*reg_data = wait_times;

	*reg_data = REG_SET_FIELD(*reg_data,
			CP_IQ_WAIT_TIME2,
			SCH_WAVE,
			grace_period);

	*reg_offset = mmCP_IQ_WAIT_TIME2;
}
#endif

const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
	.program_sh_mem_settings = program_sh_mem_settings_v10_3,
	.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3,
	.init_interrupts = init_interrupts_v10_3,
	.hqd_load = hqd_load_v10_3,
	.hiq_mqd_load = hiq_mqd_load_v10_3,
	.hqd_sdma_load = hqd_sdma_load_v10_3,
	.hqd_dump = hqd_dump_v10_3,
	.hqd_sdma_dump = hqd_sdma_dump_v10_3,
	.hqd_is_occupied = hqd_is_occupied_v10_3,
	.hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3,
	.hqd_destroy = hqd_destroy_v10_3,
	.hqd_sdma_destroy = hqd_sdma_destroy_v10_3,
	.address_watch_disable = address_watch_disable_v10_3,
	.address_watch_execute = address_watch_execute_v10_3,
	.wave_control_execute = wave_control_execute_v10_3,
	.address_watch_get_offset = address_watch_get_offset_v10_3,
	.get_atc_vmid_pasid_mapping_info = NULL,
	.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
#if 0
	.enable_debug_trap = enable_debug_trap_v10_3,
	.disable_debug_trap = disable_debug_trap_v10_3,
	.set_wave_launch_trap_override = set_wave_launch_trap_override_v10_3,
	.set_wave_launch_mode = set_wave_launch_mode_v10_3,
	.get_iq_wait_times = get_iq_wait_times_v10_3,
	.build_grace_period_packet_info = build_grace_period_packet_info_v10_3,
#endif
};
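
/*
 * Illustrative only (hypothetical caller, not part of this file): KFD
 * reaches the hooks above through the shared function table, e.g.
 *
 *	const struct kfd2kgd_calls *f2g = &gfx_v10_3_kfd2kgd;
 *
 *	f2g->program_sh_mem_settings(kgd, vmid, sh_mem_config,
 *				     0, 0, sh_mem_bases);
 *	if (f2g->hqd_load(kgd, mqd, pipe_id, queue_id, wptr, 0, 0, mm))
 *		pr_err("failed to load HQD\n");
 *
 * The APE1 arguments are passed as 0 since they are ignored on this ASIC.
 */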