1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 #include <linux/mmu_context.h> 23 #include "amdgpu.h" 24 #include "amdgpu_amdkfd.h" 25 #include "gc/gc_10_3_0_offset.h" 26 #include "gc/gc_10_3_0_sh_mask.h" 27 #include "navi10_enum.h" 28 #include "oss/osssys_5_0_0_offset.h" 29 #include "oss/osssys_5_0_0_sh_mask.h" 30 #include "soc15_common.h" 31 #include "v10_structs.h" 32 #include "nv.h" 33 #include "nvd.h" 34 35 enum hqd_dequeue_request_type { 36 NO_ACTION = 0, 37 DRAIN_PIPE, 38 RESET_WAVES, 39 SAVE_WAVES 40 }; 41 42 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) 43 { 44 return (struct amdgpu_device *)kgd; 45 } 46 47 static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, 48 uint32_t queue, uint32_t vmid) 49 { 50 struct amdgpu_device *adev = get_amdgpu_device(kgd); 51 52 mutex_lock(&adev->srbm_mutex); 53 nv_grbm_select(adev, mec, pipe, queue, vmid); 54 } 55 56 static void unlock_srbm(struct kgd_dev *kgd) 57 { 58 struct amdgpu_device *adev = get_amdgpu_device(kgd); 59 60 nv_grbm_select(adev, 0, 0, 0, 0); 61 mutex_unlock(&adev->srbm_mutex); 62 } 63 64 static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, 65 uint32_t queue_id) 66 { 67 struct amdgpu_device *adev = get_amdgpu_device(kgd); 68 69 uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; 70 uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); 71 72 lock_srbm(kgd, mec, pipe, queue_id, 0); 73 } 74 75 static uint64_t get_queue_mask(struct amdgpu_device *adev, 76 uint32_t pipe_id, uint32_t queue_id) 77 { 78 unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe + 79 queue_id; 80 81 return 1ull << bit; 82 } 83 84 static void release_queue(struct kgd_dev *kgd) 85 { 86 unlock_srbm(kgd); 87 } 88 89 static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid, 90 uint32_t sh_mem_config, 91 uint32_t sh_mem_ape1_base, 92 uint32_t sh_mem_ape1_limit, 93 uint32_t sh_mem_bases) 94 { 95 struct amdgpu_device *adev = get_amdgpu_device(kgd); 96 97 lock_srbm(kgd, 0, 0, 0, vmid); 98 99 WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); 100 WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); 101 /* APE1 no longer exists on GFX9 */ 102 103 unlock_srbm(kgd); 104 } 105 106 /* ATC is defeatured on Sienna_Cichlid */ 107 static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid, 108 unsigned int vmid) 109 { 110 struct amdgpu_device *adev = get_amdgpu_device(kgd); 111 112 uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT; 113 114 /* Mapping vmid to pasid also for IH block */ 115 pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n", 116 vmid, pasid); 117 WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, value); 118 119 return 0; 120 } 121 122 static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id) 123 { 124 struct amdgpu_device *adev = get_amdgpu_device(kgd); 125 uint32_t mec; 126 uint32_t pipe; 127 128 mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; 129 pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); 130 131 lock_srbm(kgd, mec, pipe, 0, 0); 132 133 WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), 134 CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | 135 CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); 136 137 unlock_srbm(kgd); 138 139 return 0; 140 } 141 142 static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, 143 unsigned int engine_id, 144 unsigned int queue_id) 145 { 146 uint32_t sdma_engine_reg_base = 0; 147 uint32_t sdma_rlc_reg_offset; 148 149 switch (engine_id) { 150 default: 151 dev_warn(adev->dev, 152 "Invalid sdma engine id (%d), using engine id 0\n", 153 engine_id); 154 fallthrough; 155 case 0: 156 sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, 157 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; 158 break; 159 case 1: 160 sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0, 161 mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; 162 break; 163 case 2: 164 sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0, 165 mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL; 166 break; 167 case 3: 168 sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0, 169 mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL; 170 break; 171 } 172 173 sdma_rlc_reg_offset = sdma_engine_reg_base 174 + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); 175 176 pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, 177 queue_id, sdma_rlc_reg_offset); 178 179 return sdma_rlc_reg_offset; 180 } 181 182 static inline struct v10_compute_mqd *get_mqd(void *mqd) 183 { 184 return (struct v10_compute_mqd *)mqd; 185 } 186 187 static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) 188 { 189 return (struct v10_sdma_mqd *)mqd; 190 } 191 192 static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 193 uint32_t queue_id, uint32_t __user *wptr, 194 uint32_t wptr_shift, uint32_t wptr_mask, 195 struct mm_struct *mm) 196 { 197 struct amdgpu_device *adev = get_amdgpu_device(kgd); 198 struct v10_compute_mqd *m; 199 uint32_t *mqd_hqd; 200 uint32_t reg, hqd_base, data; 201 202 m = get_mqd(mqd); 203 204 pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id); 205 acquire_queue(kgd, pipe_id, queue_id); 206 207 /* HIQ is set during driver init period with vmid set to 0*/ 208 if (m->cp_hqd_vmid == 0) { 209 uint32_t value, mec, pipe; 210 211 mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; 212 pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); 213 214 pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", 215 mec, pipe, queue_id); 216 value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS)); 217 value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, 218 ((mec << 5) | (pipe << 3) | queue_id | 0x80)); 219 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value); 220 } 221 222 /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ 223 mqd_hqd = &m->cp_mqd_base_addr_lo; 224 hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); 225 226 for (reg = hqd_base; 227 reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) 228 WREG32(reg, mqd_hqd[reg - hqd_base]); 229 230 231 /* Activate doorbell logic before triggering WPTR poll. */ 232 data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, 233 CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); 234 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data); 235 236 if (wptr) { 237 /* Don't read wptr with get_user because the user 238 * context may not be accessible (if this function 239 * runs in a work queue). Instead trigger a one-shot 240 * polling read from memory in the CP. This assumes 241 * that wptr is GPU-accessible in the queue's VMID via 242 * ATC or SVM. WPTR==RPTR before starting the poll so 243 * the CP starts fetching new commands from the right 244 * place. 245 * 246 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit 247 * tricky. Assume that the queue didn't overflow. The 248 * number of valid bits in the 32-bit RPTR depends on 249 * the queue size. The remaining bits are taken from 250 * the saved 64-bit WPTR. If the WPTR wrapped, add the 251 * queue size. 252 */ 253 uint32_t queue_size = 254 2 << REG_GET_FIELD(m->cp_hqd_pq_control, 255 CP_HQD_PQ_CONTROL, QUEUE_SIZE); 256 uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1); 257 258 if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr) 259 guessed_wptr += queue_size; 260 guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); 261 guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; 262 263 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), 264 lower_32_bits(guessed_wptr)); 265 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), 266 upper_32_bits(guessed_wptr)); 267 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR), 268 lower_32_bits((uint64_t)wptr)); 269 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), 270 upper_32_bits((uint64_t)wptr)); 271 pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, 272 (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); 273 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), 274 (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); 275 } 276 277 /* Start the EOP fetcher */ 278 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR), 279 REG_SET_FIELD(m->cp_hqd_eop_rptr, 280 CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); 281 282 data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); 283 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); 284 285 release_queue(kgd); 286 287 return 0; 288 } 289 290 static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd, 291 uint32_t pipe_id, uint32_t queue_id, 292 uint32_t doorbell_off) 293 { 294 struct amdgpu_device *adev = get_amdgpu_device(kgd); 295 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 296 struct v10_compute_mqd *m; 297 uint32_t mec, pipe; 298 int r; 299 300 m = get_mqd(mqd); 301 302 acquire_queue(kgd, pipe_id, queue_id); 303 304 mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; 305 pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); 306 307 pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", 308 mec, pipe, queue_id); 309 310 spin_lock(&adev->gfx.kiq.ring_lock); 311 r = amdgpu_ring_alloc(kiq_ring, 7); 312 if (r) { 313 pr_err("Failed to alloc KIQ (%d).\n", r); 314 goto out_unlock; 315 } 316 317 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 318 amdgpu_ring_write(kiq_ring, 319 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 320 PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */ 321 PACKET3_MAP_QUEUES_QUEUE(queue_id) | 322 PACKET3_MAP_QUEUES_PIPE(pipe) | 323 PACKET3_MAP_QUEUES_ME((mec - 1)) | 324 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 325 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ 326 PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */ 327 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 328 amdgpu_ring_write(kiq_ring, 329 PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off)); 330 amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo); 331 amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi); 332 amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo); 333 amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi); 334 amdgpu_ring_commit(kiq_ring); 335 336 out_unlock: 337 spin_unlock(&adev->gfx.kiq.ring_lock); 338 release_queue(kgd); 339 340 return r; 341 } 342 343 static int hqd_dump_v10_3(struct kgd_dev *kgd, 344 uint32_t pipe_id, uint32_t queue_id, 345 uint32_t (**dump)[2], uint32_t *n_regs) 346 { 347 struct amdgpu_device *adev = get_amdgpu_device(kgd); 348 uint32_t i = 0, reg; 349 #define HQD_N_REGS 56 350 #define DUMP_REG(addr) do { \ 351 if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ 352 break; \ 353 (*dump)[i][0] = (addr) << 2; \ 354 (*dump)[i++][1] = RREG32(addr); \ 355 } while (0) 356 357 *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); 358 if (*dump == NULL) 359 return -ENOMEM; 360 361 acquire_queue(kgd, pipe_id, queue_id); 362 363 for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); 364 reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) 365 DUMP_REG(reg); 366 367 release_queue(kgd); 368 369 WARN_ON_ONCE(i != HQD_N_REGS); 370 *n_regs = i; 371 372 return 0; 373 } 374 375 static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd, 376 uint32_t __user *wptr, struct mm_struct *mm) 377 { 378 struct amdgpu_device *adev = get_amdgpu_device(kgd); 379 struct v10_sdma_mqd *m; 380 uint32_t sdma_rlc_reg_offset; 381 unsigned long end_jiffies; 382 uint32_t data; 383 uint64_t data64; 384 uint64_t __user *wptr64 = (uint64_t __user *)wptr; 385 386 m = get_sdma_mqd(mqd); 387 sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, 388 m->sdma_queue_id); 389 390 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, 391 m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); 392 393 end_jiffies = msecs_to_jiffies(2000) + jiffies; 394 while (true) { 395 data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); 396 if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) 397 break; 398 if (time_after(jiffies, end_jiffies)) { 399 pr_err("SDMA RLC not idle in %s\n", __func__); 400 return -ETIME; 401 } 402 usleep_range(500, 1000); 403 } 404 405 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, 406 m->sdmax_rlcx_doorbell_offset); 407 408 data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, 409 ENABLE, 1); 410 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); 411 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, 412 m->sdmax_rlcx_rb_rptr); 413 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, 414 m->sdmax_rlcx_rb_rptr_hi); 415 416 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); 417 if (read_user_wptr(mm, wptr64, data64)) { 418 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, 419 lower_32_bits(data64)); 420 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, 421 upper_32_bits(data64)); 422 } else { 423 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, 424 m->sdmax_rlcx_rb_rptr); 425 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, 426 m->sdmax_rlcx_rb_rptr_hi); 427 } 428 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); 429 430 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); 431 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, 432 m->sdmax_rlcx_rb_base_hi); 433 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 434 m->sdmax_rlcx_rb_rptr_addr_lo); 435 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, 436 m->sdmax_rlcx_rb_rptr_addr_hi); 437 438 data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, 439 RB_ENABLE, 1); 440 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); 441 442 return 0; 443 } 444 445 static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd, 446 uint32_t engine_id, uint32_t queue_id, 447 uint32_t (**dump)[2], uint32_t *n_regs) 448 { 449 struct amdgpu_device *adev = get_amdgpu_device(kgd); 450 uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, 451 engine_id, queue_id); 452 uint32_t i = 0, reg; 453 #undef HQD_N_REGS 454 #define HQD_N_REGS (19+6+7+10) 455 456 *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); 457 if (*dump == NULL) 458 return -ENOMEM; 459 460 for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) 461 DUMP_REG(sdma_rlc_reg_offset + reg); 462 for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) 463 DUMP_REG(sdma_rlc_reg_offset + reg); 464 for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; 465 reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) 466 DUMP_REG(sdma_rlc_reg_offset + reg); 467 for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; 468 reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) 469 DUMP_REG(sdma_rlc_reg_offset + reg); 470 471 WARN_ON_ONCE(i != HQD_N_REGS); 472 *n_regs = i; 473 474 return 0; 475 } 476 477 static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address, 478 uint32_t pipe_id, uint32_t queue_id) 479 { 480 struct amdgpu_device *adev = get_amdgpu_device(kgd); 481 uint32_t act; 482 bool retval = false; 483 uint32_t low, high; 484 485 acquire_queue(kgd, pipe_id, queue_id); 486 act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); 487 if (act) { 488 low = lower_32_bits(queue_address >> 8); 489 high = upper_32_bits(queue_address >> 8); 490 491 if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) && 492 high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI))) 493 retval = true; 494 } 495 release_queue(kgd); 496 return retval; 497 } 498 499 static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd) 500 { 501 struct amdgpu_device *adev = get_amdgpu_device(kgd); 502 struct v10_sdma_mqd *m; 503 uint32_t sdma_rlc_reg_offset; 504 uint32_t sdma_rlc_rb_cntl; 505 506 m = get_sdma_mqd(mqd); 507 sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, 508 m->sdma_queue_id); 509 510 sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); 511 512 if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) 513 return true; 514 515 return false; 516 } 517 518 static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd, 519 enum kfd_preempt_type reset_type, 520 unsigned int utimeout, uint32_t pipe_id, 521 uint32_t queue_id) 522 { 523 struct amdgpu_device *adev = get_amdgpu_device(kgd); 524 enum hqd_dequeue_request_type type; 525 unsigned long end_jiffies; 526 uint32_t temp; 527 struct v10_compute_mqd *m = get_mqd(mqd); 528 529 acquire_queue(kgd, pipe_id, queue_id); 530 531 if (m->cp_hqd_vmid == 0) 532 WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); 533 534 switch (reset_type) { 535 case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: 536 type = DRAIN_PIPE; 537 break; 538 case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: 539 type = RESET_WAVES; 540 break; 541 default: 542 type = DRAIN_PIPE; 543 break; 544 } 545 546 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type); 547 548 end_jiffies = (utimeout * HZ / 1000) + jiffies; 549 while (true) { 550 temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); 551 if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) 552 break; 553 if (time_after(jiffies, end_jiffies)) { 554 pr_err("cp queue pipe %d queue %d preemption failed\n", 555 pipe_id, queue_id); 556 release_queue(kgd); 557 return -ETIME; 558 } 559 usleep_range(500, 1000); 560 } 561 562 release_queue(kgd); 563 return 0; 564 } 565 566 static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd, 567 unsigned int utimeout) 568 { 569 struct amdgpu_device *adev = get_amdgpu_device(kgd); 570 struct v10_sdma_mqd *m; 571 uint32_t sdma_rlc_reg_offset; 572 uint32_t temp; 573 unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; 574 575 m = get_sdma_mqd(mqd); 576 sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, 577 m->sdma_queue_id); 578 579 temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); 580 temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; 581 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); 582 583 while (true) { 584 temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); 585 if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) 586 break; 587 if (time_after(jiffies, end_jiffies)) { 588 pr_err("SDMA RLC not idle in %s\n", __func__); 589 return -ETIME; 590 } 591 usleep_range(500, 1000); 592 } 593 594 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); 595 WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, 596 RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | 597 SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); 598 599 m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); 600 m->sdmax_rlcx_rb_rptr_hi = 601 RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); 602 603 return 0; 604 } 605 606 607 static int address_watch_disable_v10_3(struct kgd_dev *kgd) 608 { 609 return 0; 610 } 611 612 static int address_watch_execute_v10_3(struct kgd_dev *kgd, 613 unsigned int watch_point_id, 614 uint32_t cntl_val, 615 uint32_t addr_hi, 616 uint32_t addr_lo) 617 { 618 return 0; 619 } 620 621 static int wave_control_execute_v10_3(struct kgd_dev *kgd, 622 uint32_t gfx_index_val, 623 uint32_t sq_cmd) 624 { 625 struct amdgpu_device *adev = get_amdgpu_device(kgd); 626 uint32_t data = 0; 627 628 mutex_lock(&adev->grbm_idx_mutex); 629 630 WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val); 631 WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd); 632 633 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, 634 INSTANCE_BROADCAST_WRITES, 1); 635 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, 636 SA_BROADCAST_WRITES, 1); 637 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, 638 SE_BROADCAST_WRITES, 1); 639 640 WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data); 641 mutex_unlock(&adev->grbm_idx_mutex); 642 643 return 0; 644 } 645 646 static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd, 647 unsigned int watch_point_id, 648 unsigned int reg_offset) 649 { 650 return 0; 651 } 652 653 static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid, 654 uint64_t page_table_base) 655 { 656 struct amdgpu_device *adev = get_amdgpu_device(kgd); 657 658 /* SDMA is on gfxhub as well for Navi1* series */ 659 adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); 660 } 661 662 #if 0 663 uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd, 664 uint32_t trap_debug_wave_launch_mode, 665 uint32_t vmid) 666 { 667 struct amdgpu_device *adev = get_amdgpu_device(kgd); 668 uint32_t data = 0; 669 uint32_t orig_wave_cntl_value; 670 uint32_t orig_stall_vmid; 671 672 mutex_lock(&adev->grbm_idx_mutex); 673 674 orig_wave_cntl_value = RREG32(SOC15_REG_OFFSET(GC, 675 0, 676 mmSPI_GDBG_WAVE_CNTL)); 677 orig_stall_vmid = REG_GET_FIELD(orig_wave_cntl_value, 678 SPI_GDBG_WAVE_CNTL, 679 STALL_VMID); 680 681 data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1); 682 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); 683 684 data = 0; 685 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data); 686 687 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), orig_stall_vmid); 688 689 mutex_unlock(&adev->grbm_idx_mutex); 690 691 return 0; 692 } 693 694 uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd) 695 { 696 struct amdgpu_device *adev = get_amdgpu_device(kgd); 697 698 mutex_lock(&adev->grbm_idx_mutex); 699 700 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); 701 702 mutex_unlock(&adev->grbm_idx_mutex); 703 704 return 0; 705 } 706 707 uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd, 708 uint32_t trap_override, 709 uint32_t trap_mask) 710 { 711 struct amdgpu_device *adev = get_amdgpu_device(kgd); 712 uint32_t data = 0; 713 714 mutex_lock(&adev->grbm_idx_mutex); 715 716 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); 717 data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1); 718 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); 719 720 data = 0; 721 data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, 722 EXCP_EN, trap_mask); 723 data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, 724 REPLACE, trap_override); 725 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data); 726 727 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); 728 data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 0); 729 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); 730 731 mutex_unlock(&adev->grbm_idx_mutex); 732 733 return 0; 734 } 735 736 uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd, 737 uint8_t wave_launch_mode, 738 uint32_t vmid) 739 { 740 struct amdgpu_device *adev = get_amdgpu_device(kgd); 741 uint32_t data = 0; 742 bool is_stall_mode; 743 bool is_mode_set; 744 745 is_stall_mode = (wave_launch_mode == 4); 746 is_mode_set = (wave_launch_mode != 0 && wave_launch_mode != 4); 747 748 mutex_lock(&adev->grbm_idx_mutex); 749 750 data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, 751 VMID_MASK, is_mode_set ? 1 << vmid : 0); 752 data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2, 753 MODE, is_mode_set ? wave_launch_mode : 0); 754 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data); 755 756 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); 757 data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, 758 STALL_VMID, is_stall_mode ? 1 << vmid : 0); 759 data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, 760 STALL_RA, is_stall_mode ? 1 : 0); 761 WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data); 762 763 mutex_unlock(&adev->grbm_idx_mutex); 764 765 return 0; 766 } 767 768 /* kgd_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values 769 * The values read are: 770 * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. 771 * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads. 772 * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads. 773 * gws_wait_time -- Wait Count for Global Wave Syncs. 774 * que_sleep_wait_time -- Wait Count for Dequeue Retry. 775 * sch_wave_wait_time -- Wait Count for Scheduling Wave Message. 776 * sem_rearm_wait_time -- Wait Count for Semaphore re-arm. 777 * deq_retry_wait_time -- Wait Count for Global Wave Syncs. 778 */ 779 void get_iq_wait_times_v10_3(struct kgd_dev *kgd, 780 uint32_t *wait_times) 781 782 { 783 struct amdgpu_device *adev = get_amdgpu_device(kgd); 784 785 *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); 786 } 787 788 void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd, 789 uint32_t wait_times, 790 uint32_t grace_period, 791 uint32_t *reg_offset, 792 uint32_t *reg_data) 793 { 794 *reg_data = wait_times; 795 796 *reg_data = REG_SET_FIELD(*reg_data, 797 CP_IQ_WAIT_TIME2, 798 SCH_WAVE, 799 grace_period); 800 801 *reg_offset = mmCP_IQ_WAIT_TIME2; 802 } 803 #endif 804 805 const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { 806 .program_sh_mem_settings = program_sh_mem_settings_v10_3, 807 .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3, 808 .init_interrupts = init_interrupts_v10_3, 809 .hqd_load = hqd_load_v10_3, 810 .hiq_mqd_load = hiq_mqd_load_v10_3, 811 .hqd_sdma_load = hqd_sdma_load_v10_3, 812 .hqd_dump = hqd_dump_v10_3, 813 .hqd_sdma_dump = hqd_sdma_dump_v10_3, 814 .hqd_is_occupied = hqd_is_occupied_v10_3, 815 .hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3, 816 .hqd_destroy = hqd_destroy_v10_3, 817 .hqd_sdma_destroy = hqd_sdma_destroy_v10_3, 818 .address_watch_disable = address_watch_disable_v10_3, 819 .address_watch_execute = address_watch_execute_v10_3, 820 .wave_control_execute = wave_control_execute_v10_3, 821 .address_watch_get_offset = address_watch_get_offset_v10_3, 822 .get_atc_vmid_pasid_mapping_info = NULL, 823 .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3, 824 #if 0 825 .enable_debug_trap = enable_debug_trap_v10_3, 826 .disable_debug_trap = disable_debug_trap_v10_3, 827 .set_wave_launch_trap_override = set_wave_launch_trap_override_v10_3, 828 .set_wave_launch_mode = set_wave_launch_mode_v10_3, 829 .get_iq_wait_times = get_iq_wait_times_v10_3, 830 .build_grace_period_packet_info = build_grace_period_packet_info_v10_3, 831 #endif 832 }; 833