/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES,
	SAVE_WAVES
};

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
		      uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	nv_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			  uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static uint64_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
			queue_id;

	return 1ull << bit;
}
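
/*
 * Example (assuming the usual GFX10 configuration of 8 queues per
 * pipe): pipe 1, queue 2 maps to bit 1 * 8 + 2 = 10, so
 * get_queue_mask() returns 0x400. This mask selects the queue in the
 * CP_PQ_WPTR_POLL_CNTL1 register written in hqd_load_v10_3() below.
 */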

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid,
					  uint32_t sh_mem_config,
					  uint32_t sh_mem_ape1_base,
					  uint32_t sh_mem_ape1_limit,
					  uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 and later ASICs */

	unlock_srbm(kgd);
}

/* ATC is defeatured on Sienna_Cichlid */
static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;

	/* Mapping vmid to pasid also for IH block */
	pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n",
		 vmid, pasid);
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, value);

	return 0;
}

static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
	       CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
	       CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
					unsigned int engine_id,
					unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base = 0;
	uint32_t sdma_rlc_reg_offset;

	switch (engine_id) {
	default:
		dev_warn(adev->dev,
			 "Invalid sdma engine id (%d), using engine id 0\n",
			 engine_id);
		fallthrough;
	case 0:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 1:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
				mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 2:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
				mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
		break;
	case 3:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
				mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
		break;
	}

	sdma_rlc_reg_offset = sdma_engine_reg_base
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
		 queue_id, sdma_rlc_reg_offset);

	return sdma_rlc_reg_offset;
}
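
/*
 * Example: engine_id 1, queue_id 2 resolves to SDMA1's register base
 * plus 2 * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL), i.e. two
 * per-queue register strides past SDMA1's RLC0 block.
 */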

static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
	return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v10_sdma_mqd *)mqd;
}

static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			  uint32_t queue_id, uint32_t __user *wptr,
			  uint32_t wptr_shift, uint32_t wptr_mask,
			  struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_compute_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			 mec, pipe, queue_id);
		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32(reg, mqd_hqd[reg - hqd_base]);


	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uint64_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uint64_t)wptr));
		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
	}
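
	/*
	 * Worked example of the reconstruction above, assuming a
	 * QUEUE_SIZE field of 9 (queue_size = 2 << 9 = 0x400): saved
	 * 64-bit WPTR 0x1830, 32-bit RPTR 0x50. guessed_wptr starts at
	 * 0x50; the saved low bits (0x1830 & 0x3ff = 0x30) are below
	 * 0x50, so the WPTR wrapped and 0x400 is added (0x450); adding
	 * the saved upper bits (0x1800) gives 0x1c50.
	 */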

	/* Start the EOP fetcher */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}

static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
			      uint32_t pipe_id, uint32_t queue_id,
			      uint32_t doorbell_off)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	struct v10_compute_mqd *m;
	uint32_t mec, pipe;
	int r;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
		 mec, pipe, queue_id);

	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
			  PACKET3_MAP_QUEUES_PIPE(pipe) |
			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq.ring_lock);
	release_queue(kgd);

	return r;
}

static int hqd_dump_v10_3(struct kgd_dev *kgd,
			  uint32_t pipe_id, uint32_t queue_id,
			  uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
			       uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
						      m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
	       m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
	       m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
	       m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
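
	/*
	 * Note on the fallback branch above: when the user-mode WPTR
	 * cannot be read, RB_WPTR is set equal to the saved RB_RPTR, so
	 * the ring appears empty and the engine fetches nothing until a
	 * doorbell write moves the WPTR forward.
	 */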

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
	       m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
	       m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
	       m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
			       uint32_t engine_id, uint32_t queue_id,
			       uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19 + 6 + 7 + 10)

	*dump = kmalloc(HQD_N_REGS * 2 * sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
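
/*
 * A queue slot counts as occupied when CP_HQD_ACTIVE is set and the
 * 256-byte-aligned ring base in CP_HQD_PQ_BASE/CP_HQD_PQ_BASE_HI
 * matches queue_address >> 8.
 */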
static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
				  uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		    high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
						      m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
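
/*
 * Preempt a compute queue: translate the KFD preempt type into a CP
 * dequeue request, write it to CP_HQD_DEQUEUE_REQUEST, then poll
 * CP_HQD_ACTIVE until the HQD goes idle or utimeout (in ms) expires.
 */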
static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
			     enum kfd_preempt_type reset_type,
			     unsigned int utimeout, uint32_t pipe_id,
			     uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v10_compute_mqd *m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue pipe %d queue %d preemption failed\n",
			       pipe_id, queue_id);
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
				  unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
						      m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
	       RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
	       SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}
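
/*
 * The RB_RPTR/RB_RPTR_HI values saved into the MQD above let a later
 * hqd_sdma_load_v10_3() restore the ring from where it stopped.
 */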

static int address_watch_disable_v10_3(struct kgd_dev *kgd)
{
	return 0;
}

static int address_watch_execute_v10_3(struct kgd_dev *kgd,
				       unsigned int watch_point_id,
				       uint32_t cntl_val,
				       uint32_t addr_hi,
				       uint32_t addr_lo)
{
	return 0;
}

static int wave_control_execute_v10_3(struct kgd_dev *kgd,
				      uint32_t gfx_index_val,
				      uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SA_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd,
					       unsigned int watch_point_id,
					       unsigned int reg_offset)
{
	return 0;
}

static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/* SDMA is on gfxhub as well for Navi1* series */
	adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}

#if 0
uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
				 uint32_t trap_debug_wave_launch_mode,
				 uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;
	uint32_t orig_wave_cntl_value;
	uint32_t orig_stall_vmid;

	mutex_lock(&adev->grbm_idx_mutex);

	orig_wave_cntl_value = RREG32(SOC15_REG_OFFSET(GC,
			0,
			mmSPI_GDBG_WAVE_CNTL));
	orig_stall_vmid = REG_GET_FIELD(orig_wave_cntl_value,
			SPI_GDBG_WAVE_CNTL,
			STALL_VMID);

	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	data = 0;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), orig_stall_vmid);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
					     uint32_t trap_override,
					     uint32_t trap_mask)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	data = 0;
	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
			EXCP_EN, trap_mask);
	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
			REPLACE, trap_override);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
				    uint8_t wave_launch_mode,
				    uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;
	bool is_stall_mode;
	bool is_mode_set;

	is_stall_mode = (wave_launch_mode == 4);
	is_mode_set = (wave_launch_mode != 0 && wave_launch_mode != 4);

	mutex_lock(&adev->grbm_idx_mutex);

	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
			VMID_MASK, is_mode_set ? 1 << vmid : 0);
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
			MODE, is_mode_set ? wave_launch_mode : 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
			STALL_VMID, is_stall_mode ? 1 << vmid : 0);
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
			STALL_RA, is_stall_mode ? 1 : 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/* kgd_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
 * The values read are:
 *	ib_offload_wait_time     -- Wait Count for Indirect Buffer Offloads.
 *	atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
 *	wrm_offload_wait_time    -- Wait Count for WAIT_REG_MEM Offloads.
 *	gws_wait_time            -- Wait Count for Global Wave Syncs.
 *	que_sleep_wait_time      -- Wait Count for Dequeue Retry.
 *	sch_wave_wait_time       -- Wait Count for Scheduling Wave Message.
 *	sem_rearm_wait_time      -- Wait Count for Semaphore re-arm.
 *	deq_retry_wait_time      -- Wait Count for Global Wave Syncs.
 */
void get_iq_wait_times_v10_3(struct kgd_dev *kgd,
			     uint32_t *wait_times)

{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
}

void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd,
					  uint32_t wait_times,
					  uint32_t grace_period,
					  uint32_t *reg_offset,
					  uint32_t *reg_data)
{
	*reg_data = wait_times;

	*reg_data = REG_SET_FIELD(*reg_data,
			CP_IQ_WAIT_TIME2,
			SCH_WAVE,
			grace_period);

	*reg_offset = mmCP_IQ_WAIT_TIME2;
}
#endif

const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
	.program_sh_mem_settings = program_sh_mem_settings_v10_3,
	.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3,
	.init_interrupts = init_interrupts_v10_3,
	.hqd_load = hqd_load_v10_3,
	.hiq_mqd_load = hiq_mqd_load_v10_3,
	.hqd_sdma_load = hqd_sdma_load_v10_3,
	.hqd_dump = hqd_dump_v10_3,
	.hqd_sdma_dump = hqd_sdma_dump_v10_3,
	.hqd_is_occupied = hqd_is_occupied_v10_3,
	.hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3,
	.hqd_destroy = hqd_destroy_v10_3,
	.hqd_sdma_destroy = hqd_sdma_destroy_v10_3,
	.address_watch_disable = address_watch_disable_v10_3,
	.address_watch_execute = address_watch_execute_v10_3,
	.wave_control_execute = wave_control_execute_v10_3,
	.address_watch_get_offset = address_watch_get_offset_v10_3,
	.get_atc_vmid_pasid_mapping_info = NULL,
	.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
#if 0
	.enable_debug_trap = enable_debug_trap_v10_3,
	.disable_debug_trap = disable_debug_trap_v10_3,
	.set_wave_launch_trap_override = set_wave_launch_trap_override_v10_3,
	.set_wave_launch_mode = set_wave_launch_mode_v10_3,
	.get_iq_wait_times = get_iq_wait_times_v10_3,
	.build_grace_period_packet_info = build_grace_period_packet_info_v10_3,
#endif
};