/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "amdgpu_ucode.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};

enum {
	ADDRESS_WATCH_REG_ADDR_HI = 0,
	ADDRESS_WATCH_REG_ADDR_LO,
	ADDRESS_WATCH_REG_CNTL,
	ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
	/* extend the mask to 26 bits to match the low address field */
	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};

static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};

union TCP_WATCH_CNTL_BITS {
	struct {
		uint32_t mask:24;
		uint32_t vmid:4;
		uint32_t atc:1;
		uint32_t mode:2;
		uint32_t valid:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};
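
/*
 * Layout sketch for TCP_WATCH_CNTL_BITS, assuming the LSB-first bitfield
 * allocation used on the platforms this driver targets: mask occupies
 * bits [23:0], vmid [27:24], atc [28], mode [30:29] and valid [31].
 * For example (hypothetical values), a valid watch point on VMID 8 with
 * the default mask and ATC set encodes as
 *
 *	cntl.bitfields.mask  = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
 *	cntl.bitfields.vmid  = 8;
 *	cntl.bitfields.atc   = 1;
 *	cntl.bitfields.valid = 1;
 *	cntl.u32All == 0x98FFFFFF
 *
 * which is consistent with ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000
 * landing on bit 28.
 */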

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid);

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid);

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_local_mem_info = get_local_mem_info,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_pasid_alloc,
	.free_pasid = amdgpu_pasid_free,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
	.get_cu_info = get_cu_info,
	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.submit_ib = amdgpu_amdkfd_submit_ib,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}
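
/*
 * Usage sketch (the call site below is an assumption, not part of this
 * file): the shared amdgpu_amdkfd glue is expected to pick this table up
 * once per device, and the KFD core then calls through it, e.g.
 *
 *	struct kfd2kgd_calls *f2g = amdgpu_amdkfd_gfx_7_get_functions();
 *	f2g->get_tile_config(kgd, &config);
 *
 * Every entry receives the opaque struct kgd_dev *, which the functions
 * in this file simply cast back to struct amdgpu_device *.
 */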

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}
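
/*
 * Worked example for the MEC/pipe decode in acquire_queue() above:
 * assuming num_pipe_per_mec = 4 (typical for CIK, but an assumption
 * here), pipe_id 5 maps to mec = 5 / 4 + 1 = 2 and pipe = 5 % 4 = 1,
 * i.e. the second pipe of the second compute micro-engine. The + 1
 * accounts for ME 0 being the graphics micro-engine, so compute MECs
 * start at ME 1 in SRBM_GFX_CNTL.
 */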

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}
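
/*
 * Worked example for get_sdma_base_addr() above: an MQD with
 * sdma_engine_id = 1 and sdma_queue_id = 2 yields
 *
 *	1 * SDMA1_REGISTER_OFFSET + 2 * KFD_CIK_SDMA_QUEUE_OFFSET
 *
 * and adding that base to the mmSDMA0_RLC0_* register offsets used in
 * the SDMA functions below addresses the RLC2 queue of the second SDMA
 * engine.
 */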

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_wptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	unsigned long end_jiffies;
	uint32_t sdma_base_addr;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	if (m->sdma_engine_id) {
		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
	} else {
		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
	}

	data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
			ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdma_rlc_rb_rptr);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
			m->sdma_rlc_virtual_addr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}
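
/*
 * Note: kgd_hqd_sdma_dump() below reuses the DUMP_REG() helper defined
 * inside kgd_hqd_dump() above. Each of the *n_regs entries written to
 * *dump is a { byte offset, value } pair; the dword register offset is
 * shifted left by 2 to produce the byte offset.
 */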
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
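
/*
 * Note on kgd_hqd_is_occupied() above: CP_HQD_PQ_BASE/_HI are assumed to
 * hold the ring buffer base in 256-byte units (address bits [39:8]),
 * which is why queue_address is shifted right by 8 before being split
 * into low/high halves and compared against the two registers.
 */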

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;

	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		/* Test the IDLE bit with its mask, matching the wait loop in
		 * kgd_hqd_sdma_load(). The previous code tested against
		 * SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT, which is a bit
		 * position (not a mask) from a different register.
		 */
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;
	unsigned int i;

	cntl.u32All = 0;

	cntl.bitfields.valid = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.atc = 1;

	/* Turning off this address until we set all the registers */
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
			ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}
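
/*
 * Worked example for the watchRegs[] indexing used above and below: with
 * ADDRESS_WATCH_REG_MAX = 3, the CNTL register of watch point 2 is
 * watchRegs[2 * 3 + ADDRESS_WATCH_REG_CNTL] = watchRegs[8], i.e.
 * mmTCP_WATCH2_CNTL. kgd_address_watch_get_offset() returns the same
 * element for a given (watch_point_id, reg_offset) pair.
 */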

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = cntl_val;

	/* Turning off this watch point until we set all the registers */
	cntl.bitfields.valid = 0;
	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

	/* Enable the watch point */
	cntl.bitfields.valid = 1;

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */

	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
			adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
			adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
			adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
			adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
			adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
			adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
			adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
			adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
			uint32_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
}

static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid;
	unsigned int tmp;

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;

		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid\n");
		return 0;
	}

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
	RREG32(mmVM_INVALIDATE_RESPONSE);
	return 0;
}