/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

struct cik_sdma_rlc_registers;

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);
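/*
 * All of the entry points below receive an opaque struct kgd_dev
 * pointer that is really a struct amdgpu_device (see
 * get_amdgpu_device()); amdkfd reaches them through the kfd2kgd_calls
 * table returned by amdgpu_amdkfd_gfx_8_0_get_functions().
 */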
/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_vm_alloc_pasid,
	.free_pasid = amdgpu_vm_free_pasid,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}
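/*
 * Usage sketch (illustrative; mirrors the callers in this file): every
 * banked register access is bracketed by lock_srbm()/unlock_srbm(), so
 * the SRBM_GFX_CNTL bank selection cannot change under a caller:
 *
 *	lock_srbm(kgd, mec, pipe, queue_id, 0);
 *	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
 *	unlock_srbm(kgd);
 */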
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}
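/*
 * Note: the SDMA RLC queue path is largely stubbed out on gfx8:
 * get_sdma_base_addr() always returns 0, so the SDMA0_RLC0_* register
 * offsets below are used unmodified, and kgd_hqd_sdma_load() is a
 * no-op. The MQD is only forward-declared as struct
 * cik_sdma_rlc_registers, apparently reusing the CIK-era layout.
 */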
static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	return 0;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_wptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
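/*
 * The hqd_dequeue_request_type values defined at the top of this file
 * double as the encoding written to mmCP_HQD_DEQUEUE_REQUEST below:
 * 0 = no action, 1 = drain pipe, 2 = reset waves.
 */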
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	int timeout = utimeout;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		/* Test the IDLE bit with a mask, not a shift count. */
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (timeout <= 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);

	return 0;
}
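/*
 * ATC_VMID<n>_PASID_MAPPING holds the PASID in its low bits, ORed with
 * a VALID flag (see kgd_set_pasid_vmid_mapping() above); the two
 * helpers below extract those two fields with the matching masks.
 */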
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	/* Return the PASID field, not the VALID bit. */
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	BUG_ON(kgd == NULL);

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}