/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"

#define GFX11_NUM_GFX_RINGS	1
#define GFX11_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1		0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1

MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_11_0[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_11_0_0[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_rlc_spm_11_0[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);

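/* SET_RESOURCES tells the KIQ which queues it owns via the queue mask;
 * the GWS, OAC and GDS resource fields are unused here and zeroed.
 */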
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
}

static void gfx_v11_0_init_spm_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		soc15_program_register_sequence(adev,
						golden_settings_gc_rlc_spm_11_0,
						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_11_0));
		break;
	default:
		break;
	}
}

static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_0,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_0));
		break;
	case IP_VERSION(11, 0, 1):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
	gfx_v11_0_init_spm_golden_registers(adev);
}

static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

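/* Smoke-test a ring: seed SCRATCH_REG0 with 0xCAFEDEAD, emit a packet that
 * rewrites it to 0xDEADBEEF, then poll until the new value lands or the
 * timeout expires. KIQ rings take the gfx_v11_0_ring_emit_wreg() path
 * instead of SET_UCONFIG_REG.
 */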
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

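/* IB test: build a one-packet indirect buffer that WRITE_DATAs 0xDEADBEEF
 * into a writeback slot (or a MES ctx offset for MES queues), schedule it,
 * wait on the fence, and check that the value actually reached memory.
 */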
static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw doesn't have indirect buffer support for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);

		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
	adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
	adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
	adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
}

static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_3 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
	adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
	adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
	adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
}

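/* Fetch and validate the PFP/ME/RLC/MEC images named after the decoded GC IP
 * version (amdgpu/<prefix>_<name>.bin). A v2.0 PFP header marks an RS64-capable
 * CP. For AMDGPU_FW_LOAD_PSP, each image (plus the per-pipe stack data on RS64
 * parts) is also registered in adev->firmware.ucode[].
 */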
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char fw_name[40];
	char ucode_prefix[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	/* check pfp fw hdr version to decide whether to enable rs64 for gfx11 */
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);

	} else {
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
		adev->gfx.me_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.me_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);

	} else {
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
		adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	}

	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
		err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);

		adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
		adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
		adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
		adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
		adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
		adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
		adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
		adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
		adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
		adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
		adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
		adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
		if (!adev->gfx.rlc.register_list_format) {
			err = -ENOMEM;
			goto out;
		}

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
		for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
			adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

		adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
		for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
			adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

		if (version_major == 2) {
			if (version_minor >= 1)
				gfx_v11_0_init_rlc_ext_microcode(adev);
			if (version_minor >= 2)
				gfx_v11_0_init_rlc_iram_dram_microcode(adev);
			if (version_minor == 3)
				gfx_v11_0_init_rlcp_rlcv_microcode(adev);
		}
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);

	} else {
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	}

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rs64_enable) {
			cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP;
			info->fw = adev->gfx.pfp_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK;
			info->fw = adev->gfx.pfp_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK;
			info->fw = adev->gfx.pfp_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME;
			info->fw = adev->gfx.me_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK;
			info->fw = adev->gfx.me_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK;
			info->fw = adev->gfx.me_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
		} else {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
			info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
			info->fw = adev->gfx.pfp_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
			info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
			info->fw = adev->gfx.me_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
				      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
			info->fw = adev->gfx.mec_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		if (info->fw) {
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
		if (adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
		    adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx11: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
	}

	return err;
}

static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;
	char fw_name[40];
	char ucode_prefix[30];

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
	err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->psp.toc_fw);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	dev_err(adev->dev, "Failed to load TOC microcode\n");
	release_firmware(adev->psp.toc_fw);
	adev->psp.toc_fw = NULL;
	return err;
}

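/* Size the clear-state buffer in dwords: the begin/end PREAMBLE_CNTL packets,
 * one CONTEXT_CONTROL, a SET_CONTEXT_REG run per SECT_CONTEXT extent, the
 * PA_SC_TILE_STEERING_OVERRIDE write, and the final CLEAR_STATE packet.
 * gfx_v11_0_get_csb_buffer() below emits exactly this sequence.
 */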
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}

static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static int gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	int r;

	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);

	r = gfx_v11_0_init_microcode(adev);
	if (r)
		DRM_ERROR("Failed to load gfx firmware!\n");

	return r;
}

static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

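/* SQ indirect register access: select the wave and register index through
 * SQ_IND_INDEX, then read the value back from SQ_IND_DATA. AUTO_INCR lets
 * wave_read_regs() stream a run of consecutive registers.
 */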
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 2 wave data */
	dst[(*no_fields)++] = 2;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}

static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}

static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};

static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;
	return 0;
}

static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];

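/* Each RLC TOC entry carries a firmware id plus its offset and size in
 * dwords; cache them here in bytes for the backdoor-autoload copies below.
 */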
static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;

		ucode++;
	}
}

static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
			rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;

	return total_size;
}

static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

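/* Copy one firmware image into the autoload bo at the offset the TOC assigned
 * to it, zero-pad up to the TOC entry size, and flag the id in the autoload
 * mask; the RS64 PFP/ME ids are skipped when setting the mask.
 */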
static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC21_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size,
						       uint32_t *fw_autoload_mask)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);

	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	void *data;
	uint32_t size;
	uint64_t *toc_ptr;

	*(uint64_t *)fw_autoload_mask |= 0x1;

	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint64_t *)data + size / 8 - 1;
	*toc_ptr = *(uint64_t *)fw_autoload_mask;

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
						   data, size, fw_autoload_mask);
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	if (adev->gfx.rs64_enable) {
		/* pfp ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		/* me ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		/* mec ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		/* instruction */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
							   fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
							   fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
							   fw_data, fw_size, fw_autoload_mask);
	} else {
		/* pfp ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
							   fw_data, fw_size, fw_autoload_mask);

		/* me ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
							   fw_data, fw_size, fw_autoload_mask);

		/* mec ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			cp_hdr->jt_size * 4;
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
							   fw_data, fw_size, fw_autoload_mask);
	}

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size, fw_autoload_mask);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
								   fw_data, fw_size, fw_autoload_mask);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
								   fw_data, fw_size, fw_autoload_mask);
		}
	}
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
							    uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);

	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
}

static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
			data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
			data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				ucode_id, fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				data_id, fw_data, fw_size, fw_autoload_mask);
	}
}

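/* Backdoor (RLC) autoload: stage every image in the autoload bo, point the
 * IMU bootloader registers at the staged RLC_G image, then load, set up and
 * start the IMU, and finally disable GPA mode.
 */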
void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev, 1504 uint32_t *fw_autoload_mask) 1505 { 1506 const __le32 *fw_data; 1507 unsigned fw_size; 1508 const struct mes_firmware_header_v1_0 *mes_hdr; 1509 int pipe, ucode_id, data_id; 1510 1511 for (pipe = 0; pipe < 2; pipe++) { 1512 if (pipe == 0) { 1513 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0; 1514 data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK; 1515 } else { 1516 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1; 1517 data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK; 1518 } 1519 1520 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1521 adev->mes.fw[pipe]->data; 1522 1523 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1524 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); 1525 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); 1526 1527 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1528 ucode_id, fw_data, fw_size, fw_autoload_mask); 1529 1530 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1531 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); 1532 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); 1533 1534 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1535 data_id, fw_data, fw_size, fw_autoload_mask); 1536 } 1537 } 1538 1539 static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) 1540 { 1541 uint32_t rlc_g_offset, rlc_g_size; 1542 uint64_t gpu_addr; 1543 uint32_t autoload_fw_id[2]; 1544 1545 memset(autoload_fw_id, 0, sizeof(uint32_t) * 2); 1546 1547 /* RLC autoload sequence 2: copy ucode */ 1548 gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id); 1549 gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id); 1550 gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id); 1551 gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id); 1552 1553 rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset; 1554 rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size; 1555 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset; 1556 1557 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr)); 1558 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr)); 1559 1560 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size); 1561 1562 /* RLC autoload sequence 3: load IMU fw */ 1563 if (adev->gfx.imu.funcs->load_microcode) 1564 adev->gfx.imu.funcs->load_microcode(adev); 1565 /* RLC autoload sequence 4: init IMU fw */ 1566 if (adev->gfx.imu.funcs->setup_imu) 1567 adev->gfx.imu.funcs->setup_imu(adev); 1568 if (adev->gfx.imu.funcs->start_imu) 1569 adev->gfx.imu.funcs->start_imu(adev); 1570 1571 /* RLC autoload sequence 5: disable gpa mode */ 1572 gfx_v11_0_disable_gpa_mode(adev); 1573 1574 return 0; 1575 } 1576 1577 static int gfx_v11_0_sw_init(void *handle) 1578 { 1579 int i, j, k, r, ring_id = 0; 1580 struct amdgpu_kiq *kiq; 1581 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1582 1583 adev->gfxhub.funcs->init(adev); 1584 1585 switch (adev->ip_versions[GC_HWIP][0]) { 1586 case IP_VERSION(11, 0, 0): 1587 case IP_VERSION(11, 0, 1): 1588 case IP_VERSION(11, 0, 2): 1589 adev->gfx.me.num_me = 1; 1590 adev->gfx.me.num_pipe_per_me = 1; 1591 adev->gfx.me.num_queue_per_pipe = 1; 1592 adev->gfx.mec.num_mec = 2; 1593 adev->gfx.mec.num_pipe_per_mec = 4; 1594 adev->gfx.mec.num_queue_per_pipe = 4; 1595 break; 1596 default: 1597 adev->gfx.me.num_me = 1; 1598 adev->gfx.me.num_pipe_per_me = 1; 1599 adev->gfx.me.num_queue_per_pipe = 1; 1600
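/* compute: one MEC, 4 pipes per MEC, 8 queues per pipe */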
adev->gfx.mec.num_mec = 1; 1601 adev->gfx.mec.num_pipe_per_mec = 4; 1602 adev->gfx.mec.num_queue_per_pipe = 8; 1603 break; 1604 } 1605 1606 /* EOP Event */ 1607 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1608 GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, 1609 &adev->gfx.eop_irq); 1610 if (r) 1611 return r; 1612 1613 /* Privileged reg */ 1614 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1615 GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, 1616 &adev->gfx.priv_reg_irq); 1617 if (r) 1618 return r; 1619 1620 /* Privileged inst */ 1621 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1622 GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT, 1623 &adev->gfx.priv_inst_irq); 1624 if (r) 1625 return r; 1626 1627 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1628 1629 if (adev->gfx.imu.funcs) { 1630 if (adev->gfx.imu.funcs->init_microcode) { 1631 r = adev->gfx.imu.funcs->init_microcode(adev); 1632 if (r) 1633 DRM_ERROR("Failed to load imu firmware!\n"); 1634 } 1635 } 1636 1637 r = gfx_v11_0_me_init(adev); 1638 if (r) 1639 return r; 1640 1641 r = gfx_v11_0_rlc_init(adev); 1642 if (r) { 1643 DRM_ERROR("Failed to init rlc BOs!\n"); 1644 return r; 1645 } 1646 1647 r = gfx_v11_0_mec_init(adev); 1648 if (r) { 1649 DRM_ERROR("Failed to init MEC BOs!\n"); 1650 return r; 1651 } 1652 1653 /* set up the gfx ring */ 1654 for (i = 0; i < adev->gfx.me.num_me; i++) { 1655 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 1656 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 1657 if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) 1658 continue; 1659 1660 r = gfx_v11_0_gfx_ring_init(adev, ring_id, 1661 i, k, j); 1662 if (r) 1663 return r; 1664 ring_id++; 1665 } 1666 } 1667 } 1668 1669 ring_id = 0; 1670 /* set up the compute queues - allocate horizontally across pipes */ 1671 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1672 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1673 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 1674 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, 1675 j)) 1676 continue; 1677 1678 r = gfx_v11_0_compute_ring_init(adev, ring_id, 1679 i, k, j); 1680 if (r) 1681 return r; 1682 1683 ring_id++; 1684 } 1685 } 1686 } 1687 1688 if (!adev->enable_mes_kiq) { 1689 r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE); 1690 if (r) { 1691 DRM_ERROR("Failed to init KIQ BOs!\n"); 1692 return r; 1693 } 1694 1695 kiq = &adev->gfx.kiq; 1696 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); 1697 if (r) 1698 return r; 1699 } 1700 1701 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd)); 1702 if (r) 1703 return r; 1704 1705 /* allocate visible FB for rlc auto-loading fw */ 1706 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1707 r = gfx_v11_0_init_toc_microcode(adev); 1708 if (r) 1709 dev_err(adev->dev, "Failed to load toc firmware!\n"); 1710 r = gfx_v11_0_rlc_autoload_buffer_init(adev); 1711 if (r) 1712 return r; 1713 } 1714 1715 r = gfx_v11_0_gpu_early_init(adev); 1716 if (r) 1717 return r; 1718 1719 return 0; 1720 } 1721 1722 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev) 1723 { 1724 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj, 1725 &adev->gfx.pfp.pfp_fw_gpu_addr, 1726 (void **)&adev->gfx.pfp.pfp_fw_ptr); 1727 1728 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj, 1729 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 1730 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 1731 } 1732 1733 static void gfx_v11_0_me_fini(struct amdgpu_device *adev) 1734 { 1735 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj, 1736 
&adev->gfx.me.me_fw_gpu_addr, 1737 (void **)&adev->gfx.me.me_fw_ptr); 1738 1739 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj, 1740 &adev->gfx.me.me_fw_data_gpu_addr, 1741 (void **)&adev->gfx.me.me_fw_data_ptr); 1742 } 1743 1744 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) 1745 { 1746 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, 1747 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1748 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1749 } 1750 1751 static int gfx_v11_0_sw_fini(void *handle) 1752 { 1753 int i; 1754 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1755 1756 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1757 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 1758 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1759 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1760 1761 amdgpu_gfx_mqd_sw_fini(adev); 1762 1763 if (!adev->enable_mes_kiq) { 1764 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); 1765 amdgpu_gfx_kiq_fini(adev); 1766 } 1767 1768 gfx_v11_0_pfp_fini(adev); 1769 gfx_v11_0_me_fini(adev); 1770 gfx_v11_0_rlc_fini(adev); 1771 gfx_v11_0_mec_fini(adev); 1772 1773 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 1774 gfx_v11_0_rlc_autoload_buffer_fini(adev); 1775 1776 gfx_v11_0_free_microcode(adev); 1777 1778 return 0; 1779 } 1780 1781 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 1782 u32 sh_num, u32 instance) 1783 { 1784 u32 data; 1785 1786 if (instance == 0xffffffff) 1787 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 1788 INSTANCE_BROADCAST_WRITES, 1); 1789 else 1790 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, 1791 instance); 1792 1793 if (se_num == 0xffffffff) 1794 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1795 1); 1796 else 1797 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1798 1799 if (sh_num == 0xffffffff) 1800 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES, 1801 1); 1802 else 1803 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num); 1804 1805 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data); 1806 } 1807 1808 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1809 { 1810 u32 data, mask; 1811 1812 data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); 1813 data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); 1814 1815 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; 1816 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; 1817 1818 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se / 1819 adev->gfx.config.max_sh_per_se); 1820 1821 return (~data) & mask; 1822 } 1823 1824 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) 1825 { 1826 int i, j; 1827 u32 data; 1828 u32 active_rbs = 0; 1829 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / 1830 adev->gfx.config.max_sh_per_se; 1831 1832 mutex_lock(&adev->grbm_idx_mutex); 1833 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1834 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1835 gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff); 1836 data = gfx_v11_0_get_rb_active_bitmap(adev); 1837 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 1838 rb_bitmap_width_per_sh); 1839 } 1840 } 1841 gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1842 mutex_unlock(&adev->grbm_idx_mutex); 1843 1844 adev->gfx.config.backend_enable_mask = active_rbs; 1845 adev->gfx.config.num_rbs = hweight32(active_rbs); 1846 } 1847 1848 #define DEFAULT_SH_MEM_BASES (0x6000) 1849 #define 
LDS_APP_BASE 0x1 1850 #define SCRATCH_APP_BASE 0x2 1851 1852 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) 1853 { 1854 int i; 1855 uint32_t sh_mem_bases; 1856 uint32_t data; 1857 1858 /* 1859 * Configure apertures: 1860 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 1861 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 1862 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 1863 */ 1864 sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) | 1865 SCRATCH_APP_BASE; 1866 1867 mutex_lock(&adev->srbm_mutex); 1868 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1869 soc21_grbm_select(adev, 0, 0, 0, i); 1870 /* CP and shaders */ 1871 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 1872 WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); 1873 1874 /* Enable trap for each kfd vmid. */ 1875 data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL)); 1876 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data); 1877 } 1878 soc21_grbm_select(adev, 0, 0, 0, 0); 1879 mutex_unlock(&adev->srbm_mutex); 1880 1881 /* Initialize all compute VMIDs to have no GDS, GWS, or OA 1882 access. These should be enabled by FW for target VMIDs. */ 1883 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1884 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0); 1885 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0); 1886 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0); 1887 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0); 1888 } 1889 } 1890 1891 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev) 1892 { 1893 int vmid; 1894 1895 /* 1896 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA 1897 * access. Compute VMIDs should be enabled by FW for target VMIDs, 1898 * the driver can enable them for graphics. VMID0 should maintain 1899 * access so that HWS firmware can save/restore entries. 1900 */ 1901 for (vmid = 1; vmid < 16; vmid++) { 1902 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0); 1903 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0); 1904 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0); 1905 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0); 1906 } 1907 } 1908 1909 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev) 1910 { 1911 /* TODO: harvest feature to be added later. */ 1912 } 1913 1914 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev) 1915 { 1916 /* TCCs are global (not instanced).
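Either disable source hides a TCC: the fuse-level CGTS_TCC_DISABLE or the user/driver-level CGTS_USER_TCC_DISABLE, hence the two reads are ORed below.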
*/ 1917 uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) | 1918 RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE); 1919 1920 adev->gfx.config.tcc_disabled_mask = 1921 REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | 1922 (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16); 1923 } 1924 1925 static void gfx_v11_0_constants_init(struct amdgpu_device *adev) 1926 { 1927 u32 tmp; 1928 int i; 1929 1930 WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); 1931 1932 gfx_v11_0_setup_rb(adev); 1933 gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info); 1934 gfx_v11_0_get_tcc_info(adev); 1935 adev->gfx.config.pa_sc_tile_steering_override = 0; 1936 1937 /* XXX SH_MEM regs */ 1938 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1939 mutex_lock(&adev->srbm_mutex); 1940 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { 1941 soc21_grbm_select(adev, 0, 0, 0, i); 1942 /* CP and shaders */ 1943 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); 1944 if (i != 0) { 1945 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, 1946 (adev->gmc.private_aperture_start >> 48)); 1947 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, 1948 (adev->gmc.shared_aperture_start >> 48)); 1949 WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp); 1950 } 1951 } 1952 soc21_grbm_select(adev, 0, 0, 0, 0); 1953 1954 mutex_unlock(&adev->srbm_mutex); 1955 1956 gfx_v11_0_init_compute_vmid(adev); 1957 gfx_v11_0_init_gds_vmid(adev); 1958 } 1959 1960 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 1961 bool enable) 1962 { 1963 u32 tmp; 1964 1965 if (amdgpu_sriov_vf(adev)) 1966 return; 1967 1968 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0); 1969 1970 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 1971 enable ? 1 : 0); 1972 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 1973 enable ? 1 : 0); 1974 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 1975 enable ? 1 : 0); 1976 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 1977 enable ? 
1 : 0); 1978 1979 WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp); 1980 } 1981 1982 static int gfx_v11_0_init_csb(struct amdgpu_device *adev) 1983 { 1984 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 1985 1986 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI, 1987 adev->gfx.rlc.clear_state_gpu_addr >> 32); 1988 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO, 1989 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 1990 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); 1991 1992 return 0; 1993 } 1994 1995 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) 1996 { 1997 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); 1998 1999 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); 2000 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp); 2001 } 2002 2003 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev) 2004 { 2005 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2006 udelay(50); 2007 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 2008 udelay(50); 2009 } 2010 2011 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, 2012 bool enable) 2013 { 2014 uint32_t rlc_pg_cntl; 2015 2016 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 2017 2018 if (!enable) { 2019 /* RLC_PG_CNTL[23] = 0 (default) 2020 * RLC will wait for handshake acks with SMU 2021 * GFXOFF will be enabled 2022 * RLC_PG_CNTL[23] = 1 2023 * RLC will not issue any message to SMU 2024 * hence no handshake between SMU & RLC 2025 * GFXOFF will be disabled 2026 */ 2027 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2028 } else 2029 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2030 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl); 2031 } 2032 2033 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev) 2034 { 2035 /* TODO: enable rlc & smu handshake until smu 2036 * and gfxoff feature works as expected */ 2037 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) 2038 gfx_v11_0_rlc_smu_handshake_cntl(adev, false); 2039 2040 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 2041 udelay(50); 2042 } 2043 2044 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev) 2045 { 2046 uint32_t tmp; 2047 2048 /* enable Save Restore Machine */ 2049 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL)); 2050 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 2051 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; 2052 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp); 2053 } 2054 2055 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev) 2056 { 2057 const struct rlc_firmware_header_v2_0 *hdr; 2058 const __le32 *fw_data; 2059 unsigned i, fw_size; 2060 2061 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2062 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2063 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2064 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 2065 2066 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, 2067 RLCG_UCODE_LOADING_START_ADDRESS); 2068 2069 for (i = 0; i < fw_size; i++) 2070 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, 2071 le32_to_cpup(fw_data++)); 2072 2073 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 2074 } 2075 2076 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev) 2077 { 2078 const struct rlc_firmware_header_v2_2 *hdr; 2079 const __le32 *fw_data; 2080 unsigned i, fw_size; 2081 u32 tmp; 2082 2083 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 2084 2085 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 
2086 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); 2087 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; 2088 2089 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0); 2090 2091 for (i = 0; i < fw_size; i++) { 2092 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2093 msleep(1); 2094 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA, 2095 le32_to_cpup(fw_data++)); 2096 } 2097 2098 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2099 2100 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2101 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); 2102 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; 2103 2104 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0); 2105 for (i = 0; i < fw_size; i++) { 2106 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2107 msleep(1); 2108 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA, 2109 le32_to_cpup(fw_data++)); 2110 } 2111 2112 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2113 2114 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL); 2115 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); 2116 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); 2117 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp); 2118 } 2119 2120 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev) 2121 { 2122 const struct rlc_firmware_header_v2_3 *hdr; 2123 const __le32 *fw_data; 2124 unsigned i, fw_size; 2125 u32 tmp; 2126 2127 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; 2128 2129 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2130 le32_to_cpu(hdr->rlcp_ucode_offset_bytes)); 2131 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4; 2132 2133 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0); 2134 2135 for (i = 0; i < fw_size; i++) { 2136 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2137 msleep(1); 2138 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA, 2139 le32_to_cpup(fw_data++)); 2140 } 2141 2142 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version); 2143 2144 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE); 2145 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); 2146 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp); 2147 2148 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2149 le32_to_cpu(hdr->rlcv_ucode_offset_bytes)); 2150 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4; 2151 2152 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0); 2153 2154 for (i = 0; i < fw_size; i++) { 2155 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2156 msleep(1); 2157 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA, 2158 le32_to_cpup(fw_data++)); 2159 } 2160 2161 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version); 2162 2163 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL); 2164 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1); 2165 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp); 2166 } 2167 2168 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) 2169 { 2170 const struct rlc_firmware_header_v2_0 *hdr; 2171 uint16_t version_major; 2172 uint16_t version_minor; 2173 2174 if (!adev->gfx.rlc_fw) 2175 return -EINVAL; 2176 2177 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2178 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2179 2180 version_major = le16_to_cpu(hdr->header.header_version_major); 2181 version_minor = le16_to_cpu(hdr->header.header_version_minor); 2182 2183 if (version_major == 2) { 2184 gfx_v11_0_load_rlcg_microcode(adev); 2185 if (amdgpu_dpm == 1) { 2186 if (version_minor >= 2) 2187 
gfx_v11_0_load_rlc_iram_dram_microcode(adev); 2188 if (version_minor == 3) 2189 gfx_v11_0_load_rlcp_rlcv_microcode(adev); 2190 } 2191 2192 return 0; 2193 } 2194 2195 return -EINVAL; 2196 } 2197 2198 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev) 2199 { 2200 int r; 2201 2202 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2203 gfx_v11_0_init_csb(adev); 2204 2205 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ 2206 gfx_v11_0_rlc_enable_srm(adev); 2207 } else { 2208 if (amdgpu_sriov_vf(adev)) { 2209 gfx_v11_0_init_csb(adev); 2210 return 0; 2211 } 2212 2213 adev->gfx.rlc.funcs->stop(adev); 2214 2215 /* disable CG */ 2216 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); 2217 2218 /* disable PG */ 2219 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0); 2220 2221 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 2222 /* legacy rlc firmware loading */ 2223 r = gfx_v11_0_rlc_load_microcode(adev); 2224 if (r) 2225 return r; 2226 } 2227 2228 gfx_v11_0_init_csb(adev); 2229 2230 adev->gfx.rlc.funcs->start(adev); 2231 } 2232 return 0; 2233 } 2234 2235 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr) 2236 { 2237 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2238 uint32_t tmp; 2239 int i; 2240 2241 /* Trigger an invalidation of the L1 instruction caches */ 2242 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2243 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2244 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2245 2246 /* Wait for invalidation complete */ 2247 for (i = 0; i < usec_timeout; i++) { 2248 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2249 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2250 INVALIDATE_CACHE_COMPLETE)) 2251 break; 2252 udelay(1); 2253 } 2254 2255 if (i >= usec_timeout) { 2256 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2257 return -EINVAL; 2258 } 2259 2260 if (amdgpu_emu_mode == 1) 2261 adev->hdp.funcs->flush_hdp(adev, NULL); 2262 2263 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2264 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2265 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2266 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2267 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2268 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2269 2270 /* Program me ucode address into instruction cache address register */ 2271 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2272 lower_32_bits(addr) & 0xFFFFF000); 2273 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2274 upper_32_bits(addr)); 2275 2276 return 0; 2277 } 2278 2279 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr) 2280 { 2281 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2282 uint32_t tmp; 2283 int i; 2284 2285 /* Trigger an invalidation of the L1 instruction caches */ 2286 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2287 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2288 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2289 2290 /* Wait for invalidation complete */ 2291 for (i = 0; i < usec_timeout; i++) { 2292 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2293 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2294 INVALIDATE_CACHE_COMPLETE)) 2295 break; 2296 udelay(1); 2297 } 2298 2299 if (i >= usec_timeout) { 2300 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2301 return -EINVAL; 2302 } 2303 2304 if (amdgpu_emu_mode == 1) 2305 adev->hdp.funcs->flush_hdp(adev, NULL); 2306 2307 tmp = RREG32_SOC15(GC, 0,
regCP_PFP_IC_BASE_CNTL); 2308 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2309 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2310 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2311 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2312 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2313 2314 /* Program pfp ucode address into instruction cache address register */ 2315 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2316 lower_32_bits(addr) & 0xFFFFF000); 2317 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2318 upper_32_bits(addr)); 2319 2320 return 0; 2321 } 2322 2323 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr) 2324 { 2325 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2326 uint32_t tmp; 2327 int i; 2328 2329 /* Trigger an invalidation of the L1 instruction caches */ 2330 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2331 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2332 2333 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2334 2335 /* Wait for invalidation complete */ 2336 for (i = 0; i < usec_timeout; i++) { 2337 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2338 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2339 INVALIDATE_CACHE_COMPLETE)) 2340 break; 2341 udelay(1); 2342 } 2343 2344 if (i >= usec_timeout) { 2345 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2346 return -EINVAL; 2347 } 2348 2349 if (amdgpu_emu_mode == 1) 2350 adev->hdp.funcs->flush_hdp(adev, NULL); 2351 2352 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2353 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2354 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2355 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2356 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2357 2358 /* Program mec1 ucode address into instruction cache address register */ 2359 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, 2360 lower_32_bits(addr) & 0xFFFFF000); 2361 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2362 upper_32_bits(addr)); 2363 2364 return 0; 2365 } 2366 2367 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2368 { 2369 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2370 uint32_t tmp; 2371 unsigned i, pipe_id; 2372 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2373 2374 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2375 adev->gfx.pfp_fw->data; 2376 2377 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2378 lower_32_bits(addr)); 2379 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2380 upper_32_bits(addr)); 2381 2382 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2383 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2384 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2385 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2386 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2387 2388 /* 2389 * Programming any of the CP_PFP_IC_BASE registers 2390 * forces invalidation of the PFP L1 I$.
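No explicit INVALIDATE_CACHE request is made on this path; the base write itself is the trigger.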
Wait for the 2391 * invalidation complete 2392 */ 2393 for (i = 0; i < usec_timeout; i++) { 2394 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2395 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2396 INVALIDATE_CACHE_COMPLETE)) 2397 break; 2398 udelay(1); 2399 } 2400 2401 if (i >= usec_timeout) { 2402 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2403 return -EINVAL; 2404 } 2405 2406 /* Prime the L1 instruction caches */ 2407 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2408 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2409 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2410 /* Wait for the cache to be primed */ 2411 for (i = 0; i < usec_timeout; i++) { 2412 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2413 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2414 ICACHE_PRIMED)) 2415 break; 2416 udelay(1); 2417 } 2418 2419 if (i >= usec_timeout) { 2420 dev_err(adev->dev, "failed to prime instruction cache\n"); 2421 return -EINVAL; 2422 } 2423 2424 mutex_lock(&adev->srbm_mutex); 2425 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2426 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2427 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2428 (pfp_hdr->ucode_start_addr_hi << 30) | 2429 (pfp_hdr->ucode_start_addr_lo >> 2)); 2430 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2431 pfp_hdr->ucode_start_addr_hi >> 2); 2432 2433 /* 2434 * Program CP_ME_CNTL to reset given PIPE to take 2435 * effect of CP_PFP_PRGRM_CNTR_START. 2436 */ 2437 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2438 if (pipe_id == 0) 2439 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2440 PFP_PIPE0_RESET, 1); 2441 else 2442 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2443 PFP_PIPE1_RESET, 1); 2444 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2445 2446 /* Clear pfp pipe reset bit.
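Releasing it restarts the pipe from the CP_PFP_PRGRM_CNTR_START address programmed above.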
*/ 2447 if (pipe_id == 0) 2448 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2449 PFP_PIPE0_RESET, 0); 2450 else 2451 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2452 PFP_PIPE1_RESET, 0); 2453 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2454 2455 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 2456 lower_32_bits(addr2)); 2457 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 2458 upper_32_bits(addr2)); 2459 } 2460 soc21_grbm_select(adev, 0, 0, 0, 0); 2461 mutex_unlock(&adev->srbm_mutex); 2462 2463 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2464 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2465 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2466 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2467 2468 /* Invalidate the data caches */ 2469 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2470 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2471 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2472 2473 for (i = 0; i < usec_timeout; i++) { 2474 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2475 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2476 INVALIDATE_DCACHE_COMPLETE)) 2477 break; 2478 udelay(1); 2479 } 2480 2481 if (i >= usec_timeout) { 2482 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2483 return -EINVAL; 2484 } 2485 2486 return 0; 2487 } 2488 2489 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2490 { 2491 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2492 uint32_t tmp; 2493 unsigned i, pipe_id; 2494 const struct gfx_firmware_header_v2_0 *me_hdr; 2495 2496 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2497 adev->gfx.me_fw->data; 2498 2499 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2500 lower_32_bits(addr)); 2501 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2502 upper_32_bits(addr)); 2503 2504 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2505 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2506 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2507 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2508 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2509 2510 /* 2511 * Programming any of the CP_ME_IC_BASE registers 2512 * forces invalidation of the ME L1 I$. 
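As on the PFP side, the base write itself triggers the flush.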
Wait for the 2513 * invalidation complete 2514 */ 2515 for (i = 0; i < usec_timeout; i++) { 2516 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2517 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2518 INVALIDATE_CACHE_COMPLETE)) 2519 break; 2520 udelay(1); 2521 } 2522 2523 if (i >= usec_timeout) { 2524 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2525 return -EINVAL; 2526 } 2527 2528 /* Prime the instruction caches */ 2529 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2530 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 2531 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2532 2533 /* Wait for the instruction cache to be primed */ 2534 for (i = 0; i < usec_timeout; i++) { 2535 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2536 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2537 ICACHE_PRIMED)) 2538 break; 2539 udelay(1); 2540 } 2541 2542 if (i >= usec_timeout) { 2543 dev_err(adev->dev, "failed to prime instruction cache\n"); 2544 return -EINVAL; 2545 } 2546 2547 mutex_lock(&adev->srbm_mutex); 2548 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2549 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2550 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2551 (me_hdr->ucode_start_addr_hi << 30) | 2552 (me_hdr->ucode_start_addr_lo >> 2)); 2553 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2554 me_hdr->ucode_start_addr_hi >> 2); 2555 2556 /* 2557 * Program CP_ME_CNTL to reset given PIPE to take 2558 * effect of CP_ME_PRGRM_CNTR_START. 2559 */ 2560 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2561 if (pipe_id == 0) 2562 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2563 ME_PIPE0_RESET, 1); 2564 else 2565 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2566 ME_PIPE1_RESET, 1); 2567 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2568 2569 /* Clear me pipe reset bit.
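Only this pipe's bit is toggled; the read-modify-write leaves the other pipe's reset state untouched.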
*/ 2570 if (pipe_id == 0) 2571 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2572 ME_PIPE0_RESET, 0); 2573 else 2574 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2575 ME_PIPE1_RESET, 0); 2576 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2577 2578 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 2579 lower_32_bits(addr2)); 2580 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 2581 upper_32_bits(addr2)); 2582 } 2583 soc21_grbm_select(adev, 0, 0, 0, 0); 2584 mutex_unlock(&adev->srbm_mutex); 2585 2586 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2587 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2588 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2589 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2590 2591 /* Invalidate the data caches */ 2592 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2593 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2594 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2595 2596 for (i = 0; i < usec_timeout; i++) { 2597 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2598 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2599 INVALIDATE_DCACHE_COMPLETE)) 2600 break; 2601 udelay(1); 2602 } 2603 2604 if (i >= usec_timeout) { 2605 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2606 return -EINVAL; 2607 } 2608 2609 return 0; 2610 } 2611 2612 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2613 { 2614 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2615 uint32_t tmp; 2616 unsigned i; 2617 const struct gfx_firmware_header_v2_0 *mec_hdr; 2618 2619 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2620 adev->gfx.mec_fw->data; 2621 2622 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2623 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2624 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2625 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2626 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2627 2628 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 2629 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 2630 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 2631 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 2632 2633 mutex_lock(&adev->srbm_mutex); 2634 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2635 soc21_grbm_select(adev, 1, i, 0, 0); 2636 2637 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2); 2638 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 2639 upper_32_bits(addr2)); 2640 2641 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2642 mec_hdr->ucode_start_addr_lo >> 2 | 2643 mec_hdr->ucode_start_addr_hi << 30); 2644 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2645 mec_hdr->ucode_start_addr_hi >> 2); 2646 2647 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr); 2648 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2649 upper_32_bits(addr)); 2650 } 2651 mutex_unlock(&adev->srbm_mutex); 2652 soc21_grbm_select(adev, 0, 0, 0, 0); 2653 2654 /* Trigger an invalidation of the MEC data cache */ 2655 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2656 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2657 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 2658 2659 /* Wait for invalidation complete */ 2660 for (i = 0; i < usec_timeout; i++) { 2661 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2662 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 2663 INVALIDATE_DCACHE_COMPLETE)) 2664 break; 2665 udelay(1); 2666 } 2667 2668 if (i >=
usec_timeout) { 2669 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2670 return -EINVAL; 2671 } 2672 2673 /* Trigger an invalidation of the L1 instruction caches */ 2674 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2675 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2676 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2677 2678 /* Wait for invalidation complete */ 2679 for (i = 0; i < usec_timeout; i++) { 2680 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2681 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2682 INVALIDATE_CACHE_COMPLETE)) 2683 break; 2684 udelay(1); 2685 } 2686 2687 if (i >= usec_timeout) { 2688 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2689 return -EINVAL; 2690 } 2691 2692 return 0; 2693 } 2694 2695 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev) 2696 { 2697 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2698 const struct gfx_firmware_header_v2_0 *me_hdr; 2699 const struct gfx_firmware_header_v2_0 *mec_hdr; 2700 uint32_t pipe_id, tmp; 2701 2702 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2703 adev->gfx.mec_fw->data; 2704 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2705 adev->gfx.me_fw->data; 2706 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2707 adev->gfx.pfp_fw->data; 2708 2709 /* config pfp program start addr */ 2710 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2711 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2712 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2713 (pfp_hdr->ucode_start_addr_hi << 30) | 2714 (pfp_hdr->ucode_start_addr_lo >> 2)); 2715 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2716 pfp_hdr->ucode_start_addr_hi >> 2); 2717 } 2718 soc21_grbm_select(adev, 0, 0, 0, 0); 2719 2720 /* reset pfp pipe */ 2721 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2722 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1); 2723 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1); 2724 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2725 2726 /* clear pfp pipe reset */ 2727 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0); 2728 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0); 2729 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2730 2731 /* config me program start addr */ 2732 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2733 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2734 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2735 (me_hdr->ucode_start_addr_hi << 30) | 2736 (me_hdr->ucode_start_addr_lo >> 2) ); 2737 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2738 me_hdr->ucode_start_addr_hi>>2); 2739 } 2740 soc21_grbm_select(adev, 0, 0, 0, 0); 2741 2742 /* reset me pipe */ 2743 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2744 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1); 2745 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1); 2746 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2747 2748 /* clear me pipe reset */ 2749 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0); 2750 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0); 2751 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2752 2753 /* config mec program start addr */ 2754 for (pipe_id = 0; pipe_id < 4; pipe_id++) { 2755 soc21_grbm_select(adev, 1, pipe_id, 0, 0); 2756 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2757 mec_hdr->ucode_start_addr_lo >> 2 | 2758 mec_hdr->ucode_start_addr_hi << 30); 2759 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2760 mec_hdr->ucode_start_addr_hi >> 2); 2761 } 2762 soc21_grbm_select(adev, 0, 0, 0, 0); 2763 } 2764 2765 static int 
gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) 2766 { 2767 uint32_t cp_status; 2768 uint32_t bootload_status; 2769 int i, r; 2770 uint64_t addr, addr2; 2771 2772 for (i = 0; i < adev->usec_timeout; i++) { 2773 cp_status = RREG32_SOC15(GC, 0, regCP_STAT); 2774 2775 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1)) 2776 bootload_status = RREG32_SOC15(GC, 0, 2777 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1); 2778 else 2779 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS); 2780 2781 if ((cp_status == 0) && 2782 (REG_GET_FIELD(bootload_status, 2783 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { 2784 break; 2785 } 2786 udelay(1); 2787 } 2788 2789 if (i >= adev->usec_timeout) { 2790 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n"); 2791 return -ETIMEDOUT; 2792 } 2793 2794 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 2795 if (adev->gfx.rs64_enable) { 2796 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2797 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset; 2798 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2799 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset; 2800 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2); 2801 if (r) 2802 return r; 2803 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2804 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset; 2805 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2806 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset; 2807 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2); 2808 if (r) 2809 return r; 2810 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2811 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset; 2812 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2813 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset; 2814 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2); 2815 if (r) 2816 return r; 2817 } else { 2818 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2819 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset; 2820 r = gfx_v11_0_config_me_cache(adev, addr); 2821 if (r) 2822 return r; 2823 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2824 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset; 2825 r = gfx_v11_0_config_pfp_cache(adev, addr); 2826 if (r) 2827 return r; 2828 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2829 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset; 2830 r = gfx_v11_0_config_mec_cache(adev, addr); 2831 if (r) 2832 return r; 2833 } 2834 } 2835 2836 return 0; 2837 } 2838 2839 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2840 { 2841 int i; 2842 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2843 2844 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); 2845 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 2846 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2847 2848 for (i = 0; i < adev->usec_timeout; i++) { 2849 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0) 2850 break; 2851 udelay(1); 2852 } 2853 2854 if (i >= adev->usec_timeout) 2855 DRM_ERROR("failed to %s cp gfx\n", enable ? 
"unhalt" : "halt"); 2856 2857 return 0; 2858 } 2859 2860 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) 2861 { 2862 int r; 2863 const struct gfx_firmware_header_v1_0 *pfp_hdr; 2864 const __le32 *fw_data; 2865 unsigned i, fw_size; 2866 2867 pfp_hdr = (const struct gfx_firmware_header_v1_0 *) 2868 adev->gfx.pfp_fw->data; 2869 2870 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2871 2872 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2873 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 2874 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes); 2875 2876 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes, 2877 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 2878 &adev->gfx.pfp.pfp_fw_obj, 2879 &adev->gfx.pfp.pfp_fw_gpu_addr, 2880 (void **)&adev->gfx.pfp.pfp_fw_ptr); 2881 if (r) { 2882 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r); 2883 gfx_v11_0_pfp_fini(adev); 2884 return r; 2885 } 2886 2887 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size); 2888 2889 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2890 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2891 2892 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr); 2893 2894 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0); 2895 2896 for (i = 0; i < pfp_hdr->jt_size; i++) 2897 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA, 2898 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i)); 2899 2900 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 2901 2902 return 0; 2903 } 2904 2905 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) 2906 { 2907 int r; 2908 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2909 const __le32 *fw_ucode, *fw_data; 2910 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 2911 uint32_t tmp; 2912 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2913 2914 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2915 adev->gfx.pfp_fw->data; 2916 2917 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2918 2919 /* instruction */ 2920 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + 2921 le32_to_cpu(pfp_hdr->ucode_offset_bytes)); 2922 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes); 2923 /* data */ 2924 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2925 le32_to_cpu(pfp_hdr->data_offset_bytes)); 2926 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes); 2927 2928 /* 64kb align */ 2929 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2930 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2931 &adev->gfx.pfp.pfp_fw_obj, 2932 &adev->gfx.pfp.pfp_fw_gpu_addr, 2933 (void **)&adev->gfx.pfp.pfp_fw_ptr); 2934 if (r) { 2935 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r); 2936 gfx_v11_0_pfp_fini(adev); 2937 return r; 2938 } 2939 2940 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2941 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2942 &adev->gfx.pfp.pfp_fw_data_obj, 2943 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 2944 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 2945 if (r) { 2946 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r); 2947 gfx_v11_0_pfp_fini(adev); 2948 return r; 2949 } 2950 2951 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size); 2952 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size); 2953 2954 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2955 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj); 2956 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2957 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); 2958 2959 if (amdgpu_emu_mode == 1) 2960 adev->hdp.funcs->flush_hdp(adev, NULL); 2961 2962 
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2963 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2964 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2965 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2966 2967 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2968 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2969 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2970 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2971 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2972 2973 /* 2974 * Programming any of the CP_PFP_IC_BASE registers 2975 * forces invalidation of the PFP L1 I$. Wait for the 2976 * invalidation complete 2977 */ 2978 for (i = 0; i < usec_timeout; i++) { 2979 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2980 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2981 INVALIDATE_CACHE_COMPLETE)) 2982 break; 2983 udelay(1); 2984 } 2985 2986 if (i >= usec_timeout) { 2987 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2988 return -EINVAL; 2989 } 2990 2991 /* Prime the L1 instruction caches */ 2992 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2993 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2994 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2995 /* Wait for the cache to be primed */ 2996 for (i = 0; i < usec_timeout; i++) { 2997 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2998 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2999 ICACHE_PRIMED)) 3000 break; 3001 udelay(1); 3002 } 3003 3004 if (i >= usec_timeout) { 3005 dev_err(adev->dev, "failed to prime instruction cache\n"); 3006 return -EINVAL; 3007 } 3008 3009 mutex_lock(&adev->srbm_mutex); 3010 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 3011 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 3012 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 3013 (pfp_hdr->ucode_start_addr_hi << 30) | 3014 (pfp_hdr->ucode_start_addr_lo >> 2)); 3015 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 3016 pfp_hdr->ucode_start_addr_hi >> 2); 3017 3018 /* 3019 * Program CP_ME_CNTL to reset given PIPE to take 3020 * effect of CP_PFP_PRGRM_CNTR_START. 3021 */ 3022 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3023 if (pipe_id == 0) 3024 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3025 PFP_PIPE0_RESET, 1); 3026 else 3027 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3028 PFP_PIPE1_RESET, 1); 3029 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3030 3031 /* Clear pfp pipe reset bit.
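Once released, the pipe fetches from the VRAM image primed into the I-cache above.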
*/ 3032 if (pipe_id == 0) 3033 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3034 PFP_PIPE0_RESET, 0); 3035 else 3036 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3037 PFP_PIPE1_RESET, 0); 3038 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3039 3040 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 3041 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 3042 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 3043 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 3044 } 3045 soc21_grbm_select(adev, 0, 0, 0, 0); 3046 mutex_unlock(&adev->srbm_mutex); 3047 3048 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 3049 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 3050 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 3051 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 3052 3053 /* Invalidate the data caches */ 3054 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3055 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3056 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 3057 3058 for (i = 0; i < usec_timeout; i++) { 3059 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3060 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 3061 INVALIDATE_DCACHE_COMPLETE)) 3062 break; 3063 udelay(1); 3064 } 3065 3066 if (i >= usec_timeout) { 3067 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 3068 return -EINVAL; 3069 } 3070 3071 return 0; 3072 } 3073 3074 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) 3075 { 3076 int r; 3077 const struct gfx_firmware_header_v1_0 *me_hdr; 3078 const __le32 *fw_data; 3079 unsigned i, fw_size; 3080 3081 me_hdr = (const struct gfx_firmware_header_v1_0 *) 3082 adev->gfx.me_fw->data; 3083 3084 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 3085 3086 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 3087 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 3088 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes); 3089 3090 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes, 3091 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3092 &adev->gfx.me.me_fw_obj, 3093 &adev->gfx.me.me_fw_gpu_addr, 3094 (void **)&adev->gfx.me.me_fw_ptr); 3095 if (r) { 3096 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r); 3097 gfx_v11_0_me_fini(adev); 3098 return r; 3099 } 3100 3101 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size); 3102 3103 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 3104 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 3105 3106 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr); 3107 3108 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0); 3109 3110 for (i = 0; i < me_hdr->jt_size; i++) 3111 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA, 3112 le32_to_cpup(fw_data + me_hdr->jt_offset + i)); 3113 3114 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version); 3115 3116 return 0; 3117 } 3118 3119 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) 3120 { 3121 int r; 3122 const struct gfx_firmware_header_v2_0 *me_hdr; 3123 const __le32 *fw_ucode, *fw_data; 3124 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 3125 uint32_t tmp; 3126 uint32_t usec_timeout = 50000; /* wait for 50ms */ 3127 3128 me_hdr = (const struct gfx_firmware_header_v2_0 *) 3129 adev->gfx.me_fw->data; 3130 3131 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 3132 3133 /* instruction */ 3134 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data + 3135 le32_to_cpu(me_hdr->ucode_offset_bytes)); 3136 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes); 3137 /* data */ 3138 
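/* the data segment is mapped per pipe through CP_GFX_RS64_DC_BASE1 further below */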
fw_data = (const __le32 *)(adev->gfx.me_fw->data + 3139 le32_to_cpu(me_hdr->data_offset_bytes)); 3140 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes); 3141 3142 /* 64kb align */ 3143 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3144 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 3145 &adev->gfx.me.me_fw_obj, 3146 &adev->gfx.me.me_fw_gpu_addr, 3147 (void **)&adev->gfx.me.me_fw_ptr); 3148 if (r) { 3149 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r); 3150 gfx_v11_0_me_fini(adev); 3151 return r; 3152 } 3153 3154 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3155 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 3156 &adev->gfx.me.me_fw_data_obj, 3157 &adev->gfx.me.me_fw_data_gpu_addr, 3158 (void **)&adev->gfx.me.me_fw_data_ptr); 3159 if (r) { 3160 dev_err(adev->dev, "(%d) failed to create me data bo\n", r); 3161 gfx_v11_0_me_fini(adev); 3162 return r; 3163 } 3164 3165 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size); 3166 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size); 3167 3168 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 3169 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj); 3170 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 3171 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); 3172 3173 if (amdgpu_emu_mode == 1) 3174 adev->hdp.funcs->flush_hdp(adev, NULL); 3175 3176 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 3177 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); 3178 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 3179 upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); 3180 3181 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 3182 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 3183 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 3184 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 3185 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 3186 3187 /* 3188 * Programming any of the CP_ME_IC_BASE registers 3189 * forces invalidation of the ME L1 I$. Wait for the 3190 * invalidation complete 3191 */ 3192 for (i = 0; i < usec_timeout; i++) { 3193 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3194 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 3195 INVALIDATE_CACHE_COMPLETE)) 3196 break; 3197 udelay(1); 3198 } 3199 3200 if (i >= usec_timeout) { 3201 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 3202 return -EINVAL; 3203 } 3204 3205 /* Prime the instruction caches */ 3206 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3207 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 3208 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 3209 3210 /* Wait for the instruction cache to be primed */ 3211 for (i = 0; i < usec_timeout; i++) { 3212 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3213 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 3214 ICACHE_PRIMED)) 3215 break; 3216 udelay(1); 3217 } 3218 3219 if (i >= usec_timeout) { 3220 dev_err(adev->dev, "failed to prime instruction cache\n"); 3221 return -EINVAL; 3222 } 3223 3224 mutex_lock(&adev->srbm_mutex); 3225 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 3226 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 3227 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 3228 (me_hdr->ucode_start_addr_hi << 30) | 3229 (me_hdr->ucode_start_addr_lo >> 2)); 3230 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 3231 me_hdr->ucode_start_addr_hi >> 2); 3232 3233 /* 3234 * Program CP_ME_CNTL to reset given PIPE to take 3235 * effect of CP_ME_PRGRM_CNTR_START.
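* The reset pulse-and-release below mirrors the PFP sequence.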
3236 */ 3237 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3238 if (pipe_id == 0) 3239 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3240 ME_PIPE0_RESET, 1); 3241 else 3242 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3243 ME_PIPE1_RESET, 1); 3244 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3245 3246 /* Clear me pipe reset bit. */ 3247 if (pipe_id == 0) 3248 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3249 ME_PIPE0_RESET, 0); 3250 else 3251 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3252 ME_PIPE1_RESET, 0); 3253 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3254 3255 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 3256 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3257 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 3258 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3259 } 3260 soc21_grbm_select(adev, 0, 0, 0, 0); 3261 mutex_unlock(&adev->srbm_mutex); 3262 3263 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 3264 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 3265 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 3266 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 3267 3268 /* Invalidate the data caches */ 3269 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3270 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3271 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 3272 3273 for (i = 0; i < usec_timeout; i++) { 3274 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3275 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 3276 INVALIDATE_DCACHE_COMPLETE)) 3277 break; 3278 udelay(1); 3279 } 3280 3281 if (i >= usec_timeout) { 3282 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 3283 return -EINVAL; 3284 } 3285 3286 return 0; 3287 } 3288 3289 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 3290 { 3291 int r; 3292 3293 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw) 3294 return -EINVAL; 3295 3296 gfx_v11_0_cp_gfx_enable(adev, false); 3297 3298 if (adev->gfx.rs64_enable) 3299 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev); 3300 else 3301 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev); 3302 if (r) { 3303 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r); 3304 return r; 3305 } 3306 3307 if (adev->gfx.rs64_enable) 3308 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev); 3309 else 3310 r = gfx_v11_0_cp_gfx_load_me_microcode(adev); 3311 if (r) { 3312 dev_err(adev->dev, "(%d) failed to load me fw\n", r); 3313 return r; 3314 } 3315 3316 return 0; 3317 } 3318 3319 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev) 3320 { 3321 struct amdgpu_ring *ring; 3322 const struct cs_section_def *sect = NULL; 3323 const struct cs_extent_def *ext = NULL; 3324 int r, i; 3325 int ctx_reg_offset; 3326 3327 /* init the CP */ 3328 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT, 3329 adev->gfx.config.max_hw_contexts - 1); 3330 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1); 3331 3332 if (!amdgpu_async_gfx_ring) 3333 gfx_v11_0_cp_gfx_enable(adev, true); 3334 3335 ring = &adev->gfx.gfx_ring[0]; 3336 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev)); 3337 if (r) { 3338 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3339 return r; 3340 } 3341 3342 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3343 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 3344 3345 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 3346 amdgpu_ring_write(ring, 0x80000000); 3347 amdgpu_ring_write(ring, 0x80000000); 3348 3349 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { 3350 for (ext = sect->section;
ext->extent != NULL; ++ext) { 3351 if (sect->id == SECT_CONTEXT) { 3352 amdgpu_ring_write(ring, 3353 PACKET3(PACKET3_SET_CONTEXT_REG, 3354 ext->reg_count)); 3355 amdgpu_ring_write(ring, ext->reg_index - 3356 PACKET3_SET_CONTEXT_REG_START); 3357 for (i = 0; i < ext->reg_count; i++) 3358 amdgpu_ring_write(ring, ext->extent[i]); 3359 } 3360 } 3361 } 3362 3363 ctx_reg_offset = 3364 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 3365 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 3366 amdgpu_ring_write(ring, ctx_reg_offset); 3367 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override); 3368 3369 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3370 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 3371 3372 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3373 amdgpu_ring_write(ring, 0); 3374 3375 amdgpu_ring_commit(ring); 3376 3377 /* submit cs packet to copy state 0 to next available state */ 3378 if (adev->gfx.num_gfx_rings > 1) { 3379 /* maximum supported gfx ring is 2 */ 3380 ring = &adev->gfx.gfx_ring[1]; 3381 r = amdgpu_ring_alloc(ring, 2); 3382 if (r) { 3383 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3384 return r; 3385 } 3386 3387 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3388 amdgpu_ring_write(ring, 0); 3389 3390 amdgpu_ring_commit(ring); 3391 } 3392 return 0; 3393 } 3394 3395 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, 3396 CP_PIPE_ID pipe) 3397 { 3398 u32 tmp; 3399 3400 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 3401 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe); 3402 3403 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 3404 } 3405 3406 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, 3407 struct amdgpu_ring *ring) 3408 { 3409 u32 tmp; 3410 3411 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 3412 if (ring->use_doorbell) { 3413 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3414 DOORBELL_OFFSET, ring->doorbell_index); 3415 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3416 DOORBELL_EN, 1); 3417 } else { 3418 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3419 DOORBELL_EN, 0); 3420 } 3421 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp); 3422 3423 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 3424 DOORBELL_RANGE_LOWER, ring->doorbell_index); 3425 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp); 3426 3427 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 3428 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 3429 } 3430 3431 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) 3432 { 3433 struct amdgpu_ring *ring; 3434 u32 tmp; 3435 u32 rb_bufsz; 3436 u64 rb_addr, rptr_addr, wptr_gpu_addr; 3437 u32 i; 3438 3439 /* Set the write pointer delay */ 3440 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); 3441 3442 /* set the RB to use vmid 0 */ 3443 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0); 3444 3445 /* Init gfx ring 0 for pipe 0 */ 3446 mutex_lock(&adev->srbm_mutex); 3447 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3448 3449 /* Set ring buffer size */ 3450 ring = &adev->gfx.gfx_ring[0]; 3451 rb_bufsz = order_base_2(ring->ring_size / 8); 3452 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 3453 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 3454 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3455 3456 /* Initialize the ring buffer's write pointers */ 3457 ring->wptr = 0; 3458 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); 3459 
WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 3460 3461 /* set the wb address whether it's enabled or not */ 3462 rptr_addr = ring->rptr_gpu_addr; 3463 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 3464 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3465 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3466 3467 wptr_gpu_addr = ring->wptr_gpu_addr; 3468 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3469 lower_32_bits(wptr_gpu_addr)); 3470 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3471 upper_32_bits(wptr_gpu_addr)); 3472 3473 mdelay(1); 3474 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3475 3476 rb_addr = ring->gpu_addr >> 8; 3477 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr); 3478 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 3479 3480 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1); 3481 3482 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3483 mutex_unlock(&adev->srbm_mutex); 3484 3485 /* Init gfx ring 1 for pipe 1 */ 3486 if (adev->gfx.num_gfx_rings > 1) { 3487 mutex_lock(&adev->srbm_mutex); 3488 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1); 3489 /* maximum supported gfx ring is 2 */ 3490 ring = &adev->gfx.gfx_ring[1]; 3491 rb_bufsz = order_base_2(ring->ring_size / 8); 3492 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); 3493 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); 3494 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3495 /* Initialize the ring buffer's write pointers */ 3496 ring->wptr = 0; 3497 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr)); 3498 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); 3499 /* Set the wb address whether it's enabled or not */ 3500 rptr_addr = ring->rptr_gpu_addr; 3501 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); 3502 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3503 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3504 wptr_gpu_addr = ring->wptr_gpu_addr; 3505 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3506 lower_32_bits(wptr_gpu_addr)); 3507 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3508 upper_32_bits(wptr_gpu_addr)); 3509 3510 mdelay(1); 3511 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3512 3513 rb_addr = ring->gpu_addr >> 8; 3514 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr); 3515 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr)); 3516 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1); 3517 3518 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3519 mutex_unlock(&adev->srbm_mutex); 3520 } 3521 /* Switch to pipe 0 */ 3522 mutex_lock(&adev->srbm_mutex); 3523 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3524 mutex_unlock(&adev->srbm_mutex); 3525 3526 /* start the ring */ 3527 gfx_v11_0_cp_gfx_start(adev); 3528 3529 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3530 ring = &adev->gfx.gfx_ring[i]; 3531 ring->sched.ready = true; 3532 } 3533 3534 return 0; 3535 } 3536 3537 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 3538 { 3539 u32 data; 3540 3541 if (adev->gfx.rs64_enable) { 3542 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 3543 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, 3544 enable ? 0 : 1); 3545 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 3546 enable ? 0 : 1); 3547 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 3548 enable ? 0 : 1); 3549 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 3550 enable ?
0 : 1); 3551 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 3552 enable ? 0 : 1); 3553 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, 3554 enable ? 1 : 0); 3555 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, 3556 enable ? 1 : 0); 3557 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, 3558 enable ? 1 : 0); 3559 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, 3560 enable ? 1 : 0); 3561 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, 3562 enable ? 0 : 1); 3563 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data); 3564 } else { 3565 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL); 3566 3567 if (enable) { 3568 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0); 3569 if (!adev->enable_mes_kiq) 3570 data = REG_SET_FIELD(data, CP_MEC_CNTL, 3571 MEC_ME2_HALT, 0); 3572 } else { 3573 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1); 3574 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1); 3575 } 3576 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); 3577 } 3578 3579 adev->gfx.kiq.ring.sched.ready = enable; 3580 3581 udelay(50); 3582 } 3583 3584 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) 3585 { 3586 const struct gfx_firmware_header_v1_0 *mec_hdr; 3587 const __le32 *fw_data; 3588 unsigned i, fw_size; 3589 u32 *fw = NULL; 3590 int r; 3591 3592 if (!adev->gfx.mec_fw) 3593 return -EINVAL; 3594 3595 gfx_v11_0_cp_compute_enable(adev, false); 3596 3597 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3598 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3599 3600 fw_data = (const __le32 *) 3601 (adev->gfx.mec_fw->data + 3602 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 3603 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); 3604 3605 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, 3606 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3607 &adev->gfx.mec.mec_fw_obj, 3608 &adev->gfx.mec.mec_fw_gpu_addr, 3609 (void **)&fw); 3610 if (r) { 3611 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r); 3612 gfx_v11_0_mec_fini(adev); 3613 return r; 3614 } 3615 3616 memcpy(fw, fw_data, fw_size); 3617 3618 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 3619 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 3620 3621 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr); 3622 3623 /* MEC1 */ 3624 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0); 3625 3626 for (i = 0; i < mec_hdr->jt_size; i++) 3627 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA, 3628 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 3629 3630 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); 3631 3632 return 0; 3633 } 3634 3635 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) 3636 { 3637 const struct gfx_firmware_header_v2_0 *mec_hdr; 3638 const __le32 *fw_ucode, *fw_data; 3639 u32 tmp, fw_ucode_size, fw_data_size; 3640 u32 i, usec_timeout = 50000; /* Wait for 50 ms */ 3641 u32 *fw_ucode_ptr, *fw_data_ptr; 3642 int r; 3643 3644 if (!adev->gfx.mec_fw) 3645 return -EINVAL; 3646 3647 gfx_v11_0_cp_compute_enable(adev, false); 3648 3649 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 3650 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3651 3652 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + 3653 le32_to_cpu(mec_hdr->ucode_offset_bytes)); 3654 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); 3655 3656 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 3657 le32_to_cpu(mec_hdr->data_offset_bytes)); 
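/* As with the RS64 PFP/ME images, the MEC firmware ships separate instruction and data payloads; both are copied into dedicated 64K-aligned VRAM BOs and programmed as the CPC instruction cache and MEC data cache bases below. */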
3658 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes); 3659 3660 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3661 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 3662 &adev->gfx.mec.mec_fw_obj, 3663 &adev->gfx.mec.mec_fw_gpu_addr, 3664 (void **)&fw_ucode_ptr); 3665 if (r) { 3666 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r); 3667 gfx_v11_0_mec_fini(adev); 3668 return r; 3669 } 3670 3671 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3672 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 3673 &adev->gfx.mec.mec_fw_data_obj, 3674 &adev->gfx.mec.mec_fw_data_gpu_addr, 3675 (void **)&fw_data_ptr); 3676 if (r) { 3677 dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r); 3678 gfx_v11_0_mec_fini(adev); 3679 return r; 3680 } 3681 3682 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size); 3683 memcpy(fw_data_ptr, fw_data, fw_data_size); 3684 3685 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 3686 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj); 3687 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 3688 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj); 3689 3690 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 3691 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 3692 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 3693 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 3694 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 3695 3696 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 3697 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 3698 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 3699 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 3700 3701 mutex_lock(&adev->srbm_mutex); 3702 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 3703 soc21_grbm_select(adev, 1, i, 0, 0); 3704 3705 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr); 3706 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 3707 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr)); 3708 3709 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 3710 mec_hdr->ucode_start_addr_lo >> 2 | 3711 mec_hdr->ucode_start_addr_hi << 30); 3712 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 3713 mec_hdr->ucode_start_addr_hi >> 2); 3714 3715 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr); 3716 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 3717 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 3718 } 3719 soc21_grbm_select(adev, 0, 0, 0, 0); 3720 mutex_unlock(&adev->srbm_mutex); 3721 3722 /* Trigger an invalidation of the MEC data cache */ 3723 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 3724 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3725 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 3726 3727 /* Wait for invalidation complete */ 3728 for (i = 0; i < usec_timeout; i++) { 3729 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 3730 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 3731 INVALIDATE_DCACHE_COMPLETE)) 3732 break; 3733 udelay(1); 3734 } 3735 3736 if (i >= usec_timeout) { 3737 dev_err(adev->dev, "failed to invalidate data cache\n"); 3738 return -EINVAL; 3739 } 3740 3741 /* Trigger an invalidation of the L1 instruction caches */ 3742 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 3743 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 3744 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 3745 3746 /* Wait for invalidation complete */ 3747 for (i = 0; i < usec_timeout; i++) { 3748 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 3749 if (1 == REG_GET_FIELD(tmp,
CP_CPC_IC_OP_CNTL, 3750 INVALIDATE_CACHE_COMPLETE)) 3751 break; 3752 udelay(1); 3753 } 3754 3755 if (i >= usec_timeout) { 3756 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 3757 return -EINVAL; 3758 } 3759 3760 return 0; 3761 } 3762 3763 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring) 3764 { 3765 uint32_t tmp; 3766 struct amdgpu_device *adev = ring->adev; 3767 3768 /* tell RLC which queue is the KIQ */ 3769 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 3770 tmp &= 0xffffff00; 3771 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 3772 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 3773 tmp |= 0x80; 3774 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 3775 } 3776 3777 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev) 3778 { 3779 /* set graphics engine doorbell range */ 3780 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, 3781 (adev->doorbell_index.gfx_ring0 * 2) << 2); 3782 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 3783 (adev->doorbell_index.gfx_userqueue_end * 2) << 2); 3784 3785 /* set compute engine doorbell range */ 3786 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 3787 (adev->doorbell_index.kiq * 2) << 2); 3788 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 3789 (adev->doorbell_index.userqueue_end * 2) << 2); 3790 } 3791 3792 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, 3793 struct amdgpu_mqd_prop *prop) 3794 { 3795 struct v11_gfx_mqd *mqd = m; 3796 uint64_t hqd_gpu_addr, wb_gpu_addr; 3797 uint32_t tmp; 3798 uint32_t rb_bufsz; 3799 3800 /* set up gfx hqd wptr */ 3801 mqd->cp_gfx_hqd_wptr = 0; 3802 mqd->cp_gfx_hqd_wptr_hi = 0; 3803 3804 /* set the pointer to the MQD */ 3805 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc; 3806 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 3807 3808 /* set up mqd control */ 3809 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL); 3810 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); 3811 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); 3812 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); 3813 mqd->cp_gfx_mqd_control = tmp; 3814 3815 /* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */ 3816 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID); 3817 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); 3818 mqd->cp_gfx_hqd_vmid = 0; 3819 3820 /* set up default queue priority level 3821 * 0x0 = low priority, 0x1 = high priority */ 3822 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY); 3823 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0); 3824 mqd->cp_gfx_hqd_queue_priority = tmp; 3825 3826 /* set up time quantum */ 3827 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM); 3828 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); 3829 mqd->cp_gfx_hqd_quantum = tmp; 3830 3831 /* set up gfx hqd base.
this is similar to CP_RB_BASE */ 3832 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 3833 mqd->cp_gfx_hqd_base = hqd_gpu_addr; 3834 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr); 3835 3836 /* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */ 3837 wb_gpu_addr = prop->rptr_gpu_addr; 3838 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc; 3839 mqd->cp_gfx_hqd_rptr_addr_hi = 3840 upper_32_bits(wb_gpu_addr) & 0xffff; 3841 3842 /* set up rb_wptr_poll addr */ 3843 wb_gpu_addr = prop->wptr_gpu_addr; 3844 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 3845 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 3846 3847 /* set up the gfx_hqd_control, similar to CP_RB0_CNTL */ 3848 rb_bufsz = order_base_2(prop->queue_size / 4) - 1; 3849 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL); 3850 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); 3851 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); 3852 #ifdef __BIG_ENDIAN 3853 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1); 3854 #endif 3855 mqd->cp_gfx_hqd_cntl = tmp; 3856 3857 /* set up cp_doorbell_control */ 3858 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 3859 if (prop->use_doorbell) { 3860 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3861 DOORBELL_OFFSET, prop->doorbell_index); 3862 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3863 DOORBELL_EN, 1); 3864 } else 3865 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3866 DOORBELL_EN, 0); 3867 mqd->cp_rb_doorbell_control = tmp; 3868 3869 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3870 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR); 3871 3872 /* activate the queue */ 3873 mqd->cp_gfx_hqd_active = 1; 3874 3875 return 0; 3876 } 3877 3878 #ifdef BRING_UP_DEBUG 3879 static int gfx_v11_0_gfx_queue_init_register(struct amdgpu_ring *ring) 3880 { 3881 struct amdgpu_device *adev = ring->adev; 3882 struct v11_gfx_mqd *mqd = ring->mqd_ptr; 3883 3884 /* set mmCP_GFX_HQD_WPTR/_HI to 0 */ 3885 WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr); 3886 WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi); 3887 3888 /* set GFX_MQD_BASE */ 3889 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr); 3890 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); 3891 3892 /* set GFX_MQD_CONTROL */ 3893 WREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control); 3894 3895 /* set GFX_HQD_VMID to 0 */ 3896 WREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid); 3897 3898 WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY, 3899 mqd->cp_gfx_hqd_queue_priority); 3900 WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum); 3901 3902 /* set GFX_HQD_BASE, similar to CP_RB_BASE */ 3903 WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base); 3904 WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi); 3905 3906 /* set GFX_HQD_RPTR_ADDR, similar to CP_RB_RPTR */ 3907 WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr); 3908 WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi); 3909 3910 /* set GFX_HQD_CNTL, similar to CP_RB_CNTL */ 3911 WREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl); 3912 3913 /* set RB_WPTR_POLL_ADDR */ 3914 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo); 3915 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi); 3916 3917 /* set RB_DOORBELL_CONTROL */ 3918 WREG32_SOC15(GC, 0,
regCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control); 3919 3920 /* activate the queue */ 3921 WREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active); 3922 3923 return 0; 3924 } 3925 #endif 3926 3927 static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) 3928 { 3929 struct amdgpu_device *adev = ring->adev; 3930 struct v11_gfx_mqd *mqd = ring->mqd_ptr; 3931 int mqd_idx = ring - &adev->gfx.gfx_ring[0]; 3932 3933 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 3934 memset((void *)mqd, 0, sizeof(*mqd)); 3935 mutex_lock(&adev->srbm_mutex); 3936 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3937 amdgpu_ring_init_mqd(ring); 3938 #ifdef BRING_UP_DEBUG 3939 gfx_v11_0_gfx_queue_init_register(ring); 3940 #endif 3941 soc21_grbm_select(adev, 0, 0, 0, 0); 3942 mutex_unlock(&adev->srbm_mutex); 3943 if (adev->gfx.me.mqd_backup[mqd_idx]) 3944 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3945 } else if (amdgpu_in_reset(adev)) { 3946 /* reset mqd with the backup copy */ 3947 if (adev->gfx.me.mqd_backup[mqd_idx]) 3948 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 3949 /* reset the ring */ 3950 ring->wptr = 0; 3951 *ring->wptr_cpu_addr = 0; 3952 amdgpu_ring_clear_ring(ring); 3953 #ifdef BRING_UP_DEBUG 3954 mutex_lock(&adev->srbm_mutex); 3955 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3956 gfx_v11_0_gfx_queue_init_register(ring); 3957 soc21_grbm_select(adev, 0, 0, 0, 0); 3958 mutex_unlock(&adev->srbm_mutex); 3959 #endif 3960 } else { 3961 amdgpu_ring_clear_ring(ring); 3962 } 3963 3964 return 0; 3965 } 3966 3967 #ifndef BRING_UP_DEBUG 3968 static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev) 3969 { 3970 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 3971 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 3972 int r, i; 3973 3974 if (!kiq->pmf || !kiq->pmf->kiq_map_queues) 3975 return -EINVAL; 3976 3977 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * 3978 adev->gfx.num_gfx_rings); 3979 if (r) { 3980 DRM_ERROR("Failed to lock KIQ (%d).\n", r); 3981 return r; 3982 } 3983 3984 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 3985 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]); 3986 3987 return amdgpu_ring_test_helper(kiq_ring); 3988 } 3989 #endif 3990 3991 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 3992 { 3993 int r, i; 3994 struct amdgpu_ring *ring; 3995 3996 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3997 ring = &adev->gfx.gfx_ring[i]; 3998 3999 r = amdgpu_bo_reserve(ring->mqd_obj, false); 4000 if (unlikely(r != 0)) 4001 goto done; 4002 4003 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4004 if (!r) { 4005 r = gfx_v11_0_gfx_init_queue(ring); 4006 amdgpu_bo_kunmap(ring->mqd_obj); 4007 ring->mqd_ptr = NULL; 4008 } 4009 amdgpu_bo_unreserve(ring->mqd_obj); 4010 if (r) 4011 goto done; 4012 } 4013 #ifndef BRING_UP_DEBUG 4014 r = gfx_v11_0_kiq_enable_kgq(adev); 4015 if (r) 4016 goto done; 4017 #endif 4018 r = gfx_v11_0_cp_gfx_start(adev); 4019 if (r) 4020 goto done; 4021 4022 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4023 ring = &adev->gfx.gfx_ring[i]; 4024 ring->sched.ready = true; 4025 } 4026 done: 4027 return r; 4028 } 4029 4030 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, 4031 struct amdgpu_mqd_prop *prop) 4032 { 4033 struct v11_compute_mqd *mqd = m; 4034 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 4035 uint32_t tmp; 4036 4037 mqd->header = 0xC0310800; 4038 mqd->compute_pipelinestat_enable = 0x00000001; 4039
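/* all-ones static thread management masks mean no CU masking: every CU in each shader engine may process this queue */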
mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 4040 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 4041 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 4042 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 4043 mqd->compute_misc_reserved = 0x00000007; 4044 4045 eop_base_addr = prop->eop_gpu_addr >> 8; 4046 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 4047 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 4048 4049 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4050 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); 4051 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 4052 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1)); 4053 4054 mqd->cp_hqd_eop_control = tmp; 4055 4056 /* enable doorbell? */ 4057 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 4058 4059 if (prop->use_doorbell) { 4060 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4061 DOORBELL_OFFSET, prop->doorbell_index); 4062 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4063 DOORBELL_EN, 1); 4064 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4065 DOORBELL_SOURCE, 0); 4066 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4067 DOORBELL_HIT, 0); 4068 } else { 4069 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4070 DOORBELL_EN, 0); 4071 } 4072 4073 mqd->cp_hqd_pq_doorbell_control = tmp; 4074 4075 /* disable the queue if it's active */ 4076 mqd->cp_hqd_dequeue_request = 0; 4077 mqd->cp_hqd_pq_rptr = 0; 4078 mqd->cp_hqd_pq_wptr_lo = 0; 4079 mqd->cp_hqd_pq_wptr_hi = 0; 4080 4081 /* set the pointer to the MQD */ 4082 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc; 4083 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 4084 4085 /* set MQD vmid to 0 */ 4086 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); 4087 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 4088 mqd->cp_mqd_control = tmp; 4089 4090 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */ 4091 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 4092 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 4093 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 4094 4095 /* set up the HQD, this is similar to CP_RB0_CNTL */ 4096 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL); 4097 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 4098 (order_base_2(prop->queue_size / 4) - 1)); 4099 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 4100 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); 4101 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 4102 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); 4103 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 4104 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 4105 mqd->cp_hqd_pq_control = tmp; 4106 4107 /* set the wb address whether it's enabled or not */ 4108 wb_gpu_addr = prop->rptr_gpu_addr; 4109 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 4110 mqd->cp_hqd_pq_rptr_report_addr_hi = 4111 upper_32_bits(wb_gpu_addr) & 0xffff; 4112 4113 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 4114 wb_gpu_addr = prop->wptr_gpu_addr; 4115 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 4116 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 4117 4118 tmp = 0; 4119 /* enable the doorbell if requested */ 4120 if (prop->use_doorbell) { 4121 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 4122 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4123 DOORBELL_OFFSET, prop->doorbell_index); 4124 4125 tmp =
REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4126 DOORBELL_EN, 1); 4127 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4128 DOORBELL_SOURCE, 0); 4129 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4130 DOORBELL_HIT, 0); 4131 } 4132 4133 mqd->cp_hqd_pq_doorbell_control = tmp; 4134 4135 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4136 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); 4137 4138 /* set the vmid for the queue */ 4139 mqd->cp_hqd_vmid = 0; 4140 4141 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); 4142 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); 4143 mqd->cp_hqd_persistent_state = tmp; 4144 4145 /* set MIN_IB_AVAIL_SIZE */ 4146 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); 4147 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 4148 mqd->cp_hqd_ib_control = tmp; 4149 4150 /* set static priority for a compute queue/ring */ 4151 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; 4152 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; 4153 4154 mqd->cp_hqd_active = prop->hqd_active; 4155 4156 return 0; 4157 } 4158 4159 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring) 4160 { 4161 struct amdgpu_device *adev = ring->adev; 4162 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4163 int j; 4164 4165 /* inactivate the queue */ 4166 if (amdgpu_sriov_vf(adev)) 4167 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0); 4168 4169 /* disable wptr polling */ 4170 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 4171 4172 /* write the EOP addr */ 4173 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR, 4174 mqd->cp_hqd_eop_base_addr_lo); 4175 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, 4176 mqd->cp_hqd_eop_base_addr_hi); 4177 4178 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4179 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL, 4180 mqd->cp_hqd_eop_control); 4181 4182 /* enable doorbell? 
*/ 4183 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4184 mqd->cp_hqd_pq_doorbell_control); 4185 4186 /* disable the queue if it's active */ 4187 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { 4188 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); 4189 for (j = 0; j < adev->usec_timeout; j++) { 4190 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 4191 break; 4192 udelay(1); 4193 } 4194 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 4195 mqd->cp_hqd_dequeue_request); 4196 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 4197 mqd->cp_hqd_pq_rptr); 4198 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4199 mqd->cp_hqd_pq_wptr_lo); 4200 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4201 mqd->cp_hqd_pq_wptr_hi); 4202 } 4203 4204 /* set the pointer to the MQD */ 4205 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, 4206 mqd->cp_mqd_base_addr_lo); 4207 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, 4208 mqd->cp_mqd_base_addr_hi); 4209 4210 /* set MQD vmid to 0 */ 4211 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 4212 mqd->cp_mqd_control); 4213 4214 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */ 4215 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, 4216 mqd->cp_hqd_pq_base_lo); 4217 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, 4218 mqd->cp_hqd_pq_base_hi); 4219 4220 /* set up the HQD, this is similar to CP_RB0_CNTL */ 4221 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, 4222 mqd->cp_hqd_pq_control); 4223 4224 /* set the wb address whether it's enabled or not */ 4225 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, 4226 mqd->cp_hqd_pq_rptr_report_addr_lo); 4227 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 4228 mqd->cp_hqd_pq_rptr_report_addr_hi); 4229 4230 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 4231 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, 4232 mqd->cp_hqd_pq_wptr_poll_addr_lo); 4233 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 4234 mqd->cp_hqd_pq_wptr_poll_addr_hi); 4235 4236 /* enable the doorbell if requested */ 4237 if (ring->use_doorbell) { 4238 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 4239 (adev->doorbell_index.kiq * 2) << 2); 4240 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 4241 (adev->doorbell_index.userqueue_end * 2) << 2); 4242 } 4243 4244 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4245 mqd->cp_hqd_pq_doorbell_control); 4246 4247 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4248 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4249 mqd->cp_hqd_pq_wptr_lo); 4250 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4251 mqd->cp_hqd_pq_wptr_hi); 4252 4253 /* set the vmid for the queue */ 4254 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); 4255 4256 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, 4257 mqd->cp_hqd_persistent_state); 4258 4259 /* activate the queue */ 4260 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 4261 mqd->cp_hqd_active); 4262 4263 if (ring->use_doorbell) 4264 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 4265 4266 return 0; 4267 } 4268 4269 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) 4270 { 4271 struct amdgpu_device *adev = ring->adev; 4272 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4273 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; 4274 4275 gfx_v11_0_kiq_setting(ring); 4276 4277 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4278 /* reset MQD to a clean status */ 4279 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4280 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4281 4282 /* reset ring buffer */ 4283 ring->wptr = 0; 4284 amdgpu_ring_clear_ring(ring); 4285 4286
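/* The KIQ cannot map itself with a MAP_QUEUES packet, so its HQD registers are programmed directly from the restored MQD while the pipe is selected under the srbm mutex. */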
mutex_lock(&adev->srbm_mutex); 4287 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4288 gfx_v11_0_kiq_init_register(ring); 4289 soc21_grbm_select(adev, 0, 0, 0, 0); 4290 mutex_unlock(&adev->srbm_mutex); 4291 } else { 4292 memset((void *)mqd, 0, sizeof(*mqd)); 4293 mutex_lock(&adev->srbm_mutex); 4294 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4295 amdgpu_ring_init_mqd(ring); 4296 gfx_v11_0_kiq_init_register(ring); 4297 soc21_grbm_select(adev, 0, 0, 0, 0); 4298 mutex_unlock(&adev->srbm_mutex); 4299 4300 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4301 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4302 } 4303 4304 return 0; 4305 } 4306 4307 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring) 4308 { 4309 struct amdgpu_device *adev = ring->adev; 4310 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4311 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 4312 4313 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 4314 memset((void *)mqd, 0, sizeof(*mqd)); 4315 mutex_lock(&adev->srbm_mutex); 4316 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4317 amdgpu_ring_init_mqd(ring); 4318 soc21_grbm_select(adev, 0, 0, 0, 0); 4319 mutex_unlock(&adev->srbm_mutex); 4320 4321 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4322 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4323 } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4324 /* reset MQD to a clean status */ 4325 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4326 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4327 4328 /* reset ring buffer */ 4329 ring->wptr = 0; 4330 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); 4331 amdgpu_ring_clear_ring(ring); 4332 } else { 4333 amdgpu_ring_clear_ring(ring); 4334 } 4335 4336 return 0; 4337 } 4338 4339 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) 4340 { 4341 struct amdgpu_ring *ring; 4342 int r; 4343 4344 ring = &adev->gfx.kiq.ring; 4345 4346 r = amdgpu_bo_reserve(ring->mqd_obj, false); 4347 if (unlikely(r != 0)) 4348 return r; 4349 4350 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4351 if (unlikely(r != 0)) { 4352 amdgpu_bo_unreserve(ring->mqd_obj); 4353 return r; 4354 } 4355 4356 gfx_v11_0_kiq_init_queue(ring); 4357 amdgpu_bo_kunmap(ring->mqd_obj); 4358 ring->mqd_ptr = NULL; 4359 amdgpu_bo_unreserve(ring->mqd_obj); 4360 ring->sched.ready = true; 4361 return 0; 4362 } 4363 4364 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) 4365 { 4366 struct amdgpu_ring *ring = NULL; 4367 int r = 0, i; 4368 4369 if (!amdgpu_async_gfx_ring) 4370 gfx_v11_0_cp_compute_enable(adev, true); 4371 4372 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4373 ring = &adev->gfx.compute_ring[i]; 4374 4375 r = amdgpu_bo_reserve(ring->mqd_obj, false); 4376 if (unlikely(r != 0)) 4377 goto done; 4378 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4379 if (!r) { 4380 r = gfx_v11_0_kcq_init_queue(ring); 4381 amdgpu_bo_kunmap(ring->mqd_obj); 4382 ring->mqd_ptr = NULL; 4383 } 4384 amdgpu_bo_unreserve(ring->mqd_obj); 4385 if (r) 4386 goto done; 4387 } 4388 4389 r = amdgpu_gfx_enable_kcq(adev); 4390 done: 4391 return r; 4392 } 4393 4394 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) 4395 { 4396 int r, i; 4397 struct amdgpu_ring *ring; 4398 4399 if (!(adev->flags & AMD_IS_APU)) 4400 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4401 4402 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4403 /* legacy firmware loading */ 4404 r = gfx_v11_0_cp_gfx_load_microcode(adev); 4405 if 
(r) 4406 return r; 4407 4408 if (adev->gfx.rs64_enable) 4409 r = gfx_v11_0_cp_compute_load_microcode_rs64(adev); 4410 else 4411 r = gfx_v11_0_cp_compute_load_microcode(adev); 4412 if (r) 4413 return r; 4414 } 4415 4416 gfx_v11_0_cp_set_doorbell_range(adev); 4417 4418 if (amdgpu_async_gfx_ring) { 4419 gfx_v11_0_cp_compute_enable(adev, true); 4420 gfx_v11_0_cp_gfx_enable(adev, true); 4421 } 4422 4423 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init) 4424 r = amdgpu_mes_kiq_hw_init(adev); 4425 else 4426 r = gfx_v11_0_kiq_resume(adev); 4427 if (r) 4428 return r; 4429 4430 r = gfx_v11_0_kcq_resume(adev); 4431 if (r) 4432 return r; 4433 4434 if (!amdgpu_async_gfx_ring) { 4435 r = gfx_v11_0_cp_gfx_resume(adev); 4436 if (r) 4437 return r; 4438 } else { 4439 r = gfx_v11_0_cp_async_gfx_ring_resume(adev); 4440 if (r) 4441 return r; 4442 } 4443 4444 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4445 ring = &adev->gfx.gfx_ring[i]; 4446 r = amdgpu_ring_test_helper(ring); 4447 if (r) 4448 return r; 4449 } 4450 4451 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4452 ring = &adev->gfx.compute_ring[i]; 4453 r = amdgpu_ring_test_helper(ring); 4454 if (r) 4455 return r; 4456 } 4457 4458 return 0; 4459 } 4460 4461 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable) 4462 { 4463 gfx_v11_0_cp_gfx_enable(adev, enable); 4464 gfx_v11_0_cp_compute_enable(adev, enable); 4465 } 4466 4467 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) 4468 { 4469 int r; 4470 bool value; 4471 4472 r = adev->gfxhub.funcs->gart_enable(adev); 4473 if (r) 4474 return r; 4475 4476 adev->hdp.funcs->flush_hdp(adev, NULL); 4477 4478 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 4479 false : true; 4480 4481 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 4482 amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); 4483 4484 return 0; 4485 } 4486 4487 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev) 4488 { 4489 u32 tmp; 4490 4491 /* select RS64 */ 4492 if (adev->gfx.rs64_enable) { 4493 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL); 4494 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1); 4495 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp); 4496 4497 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL); 4498 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1); 4499 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp); 4500 } 4501 4502 if (amdgpu_emu_mode == 1) 4503 msleep(100); 4504 } 4505 4506 static int get_gb_addr_config(struct amdgpu_device *adev) 4507 { 4508 u32 gb_addr_config; 4509 4510 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 4511 if (gb_addr_config == 0) 4512 return -EINVAL; 4513 4514 adev->gfx.config.gb_addr_config_fields.num_pkrs = 4515 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 4516 4517 adev->gfx.config.gb_addr_config = gb_addr_config; 4518 4519 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 4520 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4521 GB_ADDR_CONFIG, NUM_PIPES); 4522 4523 adev->gfx.config.max_tile_pipes = 4524 adev->gfx.config.gb_addr_config_fields.num_pipes; 4525 4526 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 4527 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4528 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS); 4529 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 4530 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4531 GB_ADDR_CONFIG, NUM_RB_PER_SE); 4532 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 4533 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4534 GB_ADDR_CONFIG,
NUM_SHADER_ENGINES); 4535 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 4536 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4537 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); 4538 4539 return 0; 4540 } 4541 4542 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) 4543 { 4544 uint32_t data; 4545 4546 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); 4547 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; 4548 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); 4549 4550 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG); 4551 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; 4552 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); 4553 } 4554 4555 static int gfx_v11_0_hw_init(void *handle) 4556 { 4557 int r; 4558 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4559 4560 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 4561 if (adev->gfx.imu.funcs) { 4562 /* RLC autoload sequence 1: Program rlc ram */ 4563 if (adev->gfx.imu.funcs->program_rlc_ram) 4564 adev->gfx.imu.funcs->program_rlc_ram(adev); 4565 } 4566 /* rlc autoload firmware */ 4567 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); 4568 if (r) 4569 return r; 4570 } else { 4571 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4572 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 4573 if (adev->gfx.imu.funcs->load_microcode) 4574 adev->gfx.imu.funcs->load_microcode(adev); 4575 if (adev->gfx.imu.funcs->setup_imu) 4576 adev->gfx.imu.funcs->setup_imu(adev); 4577 if (adev->gfx.imu.funcs->start_imu) 4578 adev->gfx.imu.funcs->start_imu(adev); 4579 } 4580 4581 /* disable gpa mode in backdoor loading */ 4582 gfx_v11_0_disable_gpa_mode(adev); 4583 } 4584 } 4585 4586 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) || 4587 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 4588 r = gfx_v11_0_wait_for_rlc_autoload_complete(adev); 4589 if (r) { 4590 dev_err(adev->dev, "(%d) failed to wait for rlc autoload completion\n", r); 4591 return r; 4592 } 4593 } 4594 4595 adev->gfx.is_poweron = true; 4596 4597 if (get_gb_addr_config(adev)) 4598 DRM_WARN("Invalid gb_addr_config!\n"); 4599 4600 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && 4601 adev->gfx.rs64_enable) 4602 gfx_v11_0_config_gfx_rs64(adev); 4603 4604 r = gfx_v11_0_gfxhub_enable(adev); 4605 if (r) 4606 return r; 4607 4608 if (!amdgpu_emu_mode) 4609 gfx_v11_0_init_golden_registers(adev); 4610 4611 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || 4612 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { 4613 /* 4614 * For gfx 11, RLC firmware loading relies on the SMU firmware being 4615 * loaded first, so for direct loading the SMC ucode has to be loaded 4616 * here before the RLC.
4617 */ 4618 if (!(adev->flags & AMD_IS_APU)) { 4619 r = amdgpu_pm_load_smu_firmware(adev, NULL); 4620 if (r) 4621 return r; 4622 } 4623 } 4624 4625 gfx_v11_0_constants_init(adev); 4626 4627 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 4628 gfx_v11_0_select_cp_fw_arch(adev); 4629 4630 if (adev->nbio.funcs->gc_doorbell_init) 4631 adev->nbio.funcs->gc_doorbell_init(adev); 4632 4633 r = gfx_v11_0_rlc_resume(adev); 4634 if (r) 4635 return r; 4636 4637 /* 4638 * golden register init and rlc resume may override some registers, 4639 * so reconfigure them here 4640 */ 4641 gfx_v11_0_tcp_harvest(adev); 4642 4643 r = gfx_v11_0_cp_resume(adev); 4644 if (r) 4645 return r; 4646 4647 return r; 4648 } 4649 4650 #ifndef BRING_UP_DEBUG 4651 static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) 4652 { 4653 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 4654 struct amdgpu_ring *kiq_ring = &kiq->ring; 4655 int i, r = 0; 4656 4657 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 4658 return -EINVAL; 4659 4660 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * 4661 adev->gfx.num_gfx_rings)) 4662 return -ENOMEM; 4663 4664 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 4665 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], 4666 PREEMPT_QUEUES, 0, 0); 4667 4668 if (adev->gfx.kiq.ring.sched.ready) 4669 r = amdgpu_ring_test_helper(kiq_ring); 4670 4671 return r; 4672 } 4673 #endif 4674 4675 static int gfx_v11_0_hw_fini(void *handle) 4676 { 4677 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4678 int r; 4679 uint32_t tmp; 4680 4681 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 4682 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 4683 4684 if (!adev->no_hw_access) { 4685 #ifndef BRING_UP_DEBUG 4686 if (amdgpu_async_gfx_ring) { 4687 r = gfx_v11_0_kiq_disable_kgq(adev); 4688 if (r) 4689 DRM_ERROR("KGQ disable failed\n"); 4690 } 4691 #endif 4692 if (amdgpu_gfx_disable_kcq(adev)) 4693 DRM_ERROR("KCQ disable failed\n"); 4694 4695 amdgpu_mes_kiq_hw_fini(adev); 4696 } 4697 4698 if (amdgpu_sriov_vf(adev)) { 4699 gfx_v11_0_cp_gfx_enable(adev, false); 4700 /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ 4701 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 4702 tmp &= 0xffffff00; 4703 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 4704 4705 return 0; 4706 } 4707 gfx_v11_0_cp_enable(adev, false); 4708 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4709 4710 adev->gfxhub.funcs->gart_disable(adev); 4711 4712 adev->gfx.is_poweron = false; 4713 4714 return 0; 4715 } 4716 4717 static int gfx_v11_0_suspend(void *handle) 4718 { 4719 return gfx_v11_0_hw_fini(handle); 4720 } 4721 4722 static int gfx_v11_0_resume(void *handle) 4723 { 4724 return gfx_v11_0_hw_init(handle); 4725 } 4726 4727 static bool gfx_v11_0_is_idle(void *handle) 4728 { 4729 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4730 4731 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 4732 GRBM_STATUS, GUI_ACTIVE)) 4733 return false; 4734 else 4735 return true; 4736 } 4737 4738 static int gfx_v11_0_wait_for_idle(void *handle) 4739 { 4740 unsigned i; 4741 u32 tmp; 4742 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4743 4744 for (i = 0; i < adev->usec_timeout; i++) { 4745 /* read GRBM_STATUS */ 4746 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) & 4747 GRBM_STATUS__GUI_ACTIVE_MASK; 4748 4749 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) 4750 return 0; 4751 udelay(1); 4752 } 4753 return -ETIMEDOUT; 4754 } 4755 4756 static int gfx_v11_0_soft_reset(void *handle) 4757 { 4758 u32
grbm_soft_reset = 0; 4759 u32 tmp; 4760 int i, j, k; 4761 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4762 4763 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 4764 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0); 4765 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0); 4766 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0); 4767 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0); 4768 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); 4769 4770 gfx_v11_0_set_safe_mode(adev); 4771 4772 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 4773 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 4774 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 4775 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 4776 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); 4777 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); 4778 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); 4779 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 4780 4781 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); 4782 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); 4783 } 4784 } 4785 } 4786 for (i = 0; i < adev->gfx.me.num_me; ++i) { 4787 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 4788 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 4789 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 4790 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); 4791 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); 4792 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); 4793 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 4794 4795 WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1); 4796 } 4797 } 4798 } 4799 4800 WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe); 4801 4802 /* Read the CP_VMID_RESET register three times 4803 * to give GFX_HQD_ACTIVE sufficient time to reach 0 */ 4804 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 4805 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 4806 RREG32_SOC15(GC, 0, regCP_VMID_RESET); 4807 4808 for (i = 0; i < adev->usec_timeout; i++) { 4809 if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) && 4810 !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE)) 4811 break; 4812 udelay(1); 4813 } 4814 if (i >= adev->usec_timeout) { 4815 dev_err(adev->dev, "Failed to wait for all pipes to become idle\n"); 4816 return -EINVAL; 4817 } 4818 4819 /********** trigger soft reset ***********/ 4820 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 4821 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4822 SOFT_RESET_CP, 1); 4823 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4824 SOFT_RESET_GFX, 1); 4825 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4826 SOFT_RESET_CPF, 1); 4827 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4828 SOFT_RESET_CPC, 1); 4829 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4830 SOFT_RESET_CPG, 1); 4831 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset); 4832 /********** exit soft reset ***********/ 4833 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 4834 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4835 SOFT_RESET_CP, 0); 4836 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4837 SOFT_RESET_GFX, 0); 4838 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4839 SOFT_RESET_CPF, 0); 4840 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4841 SOFT_RESET_CPC, 0); 4842 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 4843 SOFT_RESET_CPG, 0); 4844 WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET,
grbm_soft_reset); 4845 4846 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL); 4847 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1); 4848 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp); 4849 4850 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0); 4851 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0); 4852 4853 for (i = 0; i < adev->usec_timeout; i++) { 4854 if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET)) 4855 break; 4856 udelay(1); 4857 } 4858 if (i >= adev->usec_timeout) { 4859 dev_err(adev->dev, "Failed to wait for CP_VMID_RESET to clear\n"); 4860 return -EINVAL; 4861 } 4862 4863 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 4864 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 4865 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 4866 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 4867 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 4868 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); 4869 4870 gfx_v11_0_unset_safe_mode(adev); 4871 4872 return gfx_v11_0_cp_resume(adev); 4873 } 4874 4875 static bool gfx_v11_0_check_soft_reset(void *handle) 4876 { 4877 int i, r; 4878 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4879 struct amdgpu_ring *ring; 4880 long tmo = msecs_to_jiffies(1000); 4881 4882 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4883 ring = &adev->gfx.gfx_ring[i]; 4884 r = amdgpu_ring_test_ib(ring, tmo); 4885 if (r) 4886 return true; 4887 } 4888 4889 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4890 ring = &adev->gfx.compute_ring[i]; 4891 r = amdgpu_ring_test_ib(ring, tmo); 4892 if (r) 4893 return true; 4894 } 4895 4896 return false; 4897 } 4898 4899 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) 4900 { 4901 uint64_t clock; 4902 4903 amdgpu_gfx_off_ctrl(adev, false); 4904 mutex_lock(&adev->gfx.gpu_clock_mutex); 4905 clock = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER) | 4906 ((uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER) << 32ULL); 4907 mutex_unlock(&adev->gfx.gpu_clock_mutex); 4908 amdgpu_gfx_off_ctrl(adev, true); 4909 return clock; 4910 } 4911 4912 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 4913 uint32_t vmid, 4914 uint32_t gds_base, uint32_t gds_size, 4915 uint32_t gws_base, uint32_t gws_size, 4916 uint32_t oa_base, uint32_t oa_size) 4917 { 4918 struct amdgpu_device *adev = ring->adev; 4919 4920 /* GDS Base */ 4921 gfx_v11_0_write_data_to_reg(ring, 0, false, 4922 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid, 4923 gds_base); 4924 4925 /* GDS Size */ 4926 gfx_v11_0_write_data_to_reg(ring, 0, false, 4927 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid, 4928 gds_size); 4929 4930 /* GWS */ 4931 gfx_v11_0_write_data_to_reg(ring, 0, false, 4932 SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid, 4933 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 4934 4935 /* OA */ 4936 gfx_v11_0_write_data_to_reg(ring, 0, false, 4937 SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid, 4938 (1 << (oa_size + oa_base)) - (1 << oa_base)); 4939 } 4940 4941 static int gfx_v11_0_early_init(void *handle) 4942 { 4943 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4944 4945 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS; 4946 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 4947 AMDGPU_MAX_COMPUTE_RINGS); 4948 4949 gfx_v11_0_set_kiq_pm4_funcs(adev); 4950 gfx_v11_0_set_ring_funcs(adev); 4951 gfx_v11_0_set_irq_funcs(adev); 4952 gfx_v11_0_set_gds_init(adev); 4953 gfx_v11_0_set_rlc_funcs(adev); 4954 gfx_v11_0_set_mqd_funcs(adev);
4912 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 4913 uint32_t vmid, 4914 uint32_t gds_base, uint32_t gds_size, 4915 uint32_t gws_base, uint32_t gws_size, 4916 uint32_t oa_base, uint32_t oa_size) 4917 { 4918 struct amdgpu_device *adev = ring->adev; 4919 4920 /* GDS Base */ 4921 gfx_v11_0_write_data_to_reg(ring, 0, false, 4922 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid, 4923 gds_base); 4924 4925 /* GDS Size */ 4926 gfx_v11_0_write_data_to_reg(ring, 0, false, 4927 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid, 4928 gds_size); 4929 4930 /* GWS */ 4931 gfx_v11_0_write_data_to_reg(ring, 0, false, 4932 SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid, 4933 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 4934 4935 /* OA: a field of oa_size consecutive 1s starting at bit oa_base */ 4936 gfx_v11_0_write_data_to_reg(ring, 0, false, 4937 SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid, 4938 (1 << (oa_size + oa_base)) - (1 << oa_base)); 4939 } 4940 4941 static int gfx_v11_0_early_init(void *handle) 4942 { 4943 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4944 4945 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS; 4946 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 4947 AMDGPU_MAX_COMPUTE_RINGS); 4948 4949 gfx_v11_0_set_kiq_pm4_funcs(adev); 4950 gfx_v11_0_set_ring_funcs(adev); 4951 gfx_v11_0_set_irq_funcs(adev); 4952 gfx_v11_0_set_gds_init(adev); 4953 gfx_v11_0_set_rlc_funcs(adev); 4954 gfx_v11_0_set_mqd_funcs(adev); 4955 gfx_v11_0_set_imu_funcs(adev); 4956 4957 gfx_v11_0_init_rlcg_reg_access_ctrl(adev); 4958 4959 return 0; 4960 } 4961 4962 static int gfx_v11_0_late_init(void *handle) 4963 { 4964 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4965 int r; 4966 4967 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 4968 if (r) 4969 return r; 4970 4971 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 4972 if (r) 4973 return r; 4974 4975 return 0; 4976 } 4977 4978 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev) 4979 { 4980 uint32_t rlc_cntl; 4981 4982 /* report whether the RLC F32 core is currently running */ 4983 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 4984 return !!REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32); 4985 } 4986 4987 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev) 4988 { 4989 uint32_t data; 4990 unsigned i; 4991 4992 data = RLC_SAFE_MODE__CMD_MASK; 4993 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 4994 4995 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 4996 4997 /* wait for the RLC to ack: CMD clears once safe mode is entered */ 4998 for (i = 0; i < adev->usec_timeout; i++) { 4999 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 5000 RLC_SAFE_MODE, CMD)) 5001 break; 5002 udelay(1); 5003 } 5004 } 5005 5006 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev) 5007 { 5008 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 5009 } 5010 5011 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, 5012 bool enable) 5013 { 5014 uint32_t def, data; 5015 5016 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 5017 return; 5018 5019 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5020 5021 if (enable) 5022 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5023 else 5024 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5025 5026 if (def != data) 5027 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5028 } 5029 5030 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev, 5031 bool enable) 5032 { 5033 uint32_t def, data; 5034 5035 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 5036 return; 5037 5038 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5039 5040 if (enable) 5041 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5042 else 5043 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5044 5045 if (def != data) 5046 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5047 } 5048 5049 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev, 5050 bool enable) 5051 { 5052 uint32_t def, data; 5053 5054 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 5055 return; 5056 5057 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5058 5059 if (enable) 5060 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5061 else 5062 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5063 5064 if (def != data) 5065 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5066 } 5067 5068 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 5069 bool enable) 5070 { 5071 uint32_t data, def; 5072 5073 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 5074 return; 5075 5076 /* MGCG is disabled by HW by default */ 5077 if (enable) { 5078 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5079 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 5080 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5081 5082 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5083
RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5084 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5085 5086 if (def != data) 5087 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5088 } 5089 } else { 5090 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5091 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5092 5093 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5094 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5095 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5096 5097 if (def != data) 5098 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5099 } 5100 } 5101 } 5102 5103 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 5104 bool enable) 5105 { 5106 uint32_t def, data; 5107 5108 if (!(adev->cg_flags & 5109 (AMD_CG_SUPPORT_GFX_CGCG | 5110 AMD_CG_SUPPORT_GFX_CGLS | 5111 AMD_CG_SUPPORT_GFX_3D_CGCG | 5112 AMD_CG_SUPPORT_GFX_3D_CGLS))) 5113 return; 5114 5115 if (enable) { 5116 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5117 5118 /* unset CGCG override */ 5119 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5120 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 5121 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5122 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 5123 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 5124 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5125 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 5126 5127 /* update CGCG override bits */ 5128 if (def != data) 5129 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5130 5131 /* enable cgcg FSM(0x0000363F) */ 5132 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5133 5134 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 5135 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 5136 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5137 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5138 } 5139 5140 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 5141 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 5142 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5143 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5144 } 5145 5146 if (def != data) 5147 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5148 5149 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5150 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5151 5152 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { 5153 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; 5154 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5155 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5156 } 5157 5158 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { 5159 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; 5160 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5161 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5162 } 5163 5164 if (def != data) 5165 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5166 5167 /* set IDLE_POLL_COUNT(0x00900100) */ 5168 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); 5169 5170 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); 5171 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 5172 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 5173 5174 if (def != data) 5175 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); 5176 5177 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 5178 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 
1); 5179 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 5180 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 5181 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 5182 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data); 5183 5184 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 5185 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 5186 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 5187 5188 /* Some ASICs have only one SDMA instance, so there is no need to configure SDMA1 */ 5189 if (adev->sdma.num_instances > 1) { 5190 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 5191 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 5192 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 5193 } 5194 } else { 5195 /* Program RLC_CGCG_CGLS_CTRL */ 5196 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5197 5198 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5199 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5200 5201 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5202 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5203 5204 if (def != data) 5205 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5206 5207 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5208 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5209 5210 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) 5211 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5212 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5213 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5214 5215 if (def != data) 5216 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5217 5218 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 5219 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5220 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 5221 5222 /* Some ASICs have only one SDMA instance, so there is no need to configure SDMA1 */ 5223 if (adev->sdma.num_instances > 1) { 5224 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 5225 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5226 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 5227 } 5228 } 5229 } 5230 5231 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, 5232 bool enable) 5233 { 5234 amdgpu_gfx_rlc_enter_safe_mode(adev); 5235 5236 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable); 5237 5238 gfx_v11_0_update_medium_grain_clock_gating(adev, enable); 5239 5240 gfx_v11_0_update_repeater_fgcg(adev, enable); 5241 5242 gfx_v11_0_update_sram_fgcg(adev, enable); 5243 5244 gfx_v11_0_update_perf_clk(adev, enable); 5245 5246 if (adev->cg_flags & 5247 (AMD_CG_SUPPORT_GFX_MGCG | 5248 AMD_CG_SUPPORT_GFX_CGLS | 5249 AMD_CG_SUPPORT_GFX_CGCG | 5250 AMD_CG_SUPPORT_GFX_3D_CGCG | 5251 AMD_CG_SUPPORT_GFX_3D_CGLS)) 5252 gfx_v11_0_enable_gui_idle_interrupt(adev, enable); 5253 5254 amdgpu_gfx_rlc_exit_safe_mode(adev); 5255 5256 return 0; 5257 } 5258 5259 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) 5260 { 5261 u32 reg, data; 5262 5263 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 5264 if (amdgpu_sriov_is_pp_one_vf(adev)) 5265 data = RREG32_NO_KIQ(reg); 5266 else 5267 data = RREG32(reg); 5268 5269 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; 5270 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 5271 5272 if (amdgpu_sriov_is_pp_one_vf(adev)) 5273 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); 5274 else 5275 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); 5276 } 5277
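/*
 * RLC callback table consumed by the shared amdgpu_gfx_rlc_* helpers;
 * the amdgpu_gfx_rlc_enter_safe_mode()/exit_safe_mode() calls used
 * throughout this file are expected to dispatch to the
 * set_safe_mode()/unset_safe_mode() handlers above.
 */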
5278 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = { 5279 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled, 5280 .set_safe_mode = gfx_v11_0_set_safe_mode, 5281 .unset_safe_mode = gfx_v11_0_unset_safe_mode, 5282 .init = gfx_v11_0_rlc_init, 5283 .get_csb_size = gfx_v11_0_get_csb_size, 5284 .get_csb_buffer = gfx_v11_0_get_csb_buffer, 5285 .resume = gfx_v11_0_rlc_resume, 5286 .stop = gfx_v11_0_rlc_stop, 5287 .reset = gfx_v11_0_rlc_reset, 5288 .start = gfx_v11_0_rlc_start, 5289 .update_spm_vmid = gfx_v11_0_update_spm_vmid, 5290 }; 5291 5292 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable) 5293 { 5294 u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 5295 5296 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) 5297 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 5298 else 5299 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 5300 5301 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data); 5302 5303 /* Program RLC_PG_DELAY_3 for CGPG hysteresis */ 5304 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 5305 switch (adev->ip_versions[GC_HWIP][0]) { 5306 case IP_VERSION(11, 0, 1): 5307 WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1); 5308 break; 5309 default: 5310 break; 5311 } 5312 } 5313 } 5314 5315 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable) 5316 { 5317 amdgpu_gfx_rlc_enter_safe_mode(adev); 5318 5319 gfx_v11_cntl_power_gating(adev, enable); 5320 5321 amdgpu_gfx_rlc_exit_safe_mode(adev); 5322 } 5323 5324 static int gfx_v11_0_set_powergating_state(void *handle, 5325 enum amd_powergating_state state) 5326 { 5327 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5328 bool enable = (state == AMD_PG_STATE_GATE); 5329 5330 if (amdgpu_sriov_vf(adev)) 5331 return 0; 5332 5333 switch (adev->ip_versions[GC_HWIP][0]) { 5334 case IP_VERSION(11, 0, 0): 5335 case IP_VERSION(11, 0, 2): 5336 amdgpu_gfx_off_ctrl(adev, enable); 5337 break; 5338 case IP_VERSION(11, 0, 1): 5339 gfx_v11_cntl_pg(adev, enable); 5340 amdgpu_gfx_off_ctrl(adev, enable); 5341 break; 5342 default: 5343 break; 5344 } 5345 5346 return 0; 5347 } 5348 5349 static int gfx_v11_0_set_clockgating_state(void *handle, 5350 enum amd_clockgating_state state) 5351 { 5352 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5353 5354 if (amdgpu_sriov_vf(adev)) 5355 return 0; 5356 5357 switch (adev->ip_versions[GC_HWIP][0]) { 5358 case IP_VERSION(11, 0, 0): 5359 case IP_VERSION(11, 0, 1): 5360 case IP_VERSION(11, 0, 2): 5361 gfx_v11_0_update_gfx_clock_gating(adev, 5362 state == AMD_CG_STATE_GATE); 5363 break; 5364 default: 5365 break; 5366 } 5367 5368 return 0; 5369 } 5370 5371 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags) 5372 { 5373 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5374 int data; 5375 5376 /* AMD_CG_SUPPORT_GFX_MGCG */ 5377 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5378 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 5379 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 5380 5381 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 5382 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 5383 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 5384 5385 /* AMD_CG_SUPPORT_GFX_FGCG */ 5386 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 5387 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 5388 5389 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 5390 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 5391 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 5392 5393 /* AMD_CG_SUPPORT_GFX_CGCG */ 5394 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5395 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 5396 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 5397 5398 /* AMD_CG_SUPPORT_GFX_CGLS */ 5399 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 5400 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 5401 5402 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 5403 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5404 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 5405 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 5406 5407 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 5408 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 5409 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 5410 } 5411
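/*
 * Ring pointer helpers. The CP publishes rptr as a 32-bit value in
 * memory, while wptr is tracked as a 64-bit count and pushed out
 * through the doorbell aperture when available; the gfx ring can fall
 * back to CP_RB0_WPTR/CP_RB0_WPTR_HI, compute rings are doorbell-only.
 */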
5412 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 5413 { 5414 /* gfx11 is 32bit rptr */ 5415 return *(uint32_t *)ring->rptr_cpu_addr; 5416 } 5417 5418 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 5419 { 5420 struct amdgpu_device *adev = ring->adev; 5421 u64 wptr; 5422 5423 /* XXX check if swapping is necessary on BE */ 5424 if (ring->use_doorbell) { 5425 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5426 } else { 5427 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); 5428 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; 5429 } 5430 5431 return wptr; 5432 } 5433 5434 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 5435 { 5436 struct amdgpu_device *adev = ring->adev; 5437 uint32_t *wptr_saved; 5438 uint32_t *is_queue_unmap; 5439 uint64_t aggregated_db_index; 5440 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size; 5441 uint64_t wptr_tmp; 5442 5443 if (ring->is_mes_queue) { 5444 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 5445 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 5446 sizeof(uint32_t)); 5447 aggregated_db_index = 5448 amdgpu_mes_get_aggregated_doorbell_index(adev, 5449 ring->hw_prio); 5450 5451 wptr_tmp = ring->wptr & ring->buf_mask; 5452 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 5453 *wptr_saved = wptr_tmp; 5454 /* the doorbell is assumed to always be used for MES-mapped queues */ 5455 if (*is_queue_unmap) { 5456 WDOORBELL64(aggregated_db_index, wptr_tmp); 5457 WDOORBELL64(ring->doorbell_index, wptr_tmp); 5458 } else { 5459 WDOORBELL64(ring->doorbell_index, wptr_tmp); 5463 } 5464 } else { 5465 if (ring->use_doorbell) { 5466 /* XXX check if swapping is necessary on BE */ 5467 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5468 ring->wptr); 5469 WDOORBELL64(ring->doorbell_index, ring->wptr); 5470 } else { 5471 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, 5472 lower_32_bits(ring->wptr)); 5473 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, 5474 upper_32_bits(ring->wptr)); 5475 } 5476 } 5477 } 5478 5479 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 5480 { 5481 /* gfx11 hardware is 32bit rptr */ 5482 return *(uint32_t *)ring->rptr_cpu_addr; 5483 } 5484 5485 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 5486 { 5487 u64 wptr; 5488 5489 /* XXX check if swapping is necessary on BE */ 5490 if (ring->use_doorbell) 5491 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5492 else 5493 BUG(); 5494 return wptr; 5495 } 5496
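/*
 * For MES-managed queues the set_wptr handlers rely on two shadow
 * dwords placed directly behind the MQD (layout inferred from the
 * pointer arithmetic below): the last written wptr and an
 * "is queue unmapped" flag. When the flag is set, the aggregated
 * doorbell is rung in addition to the queue's own doorbell so the
 * MES scheduler notices the submission.
 */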
5497 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 5498 { 5499 struct amdgpu_device *adev = ring->adev; 5500 uint32_t *wptr_saved; 5501 uint32_t *is_queue_unmap; 5502 uint64_t aggregated_db_index; 5503 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size; 5504 uint64_t wptr_tmp; 5505 5506 if (ring->is_mes_queue) { 5507 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 5508 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 5509 sizeof(uint32_t)); 5510 aggregated_db_index = 5511 amdgpu_mes_get_aggregated_doorbell_index(adev, 5512 ring->hw_prio); 5513 5514 wptr_tmp = ring->wptr & ring->buf_mask; 5515 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 5516 *wptr_saved = wptr_tmp; 5517 /* the doorbell is assumed to always be used for MES-mapped queues */ 5518 if (*is_queue_unmap) { 5519 WDOORBELL64(aggregated_db_index, wptr_tmp); 5520 WDOORBELL64(ring->doorbell_index, wptr_tmp); 5521 } else { 5522 WDOORBELL64(ring->doorbell_index, wptr_tmp); 5526 } 5527 } else { 5528 /* XXX check if swapping is necessary on BE */ 5529 if (ring->use_doorbell) { 5530 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5531 ring->wptr); 5532 WDOORBELL64(ring->doorbell_index, ring->wptr); 5533 } else { 5534 BUG(); /* only DOORBELL method supported on gfx11 now */ 5535 } 5536 } 5537 } 5538 5539 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 5540 { 5541 struct amdgpu_device *adev = ring->adev; 5542 u32 ref_and_mask, reg_mem_engine; 5543 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 5544 5545 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 5546 switch (ring->me) { 5547 case 1: 5548 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 5549 break; 5550 case 2: 5551 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 5552 break; 5553 default: 5554 return; 5555 } 5556 reg_mem_engine = 0; 5557 } else { 5558 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 5559 reg_mem_engine = 1; /* pfp */ 5560 } 5561 5562 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 5563 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 5564 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 5565 ref_and_mask, ref_and_mask, 0x20); 5566 } 5567 5568 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 5569 struct amdgpu_job *job, 5570 struct amdgpu_ib *ib, 5571 uint32_t flags) 5572 { 5573 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5574 u32 header, control = 0; 5575 5576 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); 5577 5578 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 5579 5580 control |= ib->length_dw | (vmid << 24); 5581 5582 if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 5583 control |= INDIRECT_BUFFER_PRE_ENB(1); 5584 5585 if (flags & AMDGPU_IB_PREEMPTED) 5586 control |= INDIRECT_BUFFER_PRE_RESUME(1); 5587 5588 if (vmid) 5589 gfx_v11_0_ring_emit_de_meta(ring, 5590 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ?
true : false); 5591 } 5592 5593 if (ring->is_mes_queue) 5594 /* inherit vmid from mqd */ 5595 control |= 0x400000; 5596 5597 amdgpu_ring_write(ring, header); 5598 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5599 amdgpu_ring_write(ring, 5600 #ifdef __BIG_ENDIAN 5601 (2 << 0) | 5602 #endif 5603 lower_32_bits(ib->gpu_addr)); 5604 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5605 amdgpu_ring_write(ring, control); 5606 } 5607 5608 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 5609 struct amdgpu_job *job, 5610 struct amdgpu_ib *ib, 5611 uint32_t flags) 5612 { 5613 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5614 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 5615 5616 if (ring->is_mes_queue) 5617 /* inherit vmid from mqd */ 5618 control |= 0x40000000; 5619 5620 /* Currently, there is a high possibility to get wave ID mismatch 5621 * between ME and GDS, leading to a hw deadlock, because ME generates 5622 * different wave IDs than the GDS expects. This situation happens 5623 * randomly when at least 5 compute pipes use GDS ordered append. 5624 * The wave IDs generated by ME are also wrong after suspend/resume. 5625 * Those are probably bugs somewhere else in the kernel driver. 5626 * 5627 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 5628 * GDS to 0 for this ring (me/pipe). 5629 */ 5630 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 5631 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 5632 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); 5633 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 5634 } 5635 5636 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 5637 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5638 amdgpu_ring_write(ring, 5639 #ifdef __BIG_ENDIAN 5640 (2 << 0) | 5641 #endif 5642 lower_32_bits(ib->gpu_addr)); 5643 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5644 amdgpu_ring_write(ring, control); 5645 } 5646 5647 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 5648 u64 seq, unsigned flags) 5649 { 5650 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 5651 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 5652 5653 /* RELEASE_MEM - flush caches, send int */ 5654 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 5655 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | 5656 PACKET3_RELEASE_MEM_GCR_GL2_WB | 5657 PACKET3_RELEASE_MEM_GCR_GL2_INV | 5658 PACKET3_RELEASE_MEM_GCR_GL2_US | 5659 PACKET3_RELEASE_MEM_GCR_GL1_INV | 5660 PACKET3_RELEASE_MEM_GCR_GLV_INV | 5661 PACKET3_RELEASE_MEM_GCR_GLM_INV | 5662 PACKET3_RELEASE_MEM_GCR_GLM_WB | 5663 PACKET3_RELEASE_MEM_CACHE_POLICY(3) | 5664 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 5665 PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 5666 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 5667 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0))); 5668 5669 /* 5670 * the address should be Qword aligned if 64bit write, Dword 5671 * aligned if only send 32bit data low (discard data high) 5672 */ 5673 if (write64bit) 5674 BUG_ON(addr & 0x7); 5675 else 5676 BUG_ON(addr & 0x3); 5677 amdgpu_ring_write(ring, lower_32_bits(addr)); 5678 amdgpu_ring_write(ring, upper_32_bits(addr)); 5679 amdgpu_ring_write(ring, lower_32_bits(seq)); 5680 amdgpu_ring_write(ring, upper_32_bits(seq)); 5681 amdgpu_ring_write(ring, ring->is_mes_queue ? 
5682 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0); 5683 } 5684 5685 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 5686 { 5687 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 5688 uint32_t seq = ring->fence_drv.sync_seq; 5689 uint64_t addr = ring->fence_drv.gpu_addr; 5690 5691 gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), 5692 upper_32_bits(addr), seq, 0xffffffff, 4); 5693 } 5694 5695 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 5696 uint16_t pasid, uint32_t flush_type, 5697 bool all_hub, uint8_t dst_sel) 5698 { 5699 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); 5700 amdgpu_ring_write(ring, 5701 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) | 5702 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | 5703 PACKET3_INVALIDATE_TLBS_PASID(pasid) | 5704 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); 5705 } 5706 5707 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 5708 unsigned vmid, uint64_t pd_addr) 5709 { 5710 if (ring->is_mes_queue) 5711 gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); 5712 else 5713 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 5714 5715 /* compute doesn't have PFP */ 5716 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 5717 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 5718 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 5719 amdgpu_ring_write(ring, 0x0); 5720 } 5721 } 5722 5723 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 5724 u64 seq, unsigned int flags) 5725 { 5726 struct amdgpu_device *adev = ring->adev; 5727 5728 /* we only allocate 32bit for each seq wb address */ 5729 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 5730 5731 /* write fence seq to the "addr" */ 5732 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5733 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5734 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 5735 amdgpu_ring_write(ring, lower_32_bits(addr)); 5736 amdgpu_ring_write(ring, upper_32_bits(addr)); 5737 amdgpu_ring_write(ring, lower_32_bits(seq)); 5738 5739 if (flags & AMDGPU_FENCE_FLAG_INT) { 5740 /* set register to trigger INT */ 5741 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5742 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5743 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 5744 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); 5745 amdgpu_ring_write(ring, 0); 5746 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 5747 } 5748 } 5749 5750 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, 5751 uint32_t flags) 5752 { 5753 uint32_t dw2 = 0; 5754 5755 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ 5756 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 5757 /* set load_global_config & load_global_uconfig */ 5758 dw2 |= 0x8001; 5759 /* set load_cs_sh_regs */ 5760 dw2 |= 0x01000000; 5761 /* set load_per_context_state & load_gfx_sh_regs for GFX */ 5762 dw2 |= 0x10002; 5763 } 5764 5765 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 5766 amdgpu_ring_write(ring, dw2); 5767 amdgpu_ring_write(ring, 0); 5768 } 5769 5770 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) 5771 { 5772 unsigned ret; 5773 5774 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 5775 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); 5776 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); 5777 amdgpu_ring_write(ring, 0); /* discard following DWs if 
*cond_exec_gpu_addr==0 */ 5778 ret = ring->wptr & ring->buf_mask; 5779 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */ 5780 5781 return ret; 5782 } 5783 5784 static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) 5785 { 5786 unsigned cur; 5787 BUG_ON(offset > ring->buf_mask); 5788 BUG_ON(ring->ring[offset] != 0x55aa55aa); 5789 5790 cur = (ring->wptr - 1) & ring->buf_mask; 5791 if (likely(cur > offset)) 5792 ring->ring[offset] = cur - offset; 5793 else 5794 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur; 5795 } 5796 5797 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) 5798 { 5799 int i, r = 0; 5800 struct amdgpu_device *adev = ring->adev; 5801 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 5802 struct amdgpu_ring *kiq_ring = &kiq->ring; 5803 unsigned long flags; 5804 5805 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 5806 return -EINVAL; 5807 5808 spin_lock_irqsave(&kiq->ring_lock, flags); 5809 5810 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 5811 spin_unlock_irqrestore(&kiq->ring_lock, flags); 5812 return -ENOMEM; 5813 } 5814 5815 /* assert preemption condition */ 5816 amdgpu_ring_set_preempt_cond_exec(ring, false); 5817 5818 /* assert IB preemption, emit the trailing fence */ 5819 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 5820 ring->trail_fence_gpu_addr, 5821 ++ring->trail_seq); 5822 amdgpu_ring_commit(kiq_ring); 5823 5824 spin_unlock_irqrestore(&kiq->ring_lock, flags); 5825 5826 /* poll the trailing fence */ 5827 for (i = 0; i < adev->usec_timeout; i++) { 5828 if (ring->trail_seq == 5829 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 5830 break; 5831 udelay(1); 5832 } 5833 5834 if (i >= adev->usec_timeout) { 5835 r = -EINVAL; 5836 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 5837 } 5838 5839 /* deassert preemption condition */ 5840 amdgpu_ring_set_preempt_cond_exec(ring, true); 5841 return r; 5842 } 5843 5844 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) 5845 { 5846 struct amdgpu_device *adev = ring->adev; 5847 struct v10_de_ib_state de_payload = {0}; 5848 uint64_t offset, gds_addr, de_payload_gpu_addr; 5849 void *de_payload_cpu_addr; 5850 int cnt; 5851 5852 if (ring->is_mes_queue) { 5853 offset = offsetof(struct amdgpu_mes_ctx_meta_data, 5854 gfx[0].gfx_meta_data) + 5855 offsetof(struct v10_gfx_meta_data, de_payload); 5856 de_payload_gpu_addr = 5857 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 5858 de_payload_cpu_addr = 5859 amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 5860 5861 offset = offsetof(struct amdgpu_mes_ctx_meta_data, 5862 gfx[0].gds_backup) + 5863 offsetof(struct v10_gfx_meta_data, de_payload); 5864 gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 5865 } else { 5866 offset = offsetof(struct v10_gfx_meta_data, de_payload); 5867 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; 5868 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; 5869 5870 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + 5871 AMDGPU_CSA_SIZE - adev->gds.gds_size, 5872 PAGE_SIZE); 5873 } 5874 5875 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); 5876 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 5877 5878 cnt = (sizeof(de_payload) >> 2) + 4 - 2; 5879 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 5880 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 5881 WRITE_DATA_DST_SEL(8) | 5882 WR_CONFIRM) | 5883 WRITE_DATA_CACHE_POLICY(0)); 5884 amdgpu_ring_write(ring, 
lower_32_bits(de_payload_gpu_addr)); 5885 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); 5886 5887 if (resume) 5888 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, 5889 sizeof(de_payload) >> 2); 5890 else 5891 amdgpu_ring_write_multiple(ring, (void *)&de_payload, 5892 sizeof(de_payload) >> 2); 5893 } 5894 5895 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, 5896 bool secure) 5897 { 5898 uint32_t v = secure ? FRAME_TMZ : 0; 5899 5900 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 5901 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1)); 5902 } 5903 5904 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 5905 uint32_t reg_val_offs) 5906 { 5907 struct amdgpu_device *adev = ring->adev; 5908 5909 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 5910 amdgpu_ring_write(ring, 0 | /* src: register*/ 5911 (5 << 8) | /* dst: memory */ 5912 (1 << 20)); /* write confirm */ 5913 amdgpu_ring_write(ring, reg); 5914 amdgpu_ring_write(ring, 0); 5915 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 5916 reg_val_offs * 4)); 5917 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 5918 reg_val_offs * 4)); 5919 } 5920 5921 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 5922 uint32_t val) 5923 { 5924 uint32_t cmd = 0; 5925 5926 switch (ring->funcs->type) { 5927 case AMDGPU_RING_TYPE_GFX: 5928 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 5929 break; 5930 case AMDGPU_RING_TYPE_KIQ: 5931 cmd = (1 << 16); /* no inc addr */ 5932 break; 5933 default: 5934 cmd = WR_CONFIRM; 5935 break; 5936 } 5937 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5938 amdgpu_ring_write(ring, cmd); 5939 amdgpu_ring_write(ring, reg); 5940 amdgpu_ring_write(ring, 0); 5941 amdgpu_ring_write(ring, val); 5942 } 5943 5944 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 5945 uint32_t val, uint32_t mask) 5946 { 5947 gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 5948 } 5949 5950 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 5951 uint32_t reg0, uint32_t reg1, 5952 uint32_t ref, uint32_t mask) 5953 { 5954 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 5955 5956 gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 5957 ref, mask, 0x20); 5958 } 5959 5960 static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, 5961 unsigned vmid) 5962 { 5963 struct amdgpu_device *adev = ring->adev; 5964 uint32_t value = 0; 5965 5966 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 5967 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 5968 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 5969 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 5970 WREG32_SOC15(GC, 0, regSQ_CMD, value); 5971 } 5972 5973 static void 5974 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 5975 uint32_t me, uint32_t pipe, 5976 enum amdgpu_interrupt_state state) 5977 { 5978 uint32_t cp_int_cntl, cp_int_cntl_reg; 5979 5980 if (!me) { 5981 switch (pipe) { 5982 case 0: 5983 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 5984 break; 5985 case 1: 5986 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); 5987 break; 5988 default: 5989 DRM_DEBUG("invalid pipe %d\n", pipe); 5990 return; 5991 } 5992 } else { 5993 DRM_DEBUG("invalid me %d\n", me); 5994 return; 5995 } 5996 5997 switch (state) { 5998 case AMDGPU_IRQ_STATE_DISABLE: 5999 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6000 
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6001 TIME_STAMP_INT_ENABLE, 0); 6002 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6003 GENERIC0_INT_ENABLE, 0); 6004 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6005 break; 6006 case AMDGPU_IRQ_STATE_ENABLE: 6007 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 6008 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6009 TIME_STAMP_INT_ENABLE, 1); 6010 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 6011 GENERIC0_INT_ENABLE, 1); 6012 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 6013 break; 6014 default: 6015 break; 6016 } 6017 } 6018 6019 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 6020 int me, int pipe, 6021 enum amdgpu_interrupt_state state) 6022 { 6023 u32 mec_int_cntl, mec_int_cntl_reg; 6024 6025 /* 6026 * amdgpu controls only the first MEC. That's why this function only 6027 * handles the setting of interrupts for this specific MEC. All other 6028 * pipes' interrupts are set by amdkfd. 6029 */ 6030 6031 if (me == 1) { 6032 switch (pipe) { 6033 case 0: 6034 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6035 break; 6036 case 1: 6037 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 6038 break; 6039 case 2: 6040 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); 6041 break; 6042 case 3: 6043 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); 6044 break; 6045 default: 6046 DRM_DEBUG("invalid pipe %d\n", pipe); 6047 return; 6048 } 6049 } else { 6050 DRM_DEBUG("invalid me %d\n", me); 6051 return; 6052 } 6053 6054 switch (state) { 6055 case AMDGPU_IRQ_STATE_DISABLE: 6056 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6057 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6058 TIME_STAMP_INT_ENABLE, 0); 6059 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6060 GENERIC0_INT_ENABLE, 0); 6061 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6062 break; 6063 case AMDGPU_IRQ_STATE_ENABLE: 6064 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6065 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6066 TIME_STAMP_INT_ENABLE, 1); 6067 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6068 GENERIC0_INT_ENABLE, 1); 6069 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6070 break; 6071 default: 6072 break; 6073 } 6074 } 6075 6076 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, 6077 struct amdgpu_irq_src *src, 6078 unsigned type, 6079 enum amdgpu_interrupt_state state) 6080 { 6081 switch (type) { 6082 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 6083 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 6084 break; 6085 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 6086 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 6087 break; 6088 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 6089 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 6090 break; 6091 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 6092 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 6093 break; 6094 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 6095 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 6096 break; 6097 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 6098 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 6099 break; 6100 default: 6101 break; 6102 } 6103 return 0; 6104 } 6105 6106 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, 6107 struct 
amdgpu_irq_src *source, 6108 struct amdgpu_iv_entry *entry) 6109 { 6110 int i; 6111 u8 me_id, pipe_id, queue_id; 6112 struct amdgpu_ring *ring; 6113 uint32_t mes_queue_id = entry->src_data[0]; 6114 6115 DRM_DEBUG("IH: CP EOP\n"); 6116 6117 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { 6118 struct amdgpu_mes_queue *queue; 6119 6120 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; 6121 6122 spin_lock(&adev->mes.queue_id_lock); 6123 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); 6124 if (queue) { 6125 DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); 6126 amdgpu_fence_process(queue->ring); 6127 } 6128 spin_unlock(&adev->mes.queue_id_lock); 6129 } else { 6130 me_id = (entry->ring_id & 0x0c) >> 2; 6131 pipe_id = (entry->ring_id & 0x03) >> 0; 6132 queue_id = (entry->ring_id & 0x70) >> 4; 6133 6134 switch (me_id) { 6135 case 0: 6136 if (pipe_id == 0) 6137 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 6138 else 6139 amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 6140 break; 6141 case 1: 6142 case 2: 6143 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6144 ring = &adev->gfx.compute_ring[i]; 6145 /* Per-queue interrupt is supported for MEC starting from VI. 6146 * The interrupt can only be enabled/disabled per pipe instead 6147 * of per queue. 6148 */ 6149 if ((ring->me == me_id) && 6150 (ring->pipe == pipe_id) && 6151 (ring->queue == queue_id)) 6152 amdgpu_fence_process(ring); 6153 } 6154 break; 6155 } 6156 } 6157 6158 return 0; 6159 } 6160 6161 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 6162 struct amdgpu_irq_src *source, 6163 unsigned type, 6164 enum amdgpu_interrupt_state state) 6165 { 6166 switch (state) { 6167 case AMDGPU_IRQ_STATE_DISABLE: 6168 case AMDGPU_IRQ_STATE_ENABLE: 6169 WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, 6170 PRIV_REG_INT_ENABLE, 6171 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6172 break; 6173 default: 6174 break; 6175 } 6176 6177 return 0; 6178 } 6179 6180 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 6181 struct amdgpu_irq_src *source, 6182 unsigned type, 6183 enum amdgpu_interrupt_state state) 6184 { 6185 switch (state) { 6186 case AMDGPU_IRQ_STATE_DISABLE: 6187 case AMDGPU_IRQ_STATE_ENABLE: 6188 WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, 6189 PRIV_INSTR_INT_ENABLE, 6190 state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); 6191 break; 6192 default: 6193 break; 6194 } 6195 6196 return 0; 6197 } 6198 6199 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, 6200 struct amdgpu_iv_entry *entry) 6201 { 6202 u8 me_id, pipe_id, queue_id; 6203 struct amdgpu_ring *ring; 6204 int i; 6205 6206 me_id = (entry->ring_id & 0x0c) >> 2; 6207 pipe_id = (entry->ring_id & 0x03) >> 0; 6208 queue_id = (entry->ring_id & 0x70) >> 4; 6209 6210 switch (me_id) { 6211 case 0: 6212 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 6213 ring = &adev->gfx.gfx_ring[i]; 6214 /* we only enabled 1 gfx queue per pipe for now */ 6215 if (ring->me == me_id && ring->pipe == pipe_id) 6216 drm_sched_fault(&ring->sched); 6217 } 6218 break; 6219 case 1: 6220 case 2: 6221 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6222 ring = &adev->gfx.compute_ring[i]; 6223 if (ring->me == me_id && ring->pipe == pipe_id && 6224 ring->queue == queue_id) 6225 drm_sched_fault(&ring->sched); 6226 } 6227 break; 6228 default: 6229 BUG(); 6230 break; 6231 } 6232 } 6233 6234 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev, 6235 struct amdgpu_irq_src *source, 6236 struct amdgpu_iv_entry *entry) 6237 { 6238 DRM_ERROR("Illegal register access in command stream\n"); 6239 gfx_v11_0_handle_priv_fault(adev, entry); 6240 return 0; 6241 } 6242 6243 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev, 6244 struct amdgpu_irq_src *source, 6245 struct amdgpu_iv_entry *entry) 6246 { 6247 DRM_ERROR("Illegal instruction in command stream\n"); 6248 gfx_v11_0_handle_priv_fault(adev, entry); 6249 return 0; 6250 } 6251 6252 #if 0 6253 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, 6254 struct amdgpu_irq_src *src, 6255 unsigned int type, 6256 enum amdgpu_interrupt_state state) 6257 { 6258 uint32_t tmp, target; 6259 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); 6260 6261 target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6262 target += ring->pipe; 6263 6264 switch (type) { 6265 case AMDGPU_CP_KIQ_IRQ_DRIVER0: 6266 if (state == AMDGPU_IRQ_STATE_DISABLE) { 6267 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6268 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6269 GENERIC2_INT_ENABLE, 0); 6270 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6271 6272 tmp = RREG32_SOC15_IP(GC, target); 6273 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6274 GENERIC2_INT_ENABLE, 0); 6275 WREG32_SOC15_IP(GC, target, tmp); 6276 } else { 6277 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6278 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6279 GENERIC2_INT_ENABLE, 1); 6280 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6281 6282 tmp = RREG32_SOC15_IP(GC, target); 6283 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6284 GENERIC2_INT_ENABLE, 1); 6285 WREG32_SOC15_IP(GC, target, tmp); 6286 } 6287 break; 6288 default: 6289 BUG(); /* kiq only support GENERIC2_INT now */ 6290 break; 6291 } 6292 return 0; 6293 } 6294 #endif 6295 6296 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring) 6297 { 6298 const unsigned int gcr_cntl = 6299 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | 6300 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | 6301 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | 6302 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | 6303 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | 6304 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | 6305 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | 6306 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); 6307 6308 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ 6309 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); 
6310 amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ 6311 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 6312 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 6313 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 6314 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 6315 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 6316 amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ 6317 } 6318 6319 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { 6320 .name = "gfx_v11_0", 6321 .early_init = gfx_v11_0_early_init, 6322 .late_init = gfx_v11_0_late_init, 6323 .sw_init = gfx_v11_0_sw_init, 6324 .sw_fini = gfx_v11_0_sw_fini, 6325 .hw_init = gfx_v11_0_hw_init, 6326 .hw_fini = gfx_v11_0_hw_fini, 6327 .suspend = gfx_v11_0_suspend, 6328 .resume = gfx_v11_0_resume, 6329 .is_idle = gfx_v11_0_is_idle, 6330 .wait_for_idle = gfx_v11_0_wait_for_idle, 6331 .soft_reset = gfx_v11_0_soft_reset, 6332 .check_soft_reset = gfx_v11_0_check_soft_reset, 6333 .set_clockgating_state = gfx_v11_0_set_clockgating_state, 6334 .set_powergating_state = gfx_v11_0_set_powergating_state, 6335 .get_clockgating_state = gfx_v11_0_get_clockgating_state, 6336 }; 6337 6338 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 6339 .type = AMDGPU_RING_TYPE_GFX, 6340 .align_mask = 0xff, 6341 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 6342 .support_64bit_ptrs = true, 6343 .vmhub = AMDGPU_GFXHUB_0, 6344 .get_rptr = gfx_v11_0_ring_get_rptr_gfx, 6345 .get_wptr = gfx_v11_0_ring_get_wptr_gfx, 6346 .set_wptr = gfx_v11_0_ring_set_wptr_gfx, 6347 .emit_frame_size = /* totally 242 maximum if 16 IBs */ 6348 5 + /* COND_EXEC */ 6349 7 + /* PIPELINE_SYNC */ 6350 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 6351 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 6352 2 + /* VM_FLUSH */ 6353 8 + /* FENCE for VM_FLUSH */ 6354 20 + /* GDS switch */ 6355 5 + /* COND_EXEC */ 6356 7 + /* HDP_flush */ 6357 4 + /* VGT_flush */ 6358 31 + /* DE_META */ 6359 3 + /* CNTX_CTRL */ 6360 5 + /* HDP_INVL */ 6361 8 + 8 + /* FENCE x2 */ 6362 8, /* gfx_v11_0_emit_mem_sync */ 6363 .emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */ 6364 .emit_ib = gfx_v11_0_ring_emit_ib_gfx, 6365 .emit_fence = gfx_v11_0_ring_emit_fence, 6366 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, 6367 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, 6368 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, 6369 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, 6370 .test_ring = gfx_v11_0_ring_test_ring, 6371 .test_ib = gfx_v11_0_ring_test_ib, 6372 .insert_nop = amdgpu_ring_insert_nop, 6373 .pad_ib = amdgpu_ring_generic_pad_ib, 6374 .emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl, 6375 .init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec, 6376 .patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec, 6377 .preempt_ib = gfx_v11_0_ring_preempt_ib, 6378 .emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl, 6379 .emit_wreg = gfx_v11_0_ring_emit_wreg, 6380 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 6381 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 6382 .soft_recovery = gfx_v11_0_ring_soft_recovery, 6383 .emit_mem_sync = gfx_v11_0_emit_mem_sync, 6384 }; 6385 6386 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { 6387 .type = AMDGPU_RING_TYPE_COMPUTE, 6388 .align_mask = 0xff, 6389 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 6390 .support_64bit_ptrs = true, 6391 .vmhub = AMDGPU_GFXHUB_0, 6392 .get_rptr = gfx_v11_0_ring_get_rptr_compute, 6393 .get_wptr = gfx_v11_0_ring_get_wptr_compute, 6394 .set_wptr = gfx_v11_0_ring_set_wptr_compute, 6395 .emit_frame_size 
= 6396 20 + /* gfx_v11_0_ring_emit_gds_switch */ 6397 7 + /* gfx_v11_0_ring_emit_hdp_flush */ 6398 5 + /* hdp invalidate */ 6399 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ 6400 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 6401 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 6402 2 + /* gfx_v11_0_ring_emit_vm_flush */ 6403 8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */ 6404 8, /* gfx_v11_0_emit_mem_sync */ 6405 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ 6406 .emit_ib = gfx_v11_0_ring_emit_ib_compute, 6407 .emit_fence = gfx_v11_0_ring_emit_fence, 6408 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, 6409 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, 6410 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, 6411 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, 6412 .test_ring = gfx_v11_0_ring_test_ring, 6413 .test_ib = gfx_v11_0_ring_test_ib, 6414 .insert_nop = amdgpu_ring_insert_nop, 6415 .pad_ib = amdgpu_ring_generic_pad_ib, 6416 .emit_wreg = gfx_v11_0_ring_emit_wreg, 6417 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 6418 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 6419 .emit_mem_sync = gfx_v11_0_emit_mem_sync, 6420 }; 6421 6422 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { 6423 .type = AMDGPU_RING_TYPE_KIQ, 6424 .align_mask = 0xff, 6425 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 6426 .support_64bit_ptrs = true, 6427 .vmhub = AMDGPU_GFXHUB_0, 6428 .get_rptr = gfx_v11_0_ring_get_rptr_compute, 6429 .get_wptr = gfx_v11_0_ring_get_wptr_compute, 6430 .set_wptr = gfx_v11_0_ring_set_wptr_compute, 6431 .emit_frame_size = 6432 20 + /* gfx_v11_0_ring_emit_gds_switch */ 6433 7 + /* gfx_v11_0_ring_emit_hdp_flush */ 6434 5 + /*hdp invalidate */ 6435 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ 6436 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 6437 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 6438 2 + /* gfx_v11_0_ring_emit_vm_flush */ 6439 8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 6440 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ 6441 .emit_ib = gfx_v11_0_ring_emit_ib_compute, 6442 .emit_fence = gfx_v11_0_ring_emit_fence_kiq, 6443 .test_ring = gfx_v11_0_ring_test_ring, 6444 .test_ib = gfx_v11_0_ring_test_ib, 6445 .insert_nop = amdgpu_ring_insert_nop, 6446 .pad_ib = amdgpu_ring_generic_pad_ib, 6447 .emit_rreg = gfx_v11_0_ring_emit_rreg, 6448 .emit_wreg = gfx_v11_0_ring_emit_wreg, 6449 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 6450 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 6451 }; 6452 6453 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) 6454 { 6455 int i; 6456 6457 adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq; 6458 6459 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 6460 adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx; 6461 6462 for (i = 0; i < adev->gfx.num_compute_rings; i++) 6463 adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute; 6464 } 6465 6466 static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = { 6467 .set = gfx_v11_0_set_eop_interrupt_state, 6468 .process = gfx_v11_0_eop_irq, 6469 }; 6470 6471 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = { 6472 .set = gfx_v11_0_set_priv_reg_fault_state, 6473 .process = gfx_v11_0_priv_reg_irq, 6474 }; 6475 6476 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { 6477 .set = gfx_v11_0_set_priv_inst_fault_state, 6478 .process = gfx_v11_0_priv_inst_irq, 6479 }; 6480 6481 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) 
6482 { 6483 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 6484 adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs; 6485 6486 adev->gfx.priv_reg_irq.num_types = 1; 6487 adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs; 6488 6489 adev->gfx.priv_inst_irq.num_types = 1; 6490 adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; 6491 } 6492 6493 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev) 6494 { 6495 if (adev->flags & AMD_IS_APU) 6496 adev->gfx.imu.mode = MISSION_MODE; 6497 else 6498 adev->gfx.imu.mode = DEBUG_MODE; 6499 6500 adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs; 6501 } 6502 6503 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev) 6504 { 6505 adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs; 6506 } 6507 6508 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev) 6509 { 6510 unsigned total_cu = adev->gfx.config.max_cu_per_sh * 6511 adev->gfx.config.max_sh_per_se * 6512 adev->gfx.config.max_shader_engines; 6513 6514 adev->gds.gds_size = 0x1000; 6515 adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1; 6516 adev->gds.gws_size = 64; 6517 adev->gds.oa_size = 16; 6518 } 6519 6520 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev) 6521 { 6522 /* set gfx eng mqd */ 6523 adev->mqds[AMDGPU_HW_IP_GFX].mqd_size = 6524 sizeof(struct v11_gfx_mqd); 6525 adev->mqds[AMDGPU_HW_IP_GFX].init_mqd = 6526 gfx_v11_0_gfx_mqd_init; 6527 /* set compute eng mqd */ 6528 adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size = 6529 sizeof(struct v11_compute_mqd); 6530 adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd = 6531 gfx_v11_0_compute_mqd_init; 6532 } 6533 6534 static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev, 6535 u32 bitmap) 6536 { 6537 u32 data; 6538 6539 if (!bitmap) 6540 return; 6541 6542 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 6543 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 6544 6545 WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data); 6546 } 6547 6548 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev) 6549 { 6550 u32 data, wgp_bitmask; 6551 data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG); 6552 data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG); 6553 6554 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 6555 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 6556 6557 wgp_bitmask = 6558 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); 6559 6560 return (~data) & wgp_bitmask; 6561 } 6562 6563 static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev) 6564 { 6565 u32 wgp_idx, wgp_active_bitmap; 6566 u32 cu_bitmap_per_wgp, cu_active_bitmap; 6567 6568 wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev); 6569 cu_active_bitmap = 0; 6570 6571 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) { 6572 /* if there is one WGP enabled, it means 2 CUs will be enabled */ 6573 cu_bitmap_per_wgp = 3 << (2 * wgp_idx); 6574 if (wgp_active_bitmap & (1 << wgp_idx)) 6575 cu_active_bitmap |= cu_bitmap_per_wgp; 6576 } 6577 6578 return cu_active_bitmap; 6579 } 6580 6581 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, 6582 struct amdgpu_cu_info *cu_info) 6583 { 6584 int i, j, k, counter, active_cu_number = 0; 6585 u32 mask, bitmap; 6586 unsigned disable_masks[8 * 2]; 6587 6588 if (!adev || !cu_info) 6589 return -EINVAL; 6590 6591 amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2); 6592 6593 mutex_lock(&adev->grbm_idx_mutex); 6594 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 
6595 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 6596 mask = 1; 6597 counter = 0; 6598 gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff); 6599 if (i < 8 && j < 2) 6600 gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh( 6601 adev, disable_masks[i * 2 + j]); 6602 bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev); 6603 6604 /* 6605 * GFX11 could support more than 4 SEs, while the bitmap 6606 * in the cu_info struct is 4x4 and the ioctl interface struct 6607 * drm_amdgpu_info_device must stay stable. 6608 * So the last two columns of the bitmap are used to store the 6609 * CU mask for SEs 4 to 7; the layout of the bitmap is as below: 6610 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]} 6611 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]} 6612 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]} 6613 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]} 6614 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]} 6615 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]} 6616 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]} 6617 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]} 6618 */ 6619 cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap; 6620 6621 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 6622 if (bitmap & mask) 6623 counter++; 6624 6625 mask <<= 1; 6626 } 6627 active_cu_number += counter; 6628 } 6629 } 6630 gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 6631 mutex_unlock(&adev->grbm_idx_mutex); 6632 6633 cu_info->number = active_cu_number; 6634 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 6635 6636 return 0; 6637 } 6638 6639 const struct amdgpu_ip_block_version gfx_v11_0_ip_block = 6640 { 6641 .type = AMD_IP_BLOCK_TYPE_GFX, 6642 .major = 11, 6643 .minor = 0, 6644 .rev = 0, 6645 .funcs = &gfx_v11_0_ip_funcs, 6646 }; 6647