1 /* 2 * Copyright 2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include <linux/delay.h> 24 #include <linux/kernel.h> 25 #include <linux/firmware.h> 26 #include <linux/module.h> 27 #include <linux/pci.h> 28 #include "amdgpu.h" 29 #include "amdgpu_gfx.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_smu.h" 32 #include "amdgpu_atomfirmware.h" 33 #include "imu_v11_0.h" 34 #include "soc21.h" 35 #include "nvd.h" 36 37 #include "gc/gc_11_0_0_offset.h" 38 #include "gc/gc_11_0_0_sh_mask.h" 39 #include "smuio/smuio_13_0_6_offset.h" 40 #include "smuio/smuio_13_0_6_sh_mask.h" 41 #include "navi10_enum.h" 42 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" 43 44 #include "soc15.h" 45 #include "soc15d.h" 46 #include "clearstate_gfx11.h" 47 #include "v11_structs.h" 48 #include "gfx_v11_0.h" 49 #include "nbio_v4_3.h" 50 #include "mes_v11_0.h" 51 52 #define GFX11_NUM_GFX_RINGS 1 53 #define GFX11_MEC_HPD_SIZE 2048 54 55 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 56 57 #define regCGTT_WD_CLK_CTRL 0x5086 58 #define regCGTT_WD_CLK_CTRL_BASE_IDX 1 59 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1 0x4e7e 60 #define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX 1 61 62 MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); 63 MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); 64 MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); 65 MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); 66 MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); 67 MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin"); 68 MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin"); 69 MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin"); 70 MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin"); 71 MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin"); 72 MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin"); 73 MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin"); 74 MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin"); 75 76 static const struct soc15_reg_golden golden_settings_gc_11_0[] = 77 { 78 /* Pending on emulation bring up */ 79 }; 80 81 static const struct soc15_reg_golden golden_settings_gc_11_0_0[] = 82 { 83 /* Pending on emulation bring up */ 84 }; 85 86 static const struct soc15_reg_golden golden_settings_gc_rlc_spm_11_0[] = 87 { 88 /* Pending on emulation bring up */ 89 }; 90 91 static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = 92 { 93 SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010), 94 SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010), 95 SOC15_REG_GOLDEN_VALUE(GC, 0, 
regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200), 96 SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988), 97 SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007), 98 SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008), 99 SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100), 100 SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000), 101 SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a) 102 }; 103 104 #define DEFAULT_SH_MEM_CONFIG \ 105 ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ 106 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ 107 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT)) 108 109 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev); 110 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev); 111 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev); 112 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev); 113 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev); 114 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev); 115 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev); 116 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, 117 struct amdgpu_cu_info *cu_info); 118 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev); 119 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 120 u32 sh_num, u32 instance); 121 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); 122 123 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); 124 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure); 125 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 126 uint32_t val); 127 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); 128 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 129 uint16_t pasid, uint32_t flush_type, 130 bool all_hub, uint8_t dst_sel); 131 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev); 132 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev); 133 134 static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) 135 { 136 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 137 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | 138 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ 139 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ 140 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ 141 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ 142 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ 143 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 144 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ 145 } 146 147 static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring, 148 struct amdgpu_ring *ring) 149 { 150 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 151 uint64_t wptr_addr = ring->wptr_gpu_addr; 152 uint32_t me = 0, eng_sel = 0; 153 154 switch (ring->funcs->type) { 155 case AMDGPU_RING_TYPE_COMPUTE: 156 me = 1; 157 eng_sel = 0; 158 break; 159 case AMDGPU_RING_TYPE_GFX: 160 me = 0; 161 eng_sel = 4; 162 break; 163 case AMDGPU_RING_TYPE_MES: 164 me = 2; 165 eng_sel = 5; 166 break; 167 default: 168 WARN_ON(1); 169 } 170 171 
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 172 /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ 173 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 174 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 175 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ 176 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 177 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 178 PACKET3_MAP_QUEUES_ME((me)) | 179 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 180 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ 181 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) | 182 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 183 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 184 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 185 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 186 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 187 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 188 } 189 190 static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring, 191 struct amdgpu_ring *ring, 192 enum amdgpu_unmap_queues_action action, 193 u64 gpu_addr, u64 seq) 194 { 195 struct amdgpu_device *adev = kiq_ring->adev; 196 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0; 197 198 if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) { 199 amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq); 200 return; 201 } 202 203 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); 204 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 205 PACKET3_UNMAP_QUEUES_ACTION(action) | 206 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | 207 PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) | 208 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); 209 amdgpu_ring_write(kiq_ring, 210 PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); 211 212 if (action == PREEMPT_QUEUES_NO_UNMAP) { 213 amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr)); 214 amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr)); 215 amdgpu_ring_write(kiq_ring, seq); 216 } else { 217 amdgpu_ring_write(kiq_ring, 0); 218 amdgpu_ring_write(kiq_ring, 0); 219 amdgpu_ring_write(kiq_ring, 0); 220 } 221 } 222 223 static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring, 224 struct amdgpu_ring *ring, 225 u64 addr, 226 u64 seq) 227 { 228 uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 
4 : 0; 229 230 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5)); 231 amdgpu_ring_write(kiq_ring, 232 PACKET3_QUERY_STATUS_CONTEXT_ID(0) | 233 PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) | 234 PACKET3_QUERY_STATUS_COMMAND(2)); 235 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 236 PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) | 237 PACKET3_QUERY_STATUS_ENG_SEL(eng_sel)); 238 amdgpu_ring_write(kiq_ring, lower_32_bits(addr)); 239 amdgpu_ring_write(kiq_ring, upper_32_bits(addr)); 240 amdgpu_ring_write(kiq_ring, lower_32_bits(seq)); 241 amdgpu_ring_write(kiq_ring, upper_32_bits(seq)); 242 } 243 244 static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring, 245 uint16_t pasid, uint32_t flush_type, 246 bool all_hub) 247 { 248 gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1); 249 } 250 251 static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = { 252 .kiq_set_resources = gfx11_kiq_set_resources, 253 .kiq_map_queues = gfx11_kiq_map_queues, 254 .kiq_unmap_queues = gfx11_kiq_unmap_queues, 255 .kiq_query_status = gfx11_kiq_query_status, 256 .kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs, 257 .set_resources_size = 8, 258 .map_queues_size = 7, 259 .unmap_queues_size = 6, 260 .query_status_size = 7, 261 .invalidate_tlbs_size = 2, 262 }; 263 264 static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) 265 { 266 adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs; 267 } 268 269 static void gfx_v11_0_init_spm_golden_registers(struct amdgpu_device *adev) 270 { 271 switch (adev->ip_versions[GC_HWIP][0]) { 272 case IP_VERSION(11, 0, 0): 273 soc15_program_register_sequence(adev, 274 golden_settings_gc_rlc_spm_11_0, 275 (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_11_0)); 276 break; 277 default: 278 break; 279 } 280 } 281 282 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) 283 { 284 switch (adev->ip_versions[GC_HWIP][0]) { 285 case IP_VERSION(11, 0, 0): 286 soc15_program_register_sequence(adev, 287 golden_settings_gc_11_0, 288 (const u32)ARRAY_SIZE(golden_settings_gc_11_0)); 289 soc15_program_register_sequence(adev, 290 golden_settings_gc_11_0_0, 291 (const u32)ARRAY_SIZE(golden_settings_gc_11_0_0)); 292 break; 293 case IP_VERSION(11, 0, 1): 294 soc15_program_register_sequence(adev, 295 golden_settings_gc_11_0, 296 (const u32)ARRAY_SIZE(golden_settings_gc_11_0)); 297 soc15_program_register_sequence(adev, 298 golden_settings_gc_11_0_1, 299 (const u32)ARRAY_SIZE(golden_settings_gc_11_0_1)); 300 break; 301 default: 302 break; 303 } 304 gfx_v11_0_init_spm_golden_registers(adev); 305 } 306 307 static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, 308 bool wc, uint32_t reg, uint32_t val) 309 { 310 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 311 amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) | 312 WRITE_DATA_DST_SEL(0) | (wc ? 
WR_CONFIRM : 0)); 313 amdgpu_ring_write(ring, reg); 314 amdgpu_ring_write(ring, 0); 315 amdgpu_ring_write(ring, val); 316 } 317 318 static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, 319 int mem_space, int opt, uint32_t addr0, 320 uint32_t addr1, uint32_t ref, uint32_t mask, 321 uint32_t inv) 322 { 323 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 324 amdgpu_ring_write(ring, 325 /* memory (1) or register (0) */ 326 (WAIT_REG_MEM_MEM_SPACE(mem_space) | 327 WAIT_REG_MEM_OPERATION(opt) | /* wait */ 328 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 329 WAIT_REG_MEM_ENGINE(eng_sel))); 330 331 if (mem_space) 332 BUG_ON(addr0 & 0x3); /* Dword align */ 333 amdgpu_ring_write(ring, addr0); 334 amdgpu_ring_write(ring, addr1); 335 amdgpu_ring_write(ring, ref); 336 amdgpu_ring_write(ring, mask); 337 amdgpu_ring_write(ring, inv); /* poll interval */ 338 } 339 340 static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring) 341 { 342 struct amdgpu_device *adev = ring->adev; 343 uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 344 uint32_t tmp = 0; 345 unsigned i; 346 int r; 347 348 WREG32(scratch, 0xCAFEDEAD); 349 r = amdgpu_ring_alloc(ring, 5); 350 if (r) { 351 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 352 ring->idx, r); 353 return r; 354 } 355 356 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) { 357 gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF); 358 } else { 359 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 360 amdgpu_ring_write(ring, scratch - 361 PACKET3_SET_UCONFIG_REG_START); 362 amdgpu_ring_write(ring, 0xDEADBEEF); 363 } 364 amdgpu_ring_commit(ring); 365 366 for (i = 0; i < adev->usec_timeout; i++) { 367 tmp = RREG32(scratch); 368 if (tmp == 0xDEADBEEF) 369 break; 370 if (amdgpu_emu_mode == 1) 371 msleep(1); 372 else 373 udelay(1); 374 } 375 376 if (i >= adev->usec_timeout) 377 r = -ETIMEDOUT; 378 return r; 379 } 380 381 static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 382 { 383 struct amdgpu_device *adev = ring->adev; 384 struct amdgpu_ib ib; 385 struct dma_fence *f = NULL; 386 unsigned index; 387 uint64_t gpu_addr; 388 volatile uint32_t *cpu_ptr; 389 long r; 390 391 /* MES KIQ fw hasn't indirect buffer support for now */ 392 if (adev->enable_mes_kiq && 393 ring->funcs->type == AMDGPU_RING_TYPE_KIQ) 394 return 0; 395 396 memset(&ib, 0, sizeof(ib)); 397 398 if (ring->is_mes_queue) { 399 uint32_t padding, offset; 400 401 offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); 402 padding = amdgpu_mes_ctx_get_offs(ring, 403 AMDGPU_MES_CTX_PADDING_OFFS); 404 405 ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 406 ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 407 408 gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding); 409 cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding); 410 *cpu_ptr = cpu_to_le32(0xCAFEDEAD); 411 } else { 412 r = amdgpu_device_wb_get(adev, &index); 413 if (r) 414 return r; 415 416 gpu_addr = adev->wb.gpu_addr + (index * 4); 417 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); 418 cpu_ptr = &adev->wb.wb[index]; 419 420 r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib); 421 if (r) { 422 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 423 goto err1; 424 } 425 } 426 427 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); 428 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; 429 ib.ptr[2] = lower_32_bits(gpu_addr); 430 ib.ptr[3] = upper_32_bits(gpu_addr); 431 ib.ptr[4] = 0xDEADBEEF; 432 ib.length_dw = 5; 433 434 r = 
amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 435 if (r) 436 goto err2; 437 438 r = dma_fence_wait_timeout(f, false, timeout); 439 if (r == 0) { 440 r = -ETIMEDOUT; 441 goto err2; 442 } else if (r < 0) { 443 goto err2; 444 } 445 446 if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF) 447 r = 0; 448 else 449 r = -EINVAL; 450 err2: 451 if (!ring->is_mes_queue) 452 amdgpu_ib_free(adev, &ib, NULL); 453 dma_fence_put(f); 454 err1: 455 if (!ring->is_mes_queue) 456 amdgpu_device_wb_free(adev, index); 457 return r; 458 } 459 460 static void gfx_v11_0_free_microcode(struct amdgpu_device *adev) 461 { 462 release_firmware(adev->gfx.pfp_fw); 463 adev->gfx.pfp_fw = NULL; 464 release_firmware(adev->gfx.me_fw); 465 adev->gfx.me_fw = NULL; 466 release_firmware(adev->gfx.rlc_fw); 467 adev->gfx.rlc_fw = NULL; 468 release_firmware(adev->gfx.mec_fw); 469 adev->gfx.mec_fw = NULL; 470 471 kfree(adev->gfx.rlc.register_list_format); 472 } 473 474 static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev) 475 { 476 const struct rlc_firmware_header_v2_1 *rlc_hdr; 477 478 rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; 479 adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver); 480 adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver); 481 adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes); 482 adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes); 483 adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver); 484 adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver); 485 adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes); 486 adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes); 487 adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver); 488 adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver); 489 adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes); 490 adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes); 491 adev->gfx.rlc.reg_list_format_direct_reg_list_length = 492 le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); 493 } 494 495 static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev) 496 { 497 const struct rlc_firmware_header_v2_2 *rlc_hdr; 498 499 rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 500 adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes); 501 adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes); 502 adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes); 503 adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes); 504 } 505 506 static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev) 507 { 508 const struct rlc_firmware_header_v2_3 *rlc_hdr; 509 510 rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; 511 adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes); 512 adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes); 513 
adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes); 514 adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes); 515 } 516 517 static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) 518 { 519 char fw_name[40]; 520 char ucode_prefix[30]; 521 int err; 522 struct amdgpu_firmware_info *info = NULL; 523 const struct common_firmware_header *header = NULL; 524 const struct gfx_firmware_header_v1_0 *cp_hdr; 525 const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0; 526 const struct rlc_firmware_header_v2_0 *rlc_hdr; 527 unsigned int *tmp = NULL; 528 unsigned int i = 0; 529 uint16_t version_major; 530 uint16_t version_minor; 531 532 DRM_DEBUG("\n"); 533 534 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); 535 536 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix); 537 err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); 538 if (err) 539 goto out; 540 err = amdgpu_ucode_validate(adev->gfx.pfp_fw); 541 if (err) 542 goto out; 543 /* check pfp fw hdr version to decide if enable rs64 for gfx11.*/ 544 adev->gfx.rs64_enable = amdgpu_ucode_hdr_version( 545 (union amdgpu_firmware_header *) 546 adev->gfx.pfp_fw->data, 2, 0); 547 if (adev->gfx.rs64_enable) { 548 dev_info(adev->dev, "CP RS64 enable\n"); 549 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data; 550 adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version); 551 adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version); 552 553 } else { 554 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; 555 adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 556 adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 557 } 558 559 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix); 560 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); 561 if (err) 562 goto out; 563 err = amdgpu_ucode_validate(adev->gfx.me_fw); 564 if (err) 565 goto out; 566 if (adev->gfx.rs64_enable) { 567 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data; 568 adev->gfx.me_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version); 569 adev->gfx.me_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version); 570 571 } else { 572 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; 573 adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 574 adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 575 } 576 577 if (!amdgpu_sriov_vf(adev)) { 578 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); 579 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 580 if (err) 581 goto out; 582 err = amdgpu_ucode_validate(adev->gfx.rlc_fw); 583 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 584 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 585 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 586 587 adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); 588 adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); 589 adev->gfx.rlc.save_and_restore_offset = 590 le32_to_cpu(rlc_hdr->save_and_restore_offset); 591 adev->gfx.rlc.clear_state_descriptor_offset = 592 le32_to_cpu(rlc_hdr->clear_state_descriptor_offset); 593 adev->gfx.rlc.avail_scratch_ram_locations = 594 
le32_to_cpu(rlc_hdr->avail_scratch_ram_locations); 595 adev->gfx.rlc.reg_restore_list_size = 596 le32_to_cpu(rlc_hdr->reg_restore_list_size); 597 adev->gfx.rlc.reg_list_format_start = 598 le32_to_cpu(rlc_hdr->reg_list_format_start); 599 adev->gfx.rlc.reg_list_format_separate_start = 600 le32_to_cpu(rlc_hdr->reg_list_format_separate_start); 601 adev->gfx.rlc.starting_offsets_start = 602 le32_to_cpu(rlc_hdr->starting_offsets_start); 603 adev->gfx.rlc.reg_list_format_size_bytes = 604 le32_to_cpu(rlc_hdr->reg_list_format_size_bytes); 605 adev->gfx.rlc.reg_list_size_bytes = 606 le32_to_cpu(rlc_hdr->reg_list_size_bytes); 607 adev->gfx.rlc.register_list_format = 608 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes + 609 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); 610 if (!adev->gfx.rlc.register_list_format) { 611 err = -ENOMEM; 612 goto out; 613 } 614 615 tmp = (unsigned int *)((uintptr_t)rlc_hdr + 616 le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); 617 for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) 618 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); 619 620 adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; 621 622 tmp = (unsigned int *)((uintptr_t)rlc_hdr + 623 le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); 624 for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) 625 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); 626 627 if (version_major == 2) { 628 if (version_minor >= 1) 629 gfx_v11_0_init_rlc_ext_microcode(adev); 630 if (version_minor >= 2) 631 gfx_v11_0_init_rlc_iram_dram_microcode(adev); 632 if (version_minor == 3) 633 gfx_v11_0_init_rlcp_rlcv_microcode(adev); 634 } 635 } 636 637 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix); 638 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); 639 if (err) 640 goto out; 641 err = amdgpu_ucode_validate(adev->gfx.mec_fw); 642 if (err) 643 goto out; 644 if (adev->gfx.rs64_enable) { 645 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 646 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version); 647 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version); 648 649 } else { 650 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 651 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 652 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 653 } 654 655 /* only one MEC for gfx 11.0.0. 
*/ 656 adev->gfx.mec2_fw = NULL; 657 658 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 659 if (adev->gfx.rs64_enable) { 660 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data; 661 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP]; 662 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP; 663 info->fw = adev->gfx.pfp_fw; 664 header = (const struct common_firmware_header *)info->fw->data; 665 adev->firmware.fw_size += 666 ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE); 667 668 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK]; 669 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK; 670 info->fw = adev->gfx.pfp_fw; 671 header = (const struct common_firmware_header *)info->fw->data; 672 adev->firmware.fw_size += 673 ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); 674 675 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK]; 676 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK; 677 info->fw = adev->gfx.pfp_fw; 678 header = (const struct common_firmware_header *)info->fw->data; 679 adev->firmware.fw_size += 680 ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); 681 682 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data; 683 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME]; 684 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME; 685 info->fw = adev->gfx.me_fw; 686 header = (const struct common_firmware_header *)info->fw->data; 687 adev->firmware.fw_size += 688 ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE); 689 690 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK]; 691 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK; 692 info->fw = adev->gfx.me_fw; 693 header = (const struct common_firmware_header *)info->fw->data; 694 adev->firmware.fw_size += 695 ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); 696 697 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK]; 698 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK; 699 info->fw = adev->gfx.me_fw; 700 header = (const struct common_firmware_header *)info->fw->data; 701 adev->firmware.fw_size += 702 ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); 703 704 cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 705 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC]; 706 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC; 707 info->fw = adev->gfx.mec_fw; 708 header = (const struct common_firmware_header *)info->fw->data; 709 adev->firmware.fw_size += 710 ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE); 711 712 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK]; 713 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK; 714 info->fw = adev->gfx.mec_fw; 715 header = (const struct common_firmware_header *)info->fw->data; 716 adev->firmware.fw_size += 717 ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); 718 719 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK]; 720 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK; 721 info->fw = adev->gfx.mec_fw; 722 header = (const struct common_firmware_header *)info->fw->data; 723 adev->firmware.fw_size += 724 ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); 725 726 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK]; 727 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK; 728 info->fw = adev->gfx.mec_fw; 729 header = (const struct common_firmware_header *)info->fw->data; 730 adev->firmware.fw_size 
+= 731 ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); 732 733 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK]; 734 info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK; 735 info->fw = adev->gfx.mec_fw; 736 header = (const struct common_firmware_header *)info->fw->data; 737 adev->firmware.fw_size += 738 ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE); 739 } else { 740 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; 741 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; 742 info->fw = adev->gfx.pfp_fw; 743 header = (const struct common_firmware_header *)info->fw->data; 744 adev->firmware.fw_size += 745 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 746 747 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; 748 info->ucode_id = AMDGPU_UCODE_ID_CP_ME; 749 info->fw = adev->gfx.me_fw; 750 header = (const struct common_firmware_header *)info->fw->data; 751 adev->firmware.fw_size += 752 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 753 754 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; 755 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1; 756 info->fw = adev->gfx.mec_fw; 757 header = (const struct common_firmware_header *)info->fw->data; 758 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data; 759 adev->firmware.fw_size += 760 ALIGN(le32_to_cpu(header->ucode_size_bytes) - 761 le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); 762 763 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT]; 764 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT; 765 info->fw = adev->gfx.mec_fw; 766 adev->firmware.fw_size += 767 ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); 768 } 769 770 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; 771 info->ucode_id = AMDGPU_UCODE_ID_RLC_G; 772 info->fw = adev->gfx.rlc_fw; 773 if (info->fw) { 774 header = (const struct common_firmware_header *)info->fw->data; 775 adev->firmware.fw_size += 776 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 777 } 778 if (adev->gfx.rlc.save_restore_list_gpm_size_bytes && 779 adev->gfx.rlc.save_restore_list_srm_size_bytes) { 780 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM]; 781 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM; 782 info->fw = adev->gfx.rlc_fw; 783 adev->firmware.fw_size += 784 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE); 785 786 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM]; 787 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM; 788 info->fw = adev->gfx.rlc_fw; 789 adev->firmware.fw_size += 790 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE); 791 } 792 793 if (adev->gfx.rlc.rlc_iram_ucode_size_bytes && 794 adev->gfx.rlc.rlc_dram_ucode_size_bytes) { 795 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM]; 796 info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM; 797 info->fw = adev->gfx.rlc_fw; 798 adev->firmware.fw_size += 799 ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE); 800 801 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM]; 802 info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM; 803 info->fw = adev->gfx.rlc_fw; 804 adev->firmware.fw_size += 805 ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE); 806 } 807 808 if (adev->gfx.rlc.rlcp_ucode_size_bytes) { 809 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P]; 810 info->ucode_id = AMDGPU_UCODE_ID_RLC_P; 811 info->fw = adev->gfx.rlc_fw; 812 adev->firmware.fw_size += 813 ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE); 814 } 815 816 if (adev->gfx.rlc.rlcv_ucode_size_bytes) { 817 
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V]; 818 info->ucode_id = AMDGPU_UCODE_ID_RLC_V; 819 info->fw = adev->gfx.rlc_fw; 820 adev->firmware.fw_size += 821 ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE); 822 } 823 } 824 825 out: 826 if (err) { 827 dev_err(adev->dev, 828 "gfx11: Failed to load firmware \"%s\"\n", 829 fw_name); 830 release_firmware(adev->gfx.pfp_fw); 831 adev->gfx.pfp_fw = NULL; 832 release_firmware(adev->gfx.me_fw); 833 adev->gfx.me_fw = NULL; 834 release_firmware(adev->gfx.rlc_fw); 835 adev->gfx.rlc_fw = NULL; 836 release_firmware(adev->gfx.mec_fw); 837 adev->gfx.mec_fw = NULL; 838 } 839 840 return err; 841 } 842 843 static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev) 844 { 845 const struct psp_firmware_header_v1_0 *toc_hdr; 846 int err = 0; 847 char fw_name[40]; 848 char ucode_prefix[30]; 849 850 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); 851 852 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix); 853 err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev); 854 if (err) 855 goto out; 856 857 err = amdgpu_ucode_validate(adev->psp.toc_fw); 858 if (err) 859 goto out; 860 861 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 862 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 863 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 864 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 865 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 866 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 867 return 0; 868 out: 869 dev_err(adev->dev, "Failed to load TOC microcode\n"); 870 release_firmware(adev->psp.toc_fw); 871 adev->psp.toc_fw = NULL; 872 return err; 873 } 874 875 static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev) 876 { 877 u32 count = 0; 878 const struct cs_section_def *sect = NULL; 879 const struct cs_extent_def *ext = NULL; 880 881 /* begin clear state */ 882 count += 2; 883 /* context control state */ 884 count += 3; 885 886 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { 887 for (ext = sect->section; ext->extent != NULL; ++ext) { 888 if (sect->id == SECT_CONTEXT) 889 count += 2 + ext->reg_count; 890 else 891 return 0; 892 } 893 } 894 895 /* set PA_SC_TILE_STEERING_OVERRIDE */ 896 count += 3; 897 /* end clear state */ 898 count += 2; 899 /* clear state */ 900 count += 2; 901 902 return count; 903 } 904 905 static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, 906 volatile u32 *buffer) 907 { 908 u32 count = 0, i; 909 const struct cs_section_def *sect = NULL; 910 const struct cs_extent_def *ext = NULL; 911 int ctx_reg_offset; 912 913 if (adev->gfx.rlc.cs_data == NULL) 914 return; 915 if (buffer == NULL) 916 return; 917 918 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 919 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 920 921 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 922 buffer[count++] = cpu_to_le32(0x80000000); 923 buffer[count++] = cpu_to_le32(0x80000000); 924 925 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { 926 for (ext = sect->section; ext->extent != NULL; ++ext) { 927 if (sect->id == SECT_CONTEXT) { 928 buffer[count++] = 929 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); 930 buffer[count++] = cpu_to_le32(ext->reg_index - 931 PACKET3_SET_CONTEXT_REG_START); 932 for (i = 0; i < ext->reg_count; i++) 933 buffer[count++] = 
cpu_to_le32(ext->extent[i]); 934 } else { 935 return; 936 } 937 } 938 } 939 940 ctx_reg_offset = 941 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 942 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 943 buffer[count++] = cpu_to_le32(ctx_reg_offset); 944 buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override); 945 946 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 947 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); 948 949 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); 950 buffer[count++] = cpu_to_le32(0); 951 } 952 953 static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev) 954 { 955 /* clear state block */ 956 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 957 &adev->gfx.rlc.clear_state_gpu_addr, 958 (void **)&adev->gfx.rlc.cs_ptr); 959 960 /* jump table block */ 961 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 962 &adev->gfx.rlc.cp_table_gpu_addr, 963 (void **)&adev->gfx.rlc.cp_table_ptr); 964 } 965 966 static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) 967 { 968 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; 969 970 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; 971 reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); 972 reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1); 973 reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2); 974 reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3); 975 reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL); 976 reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX); 977 reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0); 978 adev->gfx.rlc.rlcg_reg_access_supported = true; 979 } 980 981 static int gfx_v11_0_rlc_init(struct amdgpu_device *adev) 982 { 983 const struct cs_section_def *cs_data; 984 int r; 985 986 adev->gfx.rlc.cs_data = gfx11_cs_data; 987 988 cs_data = adev->gfx.rlc.cs_data; 989 990 if (cs_data) { 991 /* init clear state block */ 992 r = amdgpu_gfx_rlc_init_csb(adev); 993 if (r) 994 return r; 995 } 996 997 /* init spm vmid with 0xf */ 998 if (adev->gfx.rlc.funcs->update_spm_vmid) 999 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); 1000 1001 return 0; 1002 } 1003 1004 static void gfx_v11_0_mec_fini(struct amdgpu_device *adev) 1005 { 1006 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 1007 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); 1008 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL); 1009 } 1010 1011 static int gfx_v11_0_me_init(struct amdgpu_device *adev) 1012 { 1013 int r; 1014 1015 bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES); 1016 1017 amdgpu_gfx_graphics_queue_acquire(adev); 1018 1019 r = gfx_v11_0_init_microcode(adev); 1020 if (r) 1021 DRM_ERROR("Failed to load gfx firmware!\n"); 1022 1023 return r; 1024 } 1025 1026 static int gfx_v11_0_mec_init(struct amdgpu_device *adev) 1027 { 1028 int r; 1029 u32 *hpd; 1030 size_t mec_hpd_size; 1031 1032 bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 1033 1034 /* take ownership of the relevant compute queues */ 1035 amdgpu_gfx_compute_queue_acquire(adev); 1036 mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE; 1037 1038 if (mec_hpd_size) { 1039 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 1040 AMDGPU_GEM_DOMAIN_GTT, 1041 &adev->gfx.mec.hpd_eop_obj, 1042 
&adev->gfx.mec.hpd_eop_gpu_addr, 1043 (void **)&hpd); 1044 if (r) { 1045 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 1046 gfx_v11_0_mec_fini(adev); 1047 return r; 1048 } 1049 1050 memset(hpd, 0, mec_hpd_size); 1051 1052 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 1053 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 1054 } 1055 1056 return 0; 1057 } 1058 1059 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address) 1060 { 1061 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 1062 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 1063 (address << SQ_IND_INDEX__INDEX__SHIFT)); 1064 return RREG32_SOC15(GC, 0, regSQ_IND_DATA); 1065 } 1066 1067 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave, 1068 uint32_t thread, uint32_t regno, 1069 uint32_t num, uint32_t *out) 1070 { 1071 WREG32_SOC15(GC, 0, regSQ_IND_INDEX, 1072 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 1073 (regno << SQ_IND_INDEX__INDEX__SHIFT) | 1074 (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) | 1075 (SQ_IND_INDEX__AUTO_INCR_MASK)); 1076 while (num--) 1077 *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA); 1078 } 1079 1080 static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) 1081 { 1082 /* in gfx11 the SIMD_ID is specified as part of the INSTANCE 1083 * field when performing a select_se_sh so it should be 1084 * zero here */ 1085 WARN_ON(simd != 0); 1086 1087 /* type 2 wave data */ 1088 dst[(*no_fields)++] = 2; 1089 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS); 1090 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO); 1091 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI); 1092 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO); 1093 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI); 1094 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1); 1095 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2); 1096 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC); 1097 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC); 1098 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS); 1099 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS); 1100 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); 1101 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); 1102 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); 1103 dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); 1104 } 1105 1106 static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, 1107 uint32_t wave, uint32_t start, 1108 uint32_t size, uint32_t *dst) 1109 { 1110 WARN_ON(simd != 0); 1111 1112 wave_read_regs( 1113 adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size, 1114 dst); 1115 } 1116 1117 static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, 1118 uint32_t wave, uint32_t thread, 1119 uint32_t start, uint32_t size, 1120 uint32_t *dst) 1121 { 1122 wave_read_regs( 1123 adev, wave, thread, 1124 start + SQIND_WAVE_VGPRS_OFFSET, size, dst); 1125 } 1126 1127 static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev, 1128 u32 me, u32 pipe, u32 q, u32 vm) 1129 { 1130 soc21_grbm_select(adev, me, pipe, q, vm); 1131 } 1132 1133 static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = { 1134 .get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter, 1135 .select_se_sh = &gfx_v11_0_select_se_sh, 1136 
.read_wave_data = &gfx_v11_0_read_wave_data, 1137 .read_wave_sgprs = &gfx_v11_0_read_wave_sgprs, 1138 .read_wave_vgprs = &gfx_v11_0_read_wave_vgprs, 1139 .select_me_pipe_q = &gfx_v11_0_select_me_pipe_q, 1140 .init_spm_golden = &gfx_v11_0_init_spm_golden_registers, 1141 }; 1142 1143 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) 1144 { 1145 adev->gfx.funcs = &gfx_v11_0_gfx_funcs; 1146 1147 switch (adev->ip_versions[GC_HWIP][0]) { 1148 case IP_VERSION(11, 0, 0): 1149 case IP_VERSION(11, 0, 2): 1150 adev->gfx.config.max_hw_contexts = 8; 1151 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1152 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1153 adev->gfx.config.sc_hiz_tile_fifo_size = 0; 1154 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 1155 break; 1156 case IP_VERSION(11, 0, 1): 1157 adev->gfx.config.max_hw_contexts = 8; 1158 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1159 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1160 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80; 1161 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300; 1162 break; 1163 default: 1164 BUG(); 1165 break; 1166 } 1167 1168 return 0; 1169 } 1170 1171 static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, 1172 int me, int pipe, int queue) 1173 { 1174 int r; 1175 struct amdgpu_ring *ring; 1176 unsigned int irq_type; 1177 1178 ring = &adev->gfx.gfx_ring[ring_id]; 1179 1180 ring->me = me; 1181 ring->pipe = pipe; 1182 ring->queue = queue; 1183 1184 ring->ring_obj = NULL; 1185 ring->use_doorbell = true; 1186 1187 if (!ring_id) 1188 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; 1189 else 1190 ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; 1191 sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); 1192 1193 irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; 1194 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 1195 AMDGPU_RING_PRIO_DEFAULT, NULL); 1196 if (r) 1197 return r; 1198 return 0; 1199 } 1200 1201 static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, 1202 int mec, int pipe, int queue) 1203 { 1204 int r; 1205 unsigned irq_type; 1206 struct amdgpu_ring *ring; 1207 unsigned int hw_prio; 1208 1209 ring = &adev->gfx.compute_ring[ring_id]; 1210 1211 /* mec0 is me1 */ 1212 ring->me = mec + 1; 1213 ring->pipe = pipe; 1214 ring->queue = queue; 1215 1216 ring->ring_obj = NULL; 1217 ring->use_doorbell = true; 1218 ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; 1219 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr 1220 + (ring_id * GFX11_MEC_HPD_SIZE); 1221 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); 1222 1223 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 1224 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 1225 + ring->pipe; 1226 hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? 
1227 AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; 1228 /* type-2 packets are deprecated on MEC, use type-3 instead */ 1229 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type, 1230 hw_prio, NULL); 1231 if (r) 1232 return r; 1233 1234 return 0; 1235 } 1236 1237 static struct { 1238 SOC21_FIRMWARE_ID id; 1239 unsigned int offset; 1240 unsigned int size; 1241 } rlc_autoload_info[SOC21_FIRMWARE_ID_MAX]; 1242 1243 static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc) 1244 { 1245 RLC_TABLE_OF_CONTENT *ucode = rlc_toc; 1246 1247 while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) && 1248 (ucode->id < SOC21_FIRMWARE_ID_MAX)) { 1249 rlc_autoload_info[ucode->id].id = ucode->id; 1250 rlc_autoload_info[ucode->id].offset = ucode->offset * 4; 1251 rlc_autoload_info[ucode->id].size = ucode->size * 4; 1252 1253 ucode++; 1254 } 1255 } 1256 1257 static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev) 1258 { 1259 uint32_t total_size = 0; 1260 SOC21_FIRMWARE_ID id; 1261 1262 gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr); 1263 1264 for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++) 1265 total_size += rlc_autoload_info[id].size; 1266 1267 /* In case the offset in rlc toc ucode is aligned */ 1268 if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset) 1269 total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset + 1270 rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size; 1271 1272 return total_size; 1273 } 1274 1275 static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev) 1276 { 1277 int r; 1278 uint32_t total_size; 1279 1280 total_size = gfx_v11_0_calc_toc_total_size(adev); 1281 1282 r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024, 1283 AMDGPU_GEM_DOMAIN_VRAM, 1284 &adev->gfx.rlc.rlc_autoload_bo, 1285 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1286 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1287 1288 if (r) { 1289 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); 1290 return r; 1291 } 1292 1293 return 0; 1294 } 1295 1296 static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev, 1297 SOC21_FIRMWARE_ID id, 1298 const void *fw_data, 1299 uint32_t fw_size, 1300 uint32_t *fw_autoload_mask) 1301 { 1302 uint32_t toc_offset; 1303 uint32_t toc_fw_size; 1304 char *ptr = adev->gfx.rlc.rlc_autoload_ptr; 1305 1306 if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX) 1307 return; 1308 1309 toc_offset = rlc_autoload_info[id].offset; 1310 toc_fw_size = rlc_autoload_info[id].size; 1311 1312 if (fw_size == 0) 1313 fw_size = toc_fw_size; 1314 1315 if (fw_size > toc_fw_size) 1316 fw_size = toc_fw_size; 1317 1318 memcpy(ptr + toc_offset, fw_data, fw_size); 1319 1320 if (fw_size < toc_fw_size) 1321 memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); 1322 1323 if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME)) 1324 *(uint64_t *)fw_autoload_mask |= 1ULL << id; 1325 } 1326 1327 static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev, 1328 uint32_t *fw_autoload_mask) 1329 { 1330 void *data; 1331 uint32_t size; 1332 uint64_t *toc_ptr; 1333 1334 *(uint64_t *)fw_autoload_mask |= 0x1; 1335 1336 DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask); 1337 1338 data = adev->psp.toc.start_addr; 1339 size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size; 1340 1341 toc_ptr = (uint64_t *)data + size / 8 - 1; 1342 *toc_ptr = *(uint64_t *)fw_autoload_mask; 1343 
1344 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC, 1345 data, size, fw_autoload_mask); 1346 } 1347 1348 static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev, 1349 uint32_t *fw_autoload_mask) 1350 { 1351 const __le32 *fw_data; 1352 uint32_t fw_size; 1353 const struct gfx_firmware_header_v1_0 *cp_hdr; 1354 const struct gfx_firmware_header_v2_0 *cpv2_hdr; 1355 const struct rlc_firmware_header_v2_0 *rlc_hdr; 1356 const struct rlc_firmware_header_v2_2 *rlcv22_hdr; 1357 uint16_t version_major, version_minor; 1358 1359 if (adev->gfx.rs64_enable) { 1360 /* pfp ucode */ 1361 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1362 adev->gfx.pfp_fw->data; 1363 /* instruction */ 1364 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1365 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1366 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1367 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP, 1368 fw_data, fw_size, fw_autoload_mask); 1369 /* data */ 1370 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1371 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1372 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1373 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK, 1374 fw_data, fw_size, fw_autoload_mask); 1375 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK, 1376 fw_data, fw_size, fw_autoload_mask); 1377 /* me ucode */ 1378 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1379 adev->gfx.me_fw->data; 1380 /* instruction */ 1381 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1382 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1383 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1384 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME, 1385 fw_data, fw_size, fw_autoload_mask); 1386 /* data */ 1387 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1388 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1389 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1390 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK, 1391 fw_data, fw_size, fw_autoload_mask); 1392 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK, 1393 fw_data, fw_size, fw_autoload_mask); 1394 /* mec ucode */ 1395 cpv2_hdr = (const struct gfx_firmware_header_v2_0 *) 1396 adev->gfx.mec_fw->data; 1397 /* instruction */ 1398 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1399 le32_to_cpu(cpv2_hdr->ucode_offset_bytes)); 1400 fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes); 1401 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC, 1402 fw_data, fw_size, fw_autoload_mask); 1403 /* data */ 1404 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1405 le32_to_cpu(cpv2_hdr->data_offset_bytes)); 1406 fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes); 1407 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK, 1408 fw_data, fw_size, fw_autoload_mask); 1409 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK, 1410 fw_data, fw_size, fw_autoload_mask); 1411 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK, 1412 fw_data, fw_size, fw_autoload_mask); 1413 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK, 1414 fw_data, fw_size, fw_autoload_mask); 1415 } else { 1416 /* pfp ucode */ 1417 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1418 
adev->gfx.pfp_fw->data; 1419 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1420 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1421 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); 1422 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP, 1423 fw_data, fw_size, fw_autoload_mask); 1424 1425 /* me ucode */ 1426 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1427 adev->gfx.me_fw->data; 1428 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1429 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1430 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); 1431 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME, 1432 fw_data, fw_size, fw_autoload_mask); 1433 1434 /* mec ucode */ 1435 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1436 adev->gfx.mec_fw->data; 1437 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1438 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1439 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) - 1440 cp_hdr->jt_size * 4; 1441 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC, 1442 fw_data, fw_size, fw_autoload_mask); 1443 } 1444 1445 /* rlc ucode */ 1446 rlc_hdr = (const struct rlc_firmware_header_v2_0 *) 1447 adev->gfx.rlc_fw->data; 1448 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1449 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes)); 1450 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes); 1451 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE, 1452 fw_data, fw_size, fw_autoload_mask); 1453 1454 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 1455 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 1456 if (version_major == 2) { 1457 if (version_minor >= 2) { 1458 rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 1459 1460 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1461 le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes)); 1462 fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes); 1463 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE, 1464 fw_data, fw_size, fw_autoload_mask); 1465 1466 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1467 le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes)); 1468 fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes); 1469 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT, 1470 fw_data, fw_size, fw_autoload_mask); 1471 } 1472 } 1473 } 1474 1475 static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev, 1476 uint32_t *fw_autoload_mask) 1477 { 1478 const __le32 *fw_data; 1479 uint32_t fw_size; 1480 const struct sdma_firmware_header_v2_0 *sdma_hdr; 1481 1482 sdma_hdr = (const struct sdma_firmware_header_v2_0 *) 1483 adev->sdma.instance[0].fw->data; 1484 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + 1485 le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes)); 1486 fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes); 1487 1488 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1489 SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask); 1490 1491 fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data + 1492 le32_to_cpu(sdma_hdr->ctl_ucode_offset)); 1493 fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes); 1494 1495 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1496 SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask); 1497 } 1498 1499 static 
void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev, 1500 uint32_t *fw_autoload_mask) 1501 { 1502 const __le32 *fw_data; 1503 unsigned fw_size; 1504 const struct mes_firmware_header_v1_0 *mes_hdr; 1505 int pipe, ucode_id, data_id; 1506 1507 for (pipe = 0; pipe < 2; pipe++) { 1508 if (pipe==0) { 1509 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0; 1510 data_id = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK; 1511 } else { 1512 ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1; 1513 data_id = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK; 1514 } 1515 1516 mes_hdr = (const struct mes_firmware_header_v1_0 *) 1517 adev->mes.fw[pipe]->data; 1518 1519 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1520 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)); 1521 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); 1522 1523 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1524 ucode_id, fw_data, fw_size, fw_autoload_mask); 1525 1526 fw_data = (const __le32 *)(adev->mes.fw[pipe]->data + 1527 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)); 1528 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); 1529 1530 gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, 1531 data_id, fw_data, fw_size, fw_autoload_mask); 1532 } 1533 } 1534 1535 static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) 1536 { 1537 uint32_t rlc_g_offset, rlc_g_size; 1538 uint64_t gpu_addr; 1539 uint32_t autoload_fw_id[2]; 1540 1541 memset(autoload_fw_id, 0, sizeof(uint32_t) * 2); 1542 1543 /* RLC autoload sequence 2: copy ucode */ 1544 gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id); 1545 gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id); 1546 gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id); 1547 gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id); 1548 1549 rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset; 1550 rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size; 1551 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset; 1552 1553 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr)); 1554 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr)); 1555 1556 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size); 1557 1558 /* RLC autoload sequence 3: load IMU fw */ 1559 if (adev->gfx.imu.funcs->load_microcode) 1560 adev->gfx.imu.funcs->load_microcode(adev); 1561 /* RLC autoload sequence 4 init IMU fw */ 1562 if (adev->gfx.imu.funcs->setup_imu) 1563 adev->gfx.imu.funcs->setup_imu(adev); 1564 if (adev->gfx.imu.funcs->start_imu) 1565 adev->gfx.imu.funcs->start_imu(adev); 1566 1567 /* RLC autoload sequence 5 disable gpa mode */ 1568 gfx_v11_0_disable_gpa_mode(adev); 1569 1570 return 0; 1571 } 1572 1573 static int gfx_v11_0_sw_init(void *handle) 1574 { 1575 int i, j, k, r, ring_id = 0; 1576 struct amdgpu_kiq *kiq; 1577 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1578 1579 adev->gfxhub.funcs->init(adev); 1580 1581 switch (adev->ip_versions[GC_HWIP][0]) { 1582 case IP_VERSION(11, 0, 0): 1583 case IP_VERSION(11, 0, 1): 1584 case IP_VERSION(11, 0, 2): 1585 adev->gfx.me.num_me = 1; 1586 adev->gfx.me.num_pipe_per_me = 1; 1587 adev->gfx.me.num_queue_per_pipe = 1; 1588 adev->gfx.mec.num_mec = 2; 1589 adev->gfx.mec.num_pipe_per_mec = 4; 1590 adev->gfx.mec.num_queue_per_pipe = 4; 1591 break; 1592 default: 1593 adev->gfx.me.num_me = 1; 1594 adev->gfx.me.num_pipe_per_me = 1; 1595 adev->gfx.me.num_queue_per_pipe = 1; 1596 
adev->gfx.mec.num_mec = 1; 1597 adev->gfx.mec.num_pipe_per_mec = 4; 1598 adev->gfx.mec.num_queue_per_pipe = 8; 1599 break; 1600 } 1601 1602 /* EOP Event */ 1603 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1604 GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, 1605 &adev->gfx.eop_irq); 1606 if (r) 1607 return r; 1608 1609 /* Privileged reg */ 1610 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1611 GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT, 1612 &adev->gfx.priv_reg_irq); 1613 if (r) 1614 return r; 1615 1616 /* Privileged inst */ 1617 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, 1618 GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT, 1619 &adev->gfx.priv_inst_irq); 1620 if (r) 1621 return r; 1622 1623 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1624 1625 if (adev->gfx.imu.funcs) { 1626 if (adev->gfx.imu.funcs->init_microcode) { 1627 r = adev->gfx.imu.funcs->init_microcode(adev); 1628 if (r) 1629 DRM_ERROR("Failed to load imu firmware!\n"); 1630 } 1631 } 1632 1633 r = gfx_v11_0_me_init(adev); 1634 if (r) 1635 return r; 1636 1637 r = gfx_v11_0_rlc_init(adev); 1638 if (r) { 1639 DRM_ERROR("Failed to init rlc BOs!\n"); 1640 return r; 1641 } 1642 1643 r = gfx_v11_0_mec_init(adev); 1644 if (r) { 1645 DRM_ERROR("Failed to init MEC BOs!\n"); 1646 return r; 1647 } 1648 1649 /* set up the gfx ring */ 1650 for (i = 0; i < adev->gfx.me.num_me; i++) { 1651 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 1652 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 1653 if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j)) 1654 continue; 1655 1656 r = gfx_v11_0_gfx_ring_init(adev, ring_id, 1657 i, k, j); 1658 if (r) 1659 return r; 1660 ring_id++; 1661 } 1662 } 1663 } 1664 1665 ring_id = 0; 1666 /* set up the compute queues - allocate horizontally across pipes */ 1667 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1668 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1669 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 1670 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, 1671 j)) 1672 continue; 1673 1674 r = gfx_v11_0_compute_ring_init(adev, ring_id, 1675 i, k, j); 1676 if (r) 1677 return r; 1678 1679 ring_id++; 1680 } 1681 } 1682 } 1683 1684 if (!adev->enable_mes_kiq) { 1685 r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE); 1686 if (r) { 1687 DRM_ERROR("Failed to init KIQ BOs!\n"); 1688 return r; 1689 } 1690 1691 kiq = &adev->gfx.kiq; 1692 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); 1693 if (r) 1694 return r; 1695 } 1696 1697 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd)); 1698 if (r) 1699 return r; 1700 1701 /* allocate visible FB for rlc auto-loading fw */ 1702 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 1703 r = gfx_v11_0_init_toc_microcode(adev); 1704 if (r) 1705 dev_err(adev->dev, "Failed to load toc firmware!\n"); 1706 r = gfx_v11_0_rlc_autoload_buffer_init(adev); 1707 if (r) 1708 return r; 1709 } 1710 1711 r = gfx_v11_0_gpu_early_init(adev); 1712 if (r) 1713 return r; 1714 1715 return 0; 1716 } 1717 1718 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev) 1719 { 1720 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj, 1721 &adev->gfx.pfp.pfp_fw_gpu_addr, 1722 (void **)&adev->gfx.pfp.pfp_fw_ptr); 1723 1724 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj, 1725 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 1726 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 1727 } 1728 1729 static void gfx_v11_0_me_fini(struct amdgpu_device *adev) 1730 { 1731 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj, 1732 
&adev->gfx.me.me_fw_gpu_addr, 1733 (void **)&adev->gfx.me.me_fw_ptr); 1734 1735 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj, 1736 &adev->gfx.me.me_fw_data_gpu_addr, 1737 (void **)&adev->gfx.me.me_fw_data_ptr); 1738 } 1739 1740 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev) 1741 { 1742 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, 1743 &adev->gfx.rlc.rlc_autoload_gpu_addr, 1744 (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1745 } 1746 1747 static int gfx_v11_0_sw_fini(void *handle) 1748 { 1749 int i; 1750 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1751 1752 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1753 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 1754 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1755 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1756 1757 amdgpu_gfx_mqd_sw_fini(adev); 1758 1759 if (!adev->enable_mes_kiq) { 1760 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); 1761 amdgpu_gfx_kiq_fini(adev); 1762 } 1763 1764 gfx_v11_0_pfp_fini(adev); 1765 gfx_v11_0_me_fini(adev); 1766 gfx_v11_0_rlc_fini(adev); 1767 gfx_v11_0_mec_fini(adev); 1768 1769 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) 1770 gfx_v11_0_rlc_autoload_buffer_fini(adev); 1771 1772 gfx_v11_0_free_microcode(adev); 1773 1774 return 0; 1775 } 1776 1777 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, 1778 u32 sh_num, u32 instance) 1779 { 1780 u32 data; 1781 1782 if (instance == 0xffffffff) 1783 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 1784 INSTANCE_BROADCAST_WRITES, 1); 1785 else 1786 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, 1787 instance); 1788 1789 if (se_num == 0xffffffff) 1790 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1791 1); 1792 else 1793 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1794 1795 if (sh_num == 0xffffffff) 1796 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES, 1797 1); 1798 else 1799 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num); 1800 1801 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data); 1802 } 1803 1804 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1805 { 1806 u32 data, mask; 1807 1808 data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE); 1809 data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE); 1810 1811 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; 1812 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; 1813 1814 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se / 1815 adev->gfx.config.max_sh_per_se); 1816 1817 return (~data) & mask; 1818 } 1819 1820 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) 1821 { 1822 int i, j; 1823 u32 data; 1824 u32 active_rbs = 0; 1825 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / 1826 adev->gfx.config.max_sh_per_se; 1827 1828 mutex_lock(&adev->grbm_idx_mutex); 1829 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1830 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1831 gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff); 1832 data = gfx_v11_0_get_rb_active_bitmap(adev); 1833 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 1834 rb_bitmap_width_per_sh); 1835 } 1836 } 1837 gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1838 mutex_unlock(&adev->grbm_idx_mutex); 1839 1840 adev->gfx.config.backend_enable_mask = active_rbs; 1841 adev->gfx.config.num_rbs = hweight32(active_rbs); 1842 } 1843 1844 #define DEFAULT_SH_MEM_BASES (0x6000) 1845 #define 
LDS_APP_BASE			0x1
#define SCRATCH_APP_BASE		0x2

static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
		       SCRATCH_APP_BASE;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL));
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
{
	/* TCCs are global (not instanced). */
	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);

	adev->gfx.config.tcc_disabled_mask =
		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
}

static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v11_0_setup_rb(adev);
	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v11_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v11_0_init_compute_vmid(adev);
	gfx_v11_0_init_gds_vmid(adev);
}

static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ?
1 : 0); 1974 1975 WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp); 1976 } 1977 1978 static int gfx_v11_0_init_csb(struct amdgpu_device *adev) 1979 { 1980 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 1981 1982 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI, 1983 adev->gfx.rlc.clear_state_gpu_addr >> 32); 1984 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO, 1985 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 1986 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); 1987 1988 return 0; 1989 } 1990 1991 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) 1992 { 1993 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); 1994 1995 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); 1996 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp); 1997 } 1998 1999 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev) 2000 { 2001 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2002 udelay(50); 2003 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 2004 udelay(50); 2005 } 2006 2007 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, 2008 bool enable) 2009 { 2010 uint32_t rlc_pg_cntl; 2011 2012 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL); 2013 2014 if (!enable) { 2015 /* RLC_PG_CNTL[23] = 0 (default) 2016 * RLC will wait for handshake acks with SMU 2017 * GFXOFF will be enabled 2018 * RLC_PG_CNTL[23] = 1 2019 * RLC will not issue any message to SMU 2020 * hence no handshake between SMU & RLC 2021 * GFXOFF will be disabled 2022 */ 2023 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2024 } else 2025 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK; 2026 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl); 2027 } 2028 2029 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev) 2030 { 2031 /* TODO: enable rlc & smu handshake until smu 2032 * and gfxoff feature works as expected */ 2033 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK)) 2034 gfx_v11_0_rlc_smu_handshake_cntl(adev, false); 2035 2036 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 2037 udelay(50); 2038 } 2039 2040 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev) 2041 { 2042 uint32_t tmp; 2043 2044 /* enable Save Restore Machine */ 2045 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL)); 2046 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 2047 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; 2048 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp); 2049 } 2050 2051 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev) 2052 { 2053 const struct rlc_firmware_header_v2_0 *hdr; 2054 const __le32 *fw_data; 2055 unsigned i, fw_size; 2056 2057 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2058 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2059 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2060 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 2061 2062 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, 2063 RLCG_UCODE_LOADING_START_ADDRESS); 2064 2065 for (i = 0; i < fw_size; i++) 2066 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, 2067 le32_to_cpup(fw_data++)); 2068 2069 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 2070 } 2071 2072 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev) 2073 { 2074 const struct rlc_firmware_header_v2_2 *hdr; 2075 const __le32 *fw_data; 2076 unsigned i, fw_size; 2077 u32 tmp; 2078 2079 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data; 2080 2081 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 
2082 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes)); 2083 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4; 2084 2085 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0); 2086 2087 for (i = 0; i < fw_size; i++) { 2088 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2089 msleep(1); 2090 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA, 2091 le32_to_cpup(fw_data++)); 2092 } 2093 2094 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2095 2096 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2097 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes)); 2098 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4; 2099 2100 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0); 2101 for (i = 0; i < fw_size; i++) { 2102 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2103 msleep(1); 2104 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA, 2105 le32_to_cpup(fw_data++)); 2106 } 2107 2108 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version); 2109 2110 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL); 2111 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1); 2112 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0); 2113 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp); 2114 } 2115 2116 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev) 2117 { 2118 const struct rlc_firmware_header_v2_3 *hdr; 2119 const __le32 *fw_data; 2120 unsigned i, fw_size; 2121 u32 tmp; 2122 2123 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data; 2124 2125 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2126 le32_to_cpu(hdr->rlcp_ucode_offset_bytes)); 2127 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4; 2128 2129 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0); 2130 2131 for (i = 0; i < fw_size; i++) { 2132 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2133 msleep(1); 2134 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA, 2135 le32_to_cpup(fw_data++)); 2136 } 2137 2138 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version); 2139 2140 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE); 2141 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1); 2142 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp); 2143 2144 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2145 le32_to_cpu(hdr->rlcv_ucode_offset_bytes)); 2146 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4; 2147 2148 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0); 2149 2150 for (i = 0; i < fw_size; i++) { 2151 if ((amdgpu_emu_mode == 1) && (i % 100 == 99)) 2152 msleep(1); 2153 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA, 2154 le32_to_cpup(fw_data++)); 2155 } 2156 2157 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version); 2158 2159 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL); 2160 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1); 2161 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp); 2162 } 2163 2164 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) 2165 { 2166 const struct rlc_firmware_header_v2_0 *hdr; 2167 uint16_t version_major; 2168 uint16_t version_minor; 2169 2170 if (!adev->gfx.rlc_fw) 2171 return -EINVAL; 2172 2173 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2174 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2175 2176 version_major = le16_to_cpu(hdr->header.header_version_major); 2177 version_minor = le16_to_cpu(hdr->header.header_version_minor); 2178 2179 if (version_major == 2) { 2180 gfx_v11_0_load_rlcg_microcode(adev); 2181 if (amdgpu_dpm == 1) { 2182 if (version_minor >= 2) 2183 
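				/* v2.2+ RLC images also carry LX6 IRAM/DRAM ucode; v2.3 adds the RLCP/RLCV images loaded just below */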
gfx_v11_0_load_rlc_iram_dram_microcode(adev); 2184 if (version_minor == 3) 2185 gfx_v11_0_load_rlcp_rlcv_microcode(adev); 2186 } 2187 2188 return 0; 2189 } 2190 2191 return -EINVAL; 2192 } 2193 2194 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev) 2195 { 2196 int r; 2197 2198 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2199 gfx_v11_0_init_csb(adev); 2200 2201 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */ 2202 gfx_v11_0_rlc_enable_srm(adev); 2203 } else { 2204 if (amdgpu_sriov_vf(adev)) { 2205 gfx_v11_0_init_csb(adev); 2206 return 0; 2207 } 2208 2209 adev->gfx.rlc.funcs->stop(adev); 2210 2211 /* disable CG */ 2212 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0); 2213 2214 /* disable PG */ 2215 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0); 2216 2217 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 2218 /* legacy rlc firmware loading */ 2219 r = gfx_v11_0_rlc_load_microcode(adev); 2220 if (r) 2221 return r; 2222 } 2223 2224 gfx_v11_0_init_csb(adev); 2225 2226 adev->gfx.rlc.funcs->start(adev); 2227 } 2228 return 0; 2229 } 2230 2231 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr) 2232 { 2233 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2234 uint32_t tmp; 2235 int i; 2236 2237 /* Trigger an invalidation of the L1 instruction caches */ 2238 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2239 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2240 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2241 2242 /* Wait for invalidation complete */ 2243 for (i = 0; i < usec_timeout; i++) { 2244 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2245 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2246 INVALIDATE_CACHE_COMPLETE)) 2247 break; 2248 udelay(1); 2249 } 2250 2251 if (i >= usec_timeout) { 2252 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2253 return -EINVAL; 2254 } 2255 2256 if (amdgpu_emu_mode == 1) 2257 adev->hdp.funcs->flush_hdp(adev, NULL); 2258 2259 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2260 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2261 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2262 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2263 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2264 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2265 2266 /* Program me ucode address into intruction cache address register */ 2267 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2268 lower_32_bits(addr) & 0xFFFFF000); 2269 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2270 upper_32_bits(addr)); 2271 2272 return 0; 2273 } 2274 2275 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr) 2276 { 2277 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2278 uint32_t tmp; 2279 int i; 2280 2281 /* Trigger an invalidation of the L1 instruction caches */ 2282 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2283 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2284 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2285 2286 /* Wait for invalidation complete */ 2287 for (i = 0; i < usec_timeout; i++) { 2288 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2289 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2290 INVALIDATE_CACHE_COMPLETE)) 2291 break; 2292 udelay(1); 2293 } 2294 2295 if (i >= usec_timeout) { 2296 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2297 return -EINVAL; 2298 } 2299 2300 if (amdgpu_emu_mode == 1) 2301 adev->hdp.funcs->flush_hdp(adev, NULL); 2302 2303 tmp = RREG32_SOC15(GC, 0, 
regCP_PFP_IC_BASE_CNTL); 2304 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2305 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2306 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2307 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2308 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2309 2310 /* Program pfp ucode address into intruction cache address register */ 2311 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2312 lower_32_bits(addr) & 0xFFFFF000); 2313 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2314 upper_32_bits(addr)); 2315 2316 return 0; 2317 } 2318 2319 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr) 2320 { 2321 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2322 uint32_t tmp; 2323 int i; 2324 2325 /* Trigger an invalidation of the L1 instruction caches */ 2326 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2327 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2328 2329 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2330 2331 /* Wait for invalidation complete */ 2332 for (i = 0; i < usec_timeout; i++) { 2333 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2334 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2335 INVALIDATE_CACHE_COMPLETE)) 2336 break; 2337 udelay(1); 2338 } 2339 2340 if (i >= usec_timeout) { 2341 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2342 return -EINVAL; 2343 } 2344 2345 if (amdgpu_emu_mode == 1) 2346 adev->hdp.funcs->flush_hdp(adev, NULL); 2347 2348 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2349 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2350 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2351 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2352 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2353 2354 /* Program mec1 ucode address into intruction cache address register */ 2355 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, 2356 lower_32_bits(addr) & 0xFFFFF000); 2357 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2358 upper_32_bits(addr)); 2359 2360 return 0; 2361 } 2362 2363 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2364 { 2365 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2366 uint32_t tmp; 2367 unsigned i, pipe_id; 2368 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2369 2370 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2371 adev->gfx.pfp_fw->data; 2372 2373 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2374 lower_32_bits(addr)); 2375 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2376 upper_32_bits(addr)); 2377 2378 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2379 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2380 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2381 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2382 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2383 2384 /* 2385 * Programming any of the CP_PFP_IC_BASE registers 2386 * forces invalidation of the ME L1 I$. 
Wait for the 2387 * invalidation complete 2388 */ 2389 for (i = 0; i < usec_timeout; i++) { 2390 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2391 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2392 INVALIDATE_CACHE_COMPLETE)) 2393 break; 2394 udelay(1); 2395 } 2396 2397 if (i >= usec_timeout) { 2398 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2399 return -EINVAL; 2400 } 2401 2402 /* Prime the L1 instruction caches */ 2403 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2404 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2405 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2406 /* Waiting for cache primed*/ 2407 for (i = 0; i < usec_timeout; i++) { 2408 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2409 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2410 ICACHE_PRIMED)) 2411 break; 2412 udelay(1); 2413 } 2414 2415 if (i >= usec_timeout) { 2416 dev_err(adev->dev, "failed to prime instruction cache\n"); 2417 return -EINVAL; 2418 } 2419 2420 mutex_lock(&adev->srbm_mutex); 2421 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2422 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2423 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2424 (pfp_hdr->ucode_start_addr_hi << 30) | 2425 (pfp_hdr->ucode_start_addr_lo >> 2)); 2426 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2427 pfp_hdr->ucode_start_addr_hi >> 2); 2428 2429 /* 2430 * Program CP_ME_CNTL to reset given PIPE to take 2431 * effect of CP_PFP_PRGRM_CNTR_START. 2432 */ 2433 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2434 if (pipe_id == 0) 2435 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2436 PFP_PIPE0_RESET, 1); 2437 else 2438 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2439 PFP_PIPE1_RESET, 1); 2440 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2441 2442 /* Clear pfp pipe0 reset bit. 
*/ 2443 if (pipe_id == 0) 2444 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2445 PFP_PIPE0_RESET, 0); 2446 else 2447 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2448 PFP_PIPE1_RESET, 0); 2449 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2450 2451 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 2452 lower_32_bits(addr2)); 2453 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 2454 upper_32_bits(addr2)); 2455 } 2456 soc21_grbm_select(adev, 0, 0, 0, 0); 2457 mutex_unlock(&adev->srbm_mutex); 2458 2459 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2460 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2461 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2462 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2463 2464 /* Invalidate the data caches */ 2465 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2466 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2467 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2468 2469 for (i = 0; i < usec_timeout; i++) { 2470 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2471 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2472 INVALIDATE_DCACHE_COMPLETE)) 2473 break; 2474 udelay(1); 2475 } 2476 2477 if (i >= usec_timeout) { 2478 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2479 return -EINVAL; 2480 } 2481 2482 return 0; 2483 } 2484 2485 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2486 { 2487 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2488 uint32_t tmp; 2489 unsigned i, pipe_id; 2490 const struct gfx_firmware_header_v2_0 *me_hdr; 2491 2492 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2493 adev->gfx.me_fw->data; 2494 2495 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2496 lower_32_bits(addr)); 2497 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 2498 upper_32_bits(addr)); 2499 2500 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2501 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2502 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2503 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2504 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 2505 2506 /* 2507 * Programming any of the CP_ME_IC_BASE registers 2508 * forces invalidation of the ME L1 I$. 
Wait for the 2509 * invalidation complete 2510 */ 2511 for (i = 0; i < usec_timeout; i++) { 2512 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2513 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2514 INVALIDATE_CACHE_COMPLETE)) 2515 break; 2516 udelay(1); 2517 } 2518 2519 if (i >= usec_timeout) { 2520 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2521 return -EINVAL; 2522 } 2523 2524 /* Prime the instruction caches */ 2525 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2526 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 2527 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 2528 2529 /* Waiting for instruction cache primed*/ 2530 for (i = 0; i < usec_timeout; i++) { 2531 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 2532 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2533 ICACHE_PRIMED)) 2534 break; 2535 udelay(1); 2536 } 2537 2538 if (i >= usec_timeout) { 2539 dev_err(adev->dev, "failed to prime instruction cache\n"); 2540 return -EINVAL; 2541 } 2542 2543 mutex_lock(&adev->srbm_mutex); 2544 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 2545 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2546 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2547 (me_hdr->ucode_start_addr_hi << 30) | 2548 (me_hdr->ucode_start_addr_lo >> 2) ); 2549 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2550 me_hdr->ucode_start_addr_hi>>2); 2551 2552 /* 2553 * Program CP_ME_CNTL to reset given PIPE to take 2554 * effect of CP_PFP_PRGRM_CNTR_START. 2555 */ 2556 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2557 if (pipe_id == 0) 2558 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2559 ME_PIPE0_RESET, 1); 2560 else 2561 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2562 ME_PIPE1_RESET, 1); 2563 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2564 2565 /* Clear pfp pipe0 reset bit. 
*/ 2566 if (pipe_id == 0) 2567 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2568 ME_PIPE0_RESET, 0); 2569 else 2570 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 2571 ME_PIPE1_RESET, 0); 2572 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2573 2574 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 2575 lower_32_bits(addr2)); 2576 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 2577 upper_32_bits(addr2)); 2578 } 2579 soc21_grbm_select(adev, 0, 0, 0, 0); 2580 mutex_unlock(&adev->srbm_mutex); 2581 2582 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 2583 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 2584 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 2585 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 2586 2587 /* Invalidate the data caches */ 2588 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2589 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2590 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 2591 2592 for (i = 0; i < usec_timeout; i++) { 2593 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 2594 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 2595 INVALIDATE_DCACHE_COMPLETE)) 2596 break; 2597 udelay(1); 2598 } 2599 2600 if (i >= usec_timeout) { 2601 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 2602 return -EINVAL; 2603 } 2604 2605 return 0; 2606 } 2607 2608 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2) 2609 { 2610 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2611 uint32_t tmp; 2612 unsigned i; 2613 const struct gfx_firmware_header_v2_0 *mec_hdr; 2614 2615 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2616 adev->gfx.mec_fw->data; 2617 2618 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2619 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2620 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2621 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2622 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp); 2623 2624 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL); 2625 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0); 2626 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0); 2627 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp); 2628 2629 mutex_lock(&adev->srbm_mutex); 2630 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { 2631 soc21_grbm_select(adev, 1, i, 0, 0); 2632 2633 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2); 2634 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI, 2635 upper_32_bits(addr2)); 2636 2637 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2638 mec_hdr->ucode_start_addr_lo >> 2 | 2639 mec_hdr->ucode_start_addr_hi << 30); 2640 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2641 mec_hdr->ucode_start_addr_hi >> 2); 2642 2643 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr); 2644 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI, 2645 upper_32_bits(addr)); 2646 } 2647 mutex_unlock(&adev->srbm_mutex); 2648 soc21_grbm_select(adev, 0, 0, 0, 0); 2649 2650 /* Trigger an invalidation of the L1 instruction caches */ 2651 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2652 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 2653 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp); 2654 2655 /* Wait for invalidation complete */ 2656 for (i = 0; i < usec_timeout; i++) { 2657 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL); 2658 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL, 2659 INVALIDATE_DCACHE_COMPLETE)) 2660 break; 2661 udelay(1); 2662 } 2663 2664 if (i >= 
usec_timeout) { 2665 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2666 return -EINVAL; 2667 } 2668 2669 /* Trigger an invalidation of the L1 instruction caches */ 2670 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2671 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2672 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp); 2673 2674 /* Wait for invalidation complete */ 2675 for (i = 0; i < usec_timeout; i++) { 2676 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL); 2677 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2678 INVALIDATE_CACHE_COMPLETE)) 2679 break; 2680 udelay(1); 2681 } 2682 2683 if (i >= usec_timeout) { 2684 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2685 return -EINVAL; 2686 } 2687 2688 return 0; 2689 } 2690 2691 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev) 2692 { 2693 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2694 const struct gfx_firmware_header_v2_0 *me_hdr; 2695 const struct gfx_firmware_header_v2_0 *mec_hdr; 2696 uint32_t pipe_id, tmp; 2697 2698 mec_hdr = (const struct gfx_firmware_header_v2_0 *) 2699 adev->gfx.mec_fw->data; 2700 me_hdr = (const struct gfx_firmware_header_v2_0 *) 2701 adev->gfx.me_fw->data; 2702 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2703 adev->gfx.pfp_fw->data; 2704 2705 /* config pfp program start addr */ 2706 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2707 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2708 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 2709 (pfp_hdr->ucode_start_addr_hi << 30) | 2710 (pfp_hdr->ucode_start_addr_lo >> 2)); 2711 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 2712 pfp_hdr->ucode_start_addr_hi >> 2); 2713 } 2714 soc21_grbm_select(adev, 0, 0, 0, 0); 2715 2716 /* reset pfp pipe */ 2717 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2718 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1); 2719 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1); 2720 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2721 2722 /* clear pfp pipe reset */ 2723 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0); 2724 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0); 2725 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2726 2727 /* config me program start addr */ 2728 for (pipe_id = 0; pipe_id < 2; pipe_id++) { 2729 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 2730 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 2731 (me_hdr->ucode_start_addr_hi << 30) | 2732 (me_hdr->ucode_start_addr_lo >> 2) ); 2733 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 2734 me_hdr->ucode_start_addr_hi>>2); 2735 } 2736 soc21_grbm_select(adev, 0, 0, 0, 0); 2737 2738 /* reset me pipe */ 2739 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2740 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1); 2741 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1); 2742 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2743 2744 /* clear me pipe reset */ 2745 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0); 2746 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0); 2747 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2748 2749 /* config mec program start addr */ 2750 for (pipe_id = 0; pipe_id < 4; pipe_id++) { 2751 soc21_grbm_select(adev, 1, pipe_id, 0, 0); 2752 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START, 2753 mec_hdr->ucode_start_addr_lo >> 2 | 2754 mec_hdr->ucode_start_addr_hi << 30); 2755 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI, 2756 mec_hdr->ucode_start_addr_hi >> 2); 2757 } 2758 soc21_grbm_select(adev, 0, 0, 0, 0); 2759 } 2760 2761 static int 
gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) 2762 { 2763 uint32_t cp_status; 2764 uint32_t bootload_status; 2765 int i, r; 2766 uint64_t addr, addr2; 2767 2768 for (i = 0; i < adev->usec_timeout; i++) { 2769 cp_status = RREG32_SOC15(GC, 0, regCP_STAT); 2770 2771 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1)) 2772 bootload_status = RREG32_SOC15(GC, 0, 2773 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1); 2774 else 2775 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS); 2776 2777 if ((cp_status == 0) && 2778 (REG_GET_FIELD(bootload_status, 2779 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) { 2780 break; 2781 } 2782 udelay(1); 2783 } 2784 2785 if (i >= adev->usec_timeout) { 2786 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n"); 2787 return -ETIMEDOUT; 2788 } 2789 2790 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 2791 if (adev->gfx.rs64_enable) { 2792 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2793 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset; 2794 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2795 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset; 2796 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2); 2797 if (r) 2798 return r; 2799 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2800 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset; 2801 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2802 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset; 2803 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2); 2804 if (r) 2805 return r; 2806 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2807 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset; 2808 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr + 2809 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset; 2810 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2); 2811 if (r) 2812 return r; 2813 } else { 2814 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2815 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset; 2816 r = gfx_v11_0_config_me_cache(adev, addr); 2817 if (r) 2818 return r; 2819 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2820 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset; 2821 r = gfx_v11_0_config_pfp_cache(adev, addr); 2822 if (r) 2823 return r; 2824 addr = adev->gfx.rlc.rlc_autoload_gpu_addr + 2825 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset; 2826 r = gfx_v11_0_config_mec_cache(adev, addr); 2827 if (r) 2828 return r; 2829 } 2830 } 2831 2832 return 0; 2833 } 2834 2835 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2836 { 2837 int i; 2838 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 2839 2840 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); 2841 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 2842 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 2843 2844 for (i = 0; i < adev->usec_timeout; i++) { 2845 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0) 2846 break; 2847 udelay(1); 2848 } 2849 2850 if (i >= adev->usec_timeout) 2851 DRM_ERROR("failed to %s cp gfx\n", enable ? 
"unhalt" : "halt"); 2852 2853 return 0; 2854 } 2855 2856 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) 2857 { 2858 int r; 2859 const struct gfx_firmware_header_v1_0 *pfp_hdr; 2860 const __le32 *fw_data; 2861 unsigned i, fw_size; 2862 2863 pfp_hdr = (const struct gfx_firmware_header_v1_0 *) 2864 adev->gfx.pfp_fw->data; 2865 2866 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2867 2868 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2869 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 2870 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes); 2871 2872 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes, 2873 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 2874 &adev->gfx.pfp.pfp_fw_obj, 2875 &adev->gfx.pfp.pfp_fw_gpu_addr, 2876 (void **)&adev->gfx.pfp.pfp_fw_ptr); 2877 if (r) { 2878 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r); 2879 gfx_v11_0_pfp_fini(adev); 2880 return r; 2881 } 2882 2883 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size); 2884 2885 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2886 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2887 2888 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr); 2889 2890 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0); 2891 2892 for (i = 0; i < pfp_hdr->jt_size; i++) 2893 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA, 2894 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i)); 2895 2896 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 2897 2898 return 0; 2899 } 2900 2901 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev) 2902 { 2903 int r; 2904 const struct gfx_firmware_header_v2_0 *pfp_hdr; 2905 const __le32 *fw_ucode, *fw_data; 2906 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 2907 uint32_t tmp; 2908 uint32_t usec_timeout = 50000; /* wait for 50ms */ 2909 2910 pfp_hdr = (const struct gfx_firmware_header_v2_0 *) 2911 adev->gfx.pfp_fw->data; 2912 2913 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2914 2915 /* instruction */ 2916 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data + 2917 le32_to_cpu(pfp_hdr->ucode_offset_bytes)); 2918 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes); 2919 /* data */ 2920 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2921 le32_to_cpu(pfp_hdr->data_offset_bytes)); 2922 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes); 2923 2924 /* 64kb align */ 2925 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2926 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2927 &adev->gfx.pfp.pfp_fw_obj, 2928 &adev->gfx.pfp.pfp_fw_gpu_addr, 2929 (void **)&adev->gfx.pfp.pfp_fw_ptr); 2930 if (r) { 2931 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r); 2932 gfx_v11_0_pfp_fini(adev); 2933 return r; 2934 } 2935 2936 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2937 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2938 &adev->gfx.pfp.pfp_fw_data_obj, 2939 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 2940 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); 2941 if (r) { 2942 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r); 2943 gfx_v11_0_pfp_fini(adev); 2944 return r; 2945 } 2946 2947 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size); 2948 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size); 2949 2950 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2951 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj); 2952 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2953 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); 2954 2955 if (amdgpu_emu_mode == 1) 2956 adev->hdp.funcs->flush_hdp(adev, NULL); 2957 2958 
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2959 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2960 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI, 2961 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2962 2963 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2964 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2965 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2966 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2967 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp); 2968 2969 /* 2970 * Programming any of the CP_PFP_IC_BASE registers 2971 * forces invalidation of the ME L1 I$. Wait for the 2972 * invalidation complete 2973 */ 2974 for (i = 0; i < usec_timeout; i++) { 2975 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2976 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2977 INVALIDATE_CACHE_COMPLETE)) 2978 break; 2979 udelay(1); 2980 } 2981 2982 if (i >= usec_timeout) { 2983 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2984 return -EINVAL; 2985 } 2986 2987 /* Prime the L1 instruction caches */ 2988 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2989 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1); 2990 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp); 2991 /* Waiting for cache primed*/ 2992 for (i = 0; i < usec_timeout; i++) { 2993 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL); 2994 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2995 ICACHE_PRIMED)) 2996 break; 2997 udelay(1); 2998 } 2999 3000 if (i >= usec_timeout) { 3001 dev_err(adev->dev, "failed to prime instruction cache\n"); 3002 return -EINVAL; 3003 } 3004 3005 mutex_lock(&adev->srbm_mutex); 3006 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 3007 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 3008 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START, 3009 (pfp_hdr->ucode_start_addr_hi << 30) | 3010 (pfp_hdr->ucode_start_addr_lo >> 2) ); 3011 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI, 3012 pfp_hdr->ucode_start_addr_hi>>2); 3013 3014 /* 3015 * Program CP_ME_CNTL to reset given PIPE to take 3016 * effect of CP_PFP_PRGRM_CNTR_START. 3017 */ 3018 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3019 if (pipe_id == 0) 3020 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3021 PFP_PIPE0_RESET, 1); 3022 else 3023 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3024 PFP_PIPE1_RESET, 1); 3025 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3026 3027 /* Clear pfp pipe0 reset bit. 
*/ 3028 if (pipe_id == 0) 3029 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3030 PFP_PIPE0_RESET, 0); 3031 else 3032 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3033 PFP_PIPE1_RESET, 0); 3034 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3035 3036 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO, 3037 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 3038 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI, 3039 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr)); 3040 } 3041 soc21_grbm_select(adev, 0, 0, 0, 0); 3042 mutex_unlock(&adev->srbm_mutex); 3043 3044 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 3045 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 3046 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 3047 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 3048 3049 /* Invalidate the data caches */ 3050 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3051 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3052 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 3053 3054 for (i = 0; i < usec_timeout; i++) { 3055 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3056 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 3057 INVALIDATE_DCACHE_COMPLETE)) 3058 break; 3059 udelay(1); 3060 } 3061 3062 if (i >= usec_timeout) { 3063 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 3064 return -EINVAL; 3065 } 3066 3067 return 0; 3068 } 3069 3070 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) 3071 { 3072 int r; 3073 const struct gfx_firmware_header_v1_0 *me_hdr; 3074 const __le32 *fw_data; 3075 unsigned i, fw_size; 3076 3077 me_hdr = (const struct gfx_firmware_header_v1_0 *) 3078 adev->gfx.me_fw->data; 3079 3080 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 3081 3082 fw_data = (const __le32 *)(adev->gfx.me_fw->data + 3083 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 3084 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes); 3085 3086 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes, 3087 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3088 &adev->gfx.me.me_fw_obj, 3089 &adev->gfx.me.me_fw_gpu_addr, 3090 (void **)&adev->gfx.me.me_fw_ptr); 3091 if (r) { 3092 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r); 3093 gfx_v11_0_me_fini(adev); 3094 return r; 3095 } 3096 3097 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size); 3098 3099 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 3100 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 3101 3102 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr); 3103 3104 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0); 3105 3106 for (i = 0; i < me_hdr->jt_size; i++) 3107 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA, 3108 le32_to_cpup(fw_data + me_hdr->jt_offset + i)); 3109 3110 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version); 3111 3112 return 0; 3113 } 3114 3115 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev) 3116 { 3117 int r; 3118 const struct gfx_firmware_header_v2_0 *me_hdr; 3119 const __le32 *fw_ucode, *fw_data; 3120 unsigned i, pipe_id, fw_ucode_size, fw_data_size; 3121 uint32_t tmp; 3122 uint32_t usec_timeout = 50000; /* wait for 50ms */ 3123 3124 me_hdr = (const struct gfx_firmware_header_v2_0 *) 3125 adev->gfx.me_fw->data; 3126 3127 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 3128 3129 /* instruction */ 3130 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data + 3131 le32_to_cpu(me_hdr->ucode_offset_bytes)); 3132 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes); 3133 /* data */ 3134 
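	/*
	 * gfx_firmware_header_v2_0 images carry separate instruction and data
	 * segments; the data segment located below is copied into its own
	 * 64KB-aligned VRAM BO further down.
	 */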
fw_data = (const __le32 *)(adev->gfx.me_fw->data + 3135 le32_to_cpu(me_hdr->data_offset_bytes)); 3136 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes); 3137 3138 /* 64kb align*/ 3139 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3140 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 3141 &adev->gfx.me.me_fw_obj, 3142 &adev->gfx.me.me_fw_gpu_addr, 3143 (void **)&adev->gfx.me.me_fw_ptr); 3144 if (r) { 3145 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r); 3146 gfx_v11_0_me_fini(adev); 3147 return r; 3148 } 3149 3150 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3151 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 3152 &adev->gfx.me.me_fw_data_obj, 3153 &adev->gfx.me.me_fw_data_gpu_addr, 3154 (void **)&adev->gfx.me.me_fw_data_ptr); 3155 if (r) { 3156 dev_err(adev->dev, "(%d) failed to create me data bo\n", r); 3157 gfx_v11_0_pfp_fini(adev); 3158 return r; 3159 } 3160 3161 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size); 3162 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size); 3163 3164 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 3165 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj); 3166 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 3167 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); 3168 3169 if (amdgpu_emu_mode == 1) 3170 adev->hdp.funcs->flush_hdp(adev, NULL); 3171 3172 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 3173 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); 3174 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI, 3175 upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); 3176 3177 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 3178 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 3179 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 3180 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 3181 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp); 3182 3183 /* 3184 * Programming any of the CP_ME_IC_BASE registers 3185 * forces invalidation of the ME L1 I$. Wait for the 3186 * invalidation complete 3187 */ 3188 for (i = 0; i < usec_timeout; i++) { 3189 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3190 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 3191 INVALIDATE_CACHE_COMPLETE)) 3192 break; 3193 udelay(1); 3194 } 3195 3196 if (i >= usec_timeout) { 3197 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 3198 return -EINVAL; 3199 } 3200 3201 /* Prime the instruction caches */ 3202 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3203 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1); 3204 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp); 3205 3206 /* Waiting for instruction cache primed*/ 3207 for (i = 0; i < usec_timeout; i++) { 3208 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL); 3209 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 3210 ICACHE_PRIMED)) 3211 break; 3212 udelay(1); 3213 } 3214 3215 if (i >= usec_timeout) { 3216 dev_err(adev->dev, "failed to prime instruction cache\n"); 3217 return -EINVAL; 3218 } 3219 3220 mutex_lock(&adev->srbm_mutex); 3221 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) { 3222 soc21_grbm_select(adev, 0, pipe_id, 0, 0); 3223 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START, 3224 (me_hdr->ucode_start_addr_hi << 30) | 3225 (me_hdr->ucode_start_addr_lo >> 2) ); 3226 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI, 3227 me_hdr->ucode_start_addr_hi>>2); 3228 3229 /* 3230 * Program CP_ME_CNTL to reset given PIPE to take 3231 * effect of CP_PFP_PRGRM_CNTR_START. 
3232 */ 3233 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL); 3234 if (pipe_id == 0) 3235 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3236 ME_PIPE0_RESET, 1); 3237 else 3238 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3239 ME_PIPE1_RESET, 1); 3240 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3241 3242 /* Clear pfp pipe0 reset bit. */ 3243 if (pipe_id == 0) 3244 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3245 ME_PIPE0_RESET, 0); 3246 else 3247 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, 3248 ME_PIPE1_RESET, 0); 3249 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp); 3250 3251 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO, 3252 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3253 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI, 3254 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr)); 3255 } 3256 soc21_grbm_select(adev, 0, 0, 0, 0); 3257 mutex_unlock(&adev->srbm_mutex); 3258 3259 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL); 3260 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0); 3261 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0); 3262 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp); 3263 3264 /* Invalidate the data caches */ 3265 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3266 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1); 3267 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp); 3268 3269 for (i = 0; i < usec_timeout; i++) { 3270 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL); 3271 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, 3272 INVALIDATE_DCACHE_COMPLETE)) 3273 break; 3274 udelay(1); 3275 } 3276 3277 if (i >= usec_timeout) { 3278 dev_err(adev->dev, "failed to invalidate RS64 data cache\n"); 3279 return -EINVAL; 3280 } 3281 3282 return 0; 3283 } 3284 3285 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 3286 { 3287 int r; 3288 3289 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw) 3290 return -EINVAL; 3291 3292 gfx_v11_0_cp_gfx_enable(adev, false); 3293 3294 if (adev->gfx.rs64_enable) 3295 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev); 3296 else 3297 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev); 3298 if (r) { 3299 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r); 3300 return r; 3301 } 3302 3303 if (adev->gfx.rs64_enable) 3304 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev); 3305 else 3306 r = gfx_v11_0_cp_gfx_load_me_microcode(adev); 3307 if (r) { 3308 dev_err(adev->dev, "(%d) failed to load me fw\n", r); 3309 return r; 3310 } 3311 3312 return 0; 3313 } 3314 3315 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev) 3316 { 3317 struct amdgpu_ring *ring; 3318 const struct cs_section_def *sect = NULL; 3319 const struct cs_extent_def *ext = NULL; 3320 int r, i; 3321 int ctx_reg_offset; 3322 3323 /* init the CP */ 3324 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT, 3325 adev->gfx.config.max_hw_contexts - 1); 3326 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1); 3327 3328 if (!amdgpu_async_gfx_ring) 3329 gfx_v11_0_cp_gfx_enable(adev, true); 3330 3331 ring = &adev->gfx.gfx_ring[0]; 3332 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev)); 3333 if (r) { 3334 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3335 return r; 3336 } 3337 3338 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3339 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 3340 3341 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 3342 amdgpu_ring_write(ring, 0x80000000); 3343 amdgpu_ring_write(ring, 0x80000000); 3344 3345 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) { 3346 for (ext = sect->section; 
ext->extent != NULL; ++ext) { 3347 if (sect->id == SECT_CONTEXT) { 3348 amdgpu_ring_write(ring, 3349 PACKET3(PACKET3_SET_CONTEXT_REG, 3350 ext->reg_count)); 3351 amdgpu_ring_write(ring, ext->reg_index - 3352 PACKET3_SET_CONTEXT_REG_START); 3353 for (i = 0; i < ext->reg_count; i++) 3354 amdgpu_ring_write(ring, ext->extent[i]); 3355 } 3356 } 3357 } 3358 3359 ctx_reg_offset = 3360 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 3361 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 3362 amdgpu_ring_write(ring, ctx_reg_offset); 3363 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override); 3364 3365 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 3366 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 3367 3368 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3369 amdgpu_ring_write(ring, 0); 3370 3371 amdgpu_ring_commit(ring); 3372 3373 /* submit cs packet to copy state 0 to next available state */ 3374 if (adev->gfx.num_gfx_rings > 1) { 3375 /* maximum supported gfx ring is 2 */ 3376 ring = &adev->gfx.gfx_ring[1]; 3377 r = amdgpu_ring_alloc(ring, 2); 3378 if (r) { 3379 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3380 return r; 3381 } 3382 3383 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 3384 amdgpu_ring_write(ring, 0); 3385 3386 amdgpu_ring_commit(ring); 3387 } 3388 return 0; 3389 } 3390 3391 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, 3392 CP_PIPE_ID pipe) 3393 { 3394 u32 tmp; 3395 3396 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 3397 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe); 3398 3399 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 3400 } 3401 3402 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, 3403 struct amdgpu_ring *ring) 3404 { 3405 u32 tmp; 3406 3407 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 3408 if (ring->use_doorbell) { 3409 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3410 DOORBELL_OFFSET, ring->doorbell_index); 3411 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3412 DOORBELL_EN, 1); 3413 } else { 3414 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3415 DOORBELL_EN, 0); 3416 } 3417 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp); 3418 3419 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 3420 DOORBELL_RANGE_LOWER, ring->doorbell_index); 3421 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp); 3422 3423 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 3424 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 3425 } 3426 3427 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev) 3428 { 3429 struct amdgpu_ring *ring; 3430 u32 tmp; 3431 u32 rb_bufsz; 3432 u64 rb_addr, rptr_addr, wptr_gpu_addr; 3433 u32 i; 3434 3435 /* Set the write pointer delay */ 3436 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0); 3437 3438 /* set the RB to use vmid 0 */ 3439 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0); 3440 3441 /* Init gfx ring 0 for pipe 0 */ 3442 mutex_lock(&adev->srbm_mutex); 3443 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3444 3445 /* Set ring buffer size */ 3446 ring = &adev->gfx.gfx_ring[0]; 3447 rb_bufsz = order_base_2(ring->ring_size / 8); 3448 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 3449 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 3450 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3451 3452 /* Initialize the ring buffer's write pointers */ 3453 ring->wptr = 0; 3454 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr)); 3455 
WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 3456 3457 /* set the wb address wether it's enabled or not */ 3458 rptr_addr = ring->rptr_gpu_addr; 3459 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 3460 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3461 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3462 3463 wptr_gpu_addr = ring->wptr_gpu_addr; 3464 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3465 lower_32_bits(wptr_gpu_addr)); 3466 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3467 upper_32_bits(wptr_gpu_addr)); 3468 3469 mdelay(1); 3470 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp); 3471 3472 rb_addr = ring->gpu_addr >> 8; 3473 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr); 3474 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 3475 3476 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1); 3477 3478 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3479 mutex_unlock(&adev->srbm_mutex); 3480 3481 /* Init gfx ring 1 for pipe 1 */ 3482 if (adev->gfx.num_gfx_rings > 1) { 3483 mutex_lock(&adev->srbm_mutex); 3484 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1); 3485 /* maximum supported gfx ring is 2 */ 3486 ring = &adev->gfx.gfx_ring[1]; 3487 rb_bufsz = order_base_2(ring->ring_size / 8); 3488 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz); 3489 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2); 3490 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3491 /* Initialize the ring buffer's write pointers */ 3492 ring->wptr = 0; 3493 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr)); 3494 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr)); 3495 /* Set the wb address wether it's enabled or not */ 3496 rptr_addr = ring->rptr_gpu_addr; 3497 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); 3498 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 3499 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 3500 wptr_gpu_addr = ring->wptr_gpu_addr; 3501 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, 3502 lower_32_bits(wptr_gpu_addr)); 3503 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, 3504 upper_32_bits(wptr_gpu_addr)); 3505 3506 mdelay(1); 3507 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp); 3508 3509 rb_addr = ring->gpu_addr >> 8; 3510 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr); 3511 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr)); 3512 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1); 3513 3514 gfx_v11_0_cp_gfx_set_doorbell(adev, ring); 3515 mutex_unlock(&adev->srbm_mutex); 3516 } 3517 /* Switch to pipe 0 */ 3518 mutex_lock(&adev->srbm_mutex); 3519 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0); 3520 mutex_unlock(&adev->srbm_mutex); 3521 3522 /* start the ring */ 3523 gfx_v11_0_cp_gfx_start(adev); 3524 3525 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3526 ring = &adev->gfx.gfx_ring[i]; 3527 ring->sched.ready = true; 3528 } 3529 3530 return 0; 3531 } 3532 3533 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 3534 { 3535 u32 data; 3536 3537 if (adev->gfx.rs64_enable) { 3538 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL); 3539 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, 3540 enable ? 0 : 1); 3541 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 3542 enable ? 0 : 1); 3543 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 3544 enable ? 0 : 1); 3545 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 3546 enable ? 
0 : 1); 3547 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 3548 enable ? 0 : 1); 3549 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, 3550 enable ? 1 : 0); 3551 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, 3552 enable ? 1 : 0); 3553 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, 3554 enable ? 1 : 0); 3555 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, 3556 enable ? 1 : 0); 3557 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, 3558 enable ? 0 : 1); 3559 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data); 3560 } else { 3561 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL); 3562 3563 if (enable) { 3564 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0); 3565 if (!adev->enable_mes_kiq) 3566 data = REG_SET_FIELD(data, CP_MEC_CNTL, 3567 MEC_ME2_HALT, 0); 3568 } else { 3569 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1); 3570 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1); 3571 } 3572 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data); 3573 } 3574 3575 adev->gfx.kiq.ring.sched.ready = enable; 3576 3577 udelay(50); 3578 } 3579 3580 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) 3581 { 3582 const struct gfx_firmware_header_v1_0 *mec_hdr; 3583 const __le32 *fw_data; 3584 unsigned i, fw_size; 3585 u32 *fw = NULL; 3586 int r; 3587 3588 if (!adev->gfx.mec_fw) 3589 return -EINVAL; 3590 3591 gfx_v11_0_cp_compute_enable(adev, false); 3592 3593 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3594 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3595 3596 fw_data = (const __le32 *) 3597 (adev->gfx.mec_fw->data + 3598 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 3599 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes); 3600 3601 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, 3602 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 3603 &adev->gfx.mec.mec_fw_obj, 3604 &adev->gfx.mec.mec_fw_gpu_addr, 3605 (void **)&fw); 3606 if (r) { 3607 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r); 3608 gfx_v11_0_mec_fini(adev); 3609 return r; 3610 } 3611 3612 memcpy(fw, fw_data, fw_size); 3613 3614 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 3615 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 3616 3617 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr); 3618 3619 /* MEC1 */ 3620 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0); 3621 3622 for (i = 0; i < mec_hdr->jt_size; i++) 3623 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA, 3624 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 3625 3626 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); 3627 3628 return 0; 3629 } 3630 3631 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev) 3632 { 3633 const struct gfx_firmware_header_v2_0 *mec_hdr; 3634 const __le32 *fw_ucode, *fw_data; 3635 u32 tmp, fw_ucode_size, fw_data_size; 3636 u32 i, usec_timeout = 50000; /* Wait for 50 ms */ 3637 u32 *fw_ucode_ptr, *fw_data_ptr; 3638 int r; 3639 3640 if (!adev->gfx.mec_fw) 3641 return -EINVAL; 3642 3643 gfx_v11_0_cp_compute_enable(adev, false); 3644 3645 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data; 3646 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3647 3648 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data + 3649 le32_to_cpu(mec_hdr->ucode_offset_bytes)); 3650 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes); 3651 3652 fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 3653 le32_to_cpu(mec_hdr->data_offset_bytes)); 
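	/*
	 * The RS64 v2_0 firmware image (gfx_firmware_header_v2_0) carries
	 * separate instruction ("ucode") and data segments, each described by
	 * its own offset/size pair. The instruction segment is copied into
	 * mec_fw_obj and fetched through the CPC instruction cache, while the
	 * data segment goes into mec_fw_data_obj and is reached through the
	 * MEC data-cache base registers programmed further down.
	 */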
	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw_ucode_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.mec_fw_data_obj,
				      &adev->gfx.mec.mec_fw_data_gpu_addr,
				      (void **)&fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
	memcpy(fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		soc21_grbm_select(adev, 1, i, 0, 0);

		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));

		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);

		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
	}
	mutex_unlock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 0, 0, 0, 0);

	/* Trigger an invalidation of the L1 data cache */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				       INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp,
CP_CPC_IC_OP_CNTL, 3746 INVALIDATE_CACHE_COMPLETE)) 3747 break; 3748 udelay(1); 3749 } 3750 3751 if (i >= usec_timeout) { 3752 dev_err(adev->dev, "failed to invalidate instruction cache\n"); 3753 return -EINVAL; 3754 } 3755 3756 return 0; 3757 } 3758 3759 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring) 3760 { 3761 uint32_t tmp; 3762 struct amdgpu_device *adev = ring->adev; 3763 3764 /* tell RLC which is KIQ queue */ 3765 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 3766 tmp &= 0xffffff00; 3767 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 3768 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 3769 tmp |= 0x80; 3770 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 3771 } 3772 3773 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev) 3774 { 3775 /* set graphics engine doorbell range */ 3776 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, 3777 (adev->doorbell_index.gfx_ring0 * 2) << 2); 3778 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER, 3779 (adev->doorbell_index.gfx_userqueue_end * 2) << 2); 3780 3781 /* set compute engine doorbell range */ 3782 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 3783 (adev->doorbell_index.kiq * 2) << 2); 3784 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 3785 (adev->doorbell_index.userqueue_end * 2) << 2); 3786 } 3787 3788 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, 3789 struct amdgpu_mqd_prop *prop) 3790 { 3791 struct v11_gfx_mqd *mqd = m; 3792 uint64_t hqd_gpu_addr, wb_gpu_addr; 3793 uint32_t tmp; 3794 uint32_t rb_bufsz; 3795 3796 /* set up gfx hqd wptr */ 3797 mqd->cp_gfx_hqd_wptr = 0; 3798 mqd->cp_gfx_hqd_wptr_hi = 0; 3799 3800 /* set the pointer to the MQD */ 3801 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc; 3802 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 3803 3804 /* set up mqd control */ 3805 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL); 3806 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); 3807 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); 3808 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); 3809 mqd->cp_gfx_mqd_control = tmp; 3810 3811 /* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */ 3812 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID); 3813 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); 3814 mqd->cp_gfx_hqd_vmid = 0; 3815 3816 /* set up default queue priority level 3817 * 0x0 = low priority, 0x1 = high priority */ 3818 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY); 3819 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0); 3820 mqd->cp_gfx_hqd_queue_priority = tmp; 3821 3822 /* set up time quantum */ 3823 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM); 3824 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); 3825 mqd->cp_gfx_hqd_quantum = tmp; 3826 3827 /* set up gfx hqd base. 
this is similar as CP_RB_BASE */ 3828 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 3829 mqd->cp_gfx_hqd_base = hqd_gpu_addr; 3830 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr); 3831 3832 /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */ 3833 wb_gpu_addr = prop->rptr_gpu_addr; 3834 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc; 3835 mqd->cp_gfx_hqd_rptr_addr_hi = 3836 upper_32_bits(wb_gpu_addr) & 0xffff; 3837 3838 /* set up rb_wptr_poll addr */ 3839 wb_gpu_addr = prop->wptr_gpu_addr; 3840 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 3841 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 3842 3843 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */ 3844 rb_bufsz = order_base_2(prop->queue_size / 4) - 1; 3845 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL); 3846 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); 3847 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); 3848 #ifdef __BIG_ENDIAN 3849 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1); 3850 #endif 3851 mqd->cp_gfx_hqd_cntl = tmp; 3852 3853 /* set up cp_doorbell_control */ 3854 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); 3855 if (prop->use_doorbell) { 3856 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3857 DOORBELL_OFFSET, prop->doorbell_index); 3858 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3859 DOORBELL_EN, 1); 3860 } else 3861 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 3862 DOORBELL_EN, 0); 3863 mqd->cp_rb_doorbell_control = tmp; 3864 3865 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3866 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR); 3867 3868 /* active the queue */ 3869 mqd->cp_gfx_hqd_active = 1; 3870 3871 return 0; 3872 } 3873 3874 #ifdef BRING_UP_DEBUG 3875 static int gfx_v11_0_gfx_queue_init_register(struct amdgpu_ring *ring) 3876 { 3877 struct amdgpu_device *adev = ring->adev; 3878 struct v11_gfx_mqd *mqd = ring->mqd_ptr; 3879 3880 /* set mmCP_GFX_HQD_WPTR/_HI to 0 */ 3881 WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr); 3882 WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi); 3883 3884 /* set GFX_MQD_BASE */ 3885 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr); 3886 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); 3887 3888 /* set GFX_MQD_CONTROL */ 3889 WREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control); 3890 3891 /* set GFX_HQD_VMID to 0 */ 3892 WREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid); 3893 3894 WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY, 3895 mqd->cp_gfx_hqd_queue_priority); 3896 WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum); 3897 3898 /* set GFX_HQD_BASE, similar as CP_RB_BASE */ 3899 WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base); 3900 WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi); 3901 3902 /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */ 3903 WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr); 3904 WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi); 3905 3906 /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */ 3907 WREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl); 3908 3909 /* set RB_WPTR_POLL_ADDR */ 3910 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo); 3911 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi); 3912 3913 /* set RB_DOORBELL_CONTROL */ 3914 WREG32_SOC15(GC, 0, 
regCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control); 3915 3916 /* active the queue */ 3917 WREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active); 3918 3919 return 0; 3920 } 3921 #endif 3922 3923 static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring) 3924 { 3925 struct amdgpu_device *adev = ring->adev; 3926 struct v11_gfx_mqd *mqd = ring->mqd_ptr; 3927 int mqd_idx = ring - &adev->gfx.gfx_ring[0]; 3928 3929 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 3930 memset((void *)mqd, 0, sizeof(*mqd)); 3931 mutex_lock(&adev->srbm_mutex); 3932 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3933 amdgpu_ring_init_mqd(ring); 3934 #ifdef BRING_UP_DEBUG 3935 gfx_v11_0_gfx_queue_init_register(ring); 3936 #endif 3937 soc21_grbm_select(adev, 0, 0, 0, 0); 3938 mutex_unlock(&adev->srbm_mutex); 3939 if (adev->gfx.me.mqd_backup[mqd_idx]) 3940 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3941 } else if (amdgpu_in_reset(adev)) { 3942 /* reset mqd with the backup copy */ 3943 if (adev->gfx.me.mqd_backup[mqd_idx]) 3944 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); 3945 /* reset the ring */ 3946 ring->wptr = 0; 3947 *ring->wptr_cpu_addr = 0; 3948 amdgpu_ring_clear_ring(ring); 3949 #ifdef BRING_UP_DEBUG 3950 mutex_lock(&adev->srbm_mutex); 3951 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3952 gfx_v11_0_gfx_queue_init_register(ring); 3953 soc21_grbm_select(adev, 0, 0, 0, 0); 3954 mutex_unlock(&adev->srbm_mutex); 3955 #endif 3956 } else { 3957 amdgpu_ring_clear_ring(ring); 3958 } 3959 3960 return 0; 3961 } 3962 3963 #ifndef BRING_UP_DEBUG 3964 static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev) 3965 { 3966 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 3967 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 3968 int r, i; 3969 3970 if (!kiq->pmf || !kiq->pmf->kiq_map_queues) 3971 return -EINVAL; 3972 3973 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * 3974 adev->gfx.num_gfx_rings); 3975 if (r) { 3976 DRM_ERROR("Failed to lock KIQ (%d).\n", r); 3977 return r; 3978 } 3979 3980 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 3981 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]); 3982 3983 return amdgpu_ring_test_helper(kiq_ring); 3984 } 3985 #endif 3986 3987 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 3988 { 3989 int r, i; 3990 struct amdgpu_ring *ring; 3991 3992 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3993 ring = &adev->gfx.gfx_ring[i]; 3994 3995 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3996 if (unlikely(r != 0)) 3997 goto done; 3998 3999 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4000 if (!r) { 4001 r = gfx_v11_0_gfx_init_queue(ring); 4002 amdgpu_bo_kunmap(ring->mqd_obj); 4003 ring->mqd_ptr = NULL; 4004 } 4005 amdgpu_bo_unreserve(ring->mqd_obj); 4006 if (r) 4007 goto done; 4008 } 4009 #ifndef BRING_UP_DEBUG 4010 r = gfx_v11_0_kiq_enable_kgq(adev); 4011 if (r) 4012 goto done; 4013 #endif 4014 r = gfx_v11_0_cp_gfx_start(adev); 4015 if (r) 4016 goto done; 4017 4018 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4019 ring = &adev->gfx.gfx_ring[i]; 4020 ring->sched.ready = true; 4021 } 4022 done: 4023 return r; 4024 } 4025 4026 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, 4027 struct amdgpu_mqd_prop *prop) 4028 { 4029 struct v11_compute_mqd *mqd = m; 4030 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 4031 uint32_t tmp; 4032 4033 mqd->header = 0xC0310800; 4034 mqd->compute_pipelinestat_enable = 0x00000001; 4035 
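	/*
	 * The static thread management fields below are per-shader-engine CU
	 * enable masks; all-ones is the permissive default that leaves every
	 * available compute unit usable by this queue.
	 */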
mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 4036 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 4037 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 4038 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 4039 mqd->compute_misc_reserved = 0x00000007; 4040 4041 eop_base_addr = prop->eop_gpu_addr >> 8; 4042 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 4043 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 4044 4045 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4046 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); 4047 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 4048 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1)); 4049 4050 mqd->cp_hqd_eop_control = tmp; 4051 4052 /* enable doorbell? */ 4053 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 4054 4055 if (prop->use_doorbell) { 4056 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4057 DOORBELL_OFFSET, prop->doorbell_index); 4058 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4059 DOORBELL_EN, 1); 4060 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4061 DOORBELL_SOURCE, 0); 4062 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4063 DOORBELL_HIT, 0); 4064 } else { 4065 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4066 DOORBELL_EN, 0); 4067 } 4068 4069 mqd->cp_hqd_pq_doorbell_control = tmp; 4070 4071 /* disable the queue if it's active */ 4072 mqd->cp_hqd_dequeue_request = 0; 4073 mqd->cp_hqd_pq_rptr = 0; 4074 mqd->cp_hqd_pq_wptr_lo = 0; 4075 mqd->cp_hqd_pq_wptr_hi = 0; 4076 4077 /* set the pointer to the MQD */ 4078 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc; 4079 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr); 4080 4081 /* set MQD vmid to 0 */ 4082 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); 4083 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 4084 mqd->cp_mqd_control = tmp; 4085 4086 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 4087 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8; 4088 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 4089 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 4090 4091 /* set up the HQD, this is similar to CP_RB0_CNTL */ 4092 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL); 4093 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 4094 (order_base_2(prop->queue_size / 4) - 1)); 4095 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 4096 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); 4097 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 4098 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); 4099 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 4100 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 4101 mqd->cp_hqd_pq_control = tmp; 4102 4103 /* set the wb address whether it's enabled or not */ 4104 wb_gpu_addr = prop->rptr_gpu_addr; 4105 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 4106 mqd->cp_hqd_pq_rptr_report_addr_hi = 4107 upper_32_bits(wb_gpu_addr) & 0xffff; 4108 4109 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 4110 wb_gpu_addr = prop->wptr_gpu_addr; 4111 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 4112 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 4113 4114 tmp = 0; 4115 /* enable the doorbell if requested */ 4116 if (prop->use_doorbell) { 4117 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); 4118 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4119 DOORBELL_OFFSET, prop->doorbell_index); 4120 4121 tmp = 
REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4122 DOORBELL_EN, 1); 4123 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4124 DOORBELL_SOURCE, 0); 4125 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4126 DOORBELL_HIT, 0); 4127 } 4128 4129 mqd->cp_hqd_pq_doorbell_control = tmp; 4130 4131 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4132 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); 4133 4134 /* set the vmid for the queue */ 4135 mqd->cp_hqd_vmid = 0; 4136 4137 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); 4138 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); 4139 mqd->cp_hqd_persistent_state = tmp; 4140 4141 /* set MIN_IB_AVAIL_SIZE */ 4142 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); 4143 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 4144 mqd->cp_hqd_ib_control = tmp; 4145 4146 /* set static priority for a compute queue/ring */ 4147 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority; 4148 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority; 4149 4150 mqd->cp_hqd_active = prop->hqd_active; 4151 4152 return 0; 4153 } 4154 4155 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring) 4156 { 4157 struct amdgpu_device *adev = ring->adev; 4158 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4159 int j; 4160 4161 /* inactivate the queue */ 4162 if (amdgpu_sriov_vf(adev)) 4163 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0); 4164 4165 /* disable wptr polling */ 4166 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 4167 4168 /* write the EOP addr */ 4169 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR, 4170 mqd->cp_hqd_eop_base_addr_lo); 4171 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI, 4172 mqd->cp_hqd_eop_base_addr_hi); 4173 4174 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4175 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL, 4176 mqd->cp_hqd_eop_control); 4177 4178 /* enable doorbell? 
*/ 4179 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4180 mqd->cp_hqd_pq_doorbell_control); 4181 4182 /* disable the queue if it's active */ 4183 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { 4184 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); 4185 for (j = 0; j < adev->usec_timeout; j++) { 4186 if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) 4187 break; 4188 udelay(1); 4189 } 4190 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 4191 mqd->cp_hqd_dequeue_request); 4192 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 4193 mqd->cp_hqd_pq_rptr); 4194 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4195 mqd->cp_hqd_pq_wptr_lo); 4196 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4197 mqd->cp_hqd_pq_wptr_hi); 4198 } 4199 4200 /* set the pointer to the MQD */ 4201 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, 4202 mqd->cp_mqd_base_addr_lo); 4203 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, 4204 mqd->cp_mqd_base_addr_hi); 4205 4206 /* set MQD vmid to 0 */ 4207 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 4208 mqd->cp_mqd_control); 4209 4210 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 4211 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, 4212 mqd->cp_hqd_pq_base_lo); 4213 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, 4214 mqd->cp_hqd_pq_base_hi); 4215 4216 /* set up the HQD, this is similar to CP_RB0_CNTL */ 4217 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, 4218 mqd->cp_hqd_pq_control); 4219 4220 /* set the wb address whether it's enabled or not */ 4221 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR, 4222 mqd->cp_hqd_pq_rptr_report_addr_lo); 4223 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 4224 mqd->cp_hqd_pq_rptr_report_addr_hi); 4225 4226 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 4227 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR, 4228 mqd->cp_hqd_pq_wptr_poll_addr_lo); 4229 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI, 4230 mqd->cp_hqd_pq_wptr_poll_addr_hi); 4231 4232 /* enable the doorbell if requested */ 4233 if (ring->use_doorbell) { 4234 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER, 4235 (adev->doorbell_index.kiq * 2) << 2); 4236 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER, 4237 (adev->doorbell_index.userqueue_end * 2) << 2); 4238 } 4239 4240 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 4241 mqd->cp_hqd_pq_doorbell_control); 4242 4243 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4244 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 4245 mqd->cp_hqd_pq_wptr_lo); 4246 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 4247 mqd->cp_hqd_pq_wptr_hi); 4248 4249 /* set the vmid for the queue */ 4250 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid); 4251 4252 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, 4253 mqd->cp_hqd_persistent_state); 4254 4255 /* activate the queue */ 4256 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 4257 mqd->cp_hqd_active); 4258 4259 if (ring->use_doorbell) 4260 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 4261 4262 return 0; 4263 } 4264 4265 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring) 4266 { 4267 struct amdgpu_device *adev = ring->adev; 4268 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4269 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; 4270 4271 gfx_v11_0_kiq_setting(ring); 4272 4273 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4274 /* reset MQD to a clean status */ 4275 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4276 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4277 4278 /* reset ring buffer */ 4279 ring->wptr = 0; 4280 amdgpu_ring_clear_ring(ring); 4281 4282 
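		/*
		 * The HQD registers still hold pre-reset state at this point,
		 * so re-commit the restored MQD to the hardware under the
		 * SRBM mutex with the KIQ's me/pipe/queue selected in GRBM.
		 */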
mutex_lock(&adev->srbm_mutex); 4283 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4284 gfx_v11_0_kiq_init_register(ring); 4285 soc21_grbm_select(adev, 0, 0, 0, 0); 4286 mutex_unlock(&adev->srbm_mutex); 4287 } else { 4288 memset((void *)mqd, 0, sizeof(*mqd)); 4289 mutex_lock(&adev->srbm_mutex); 4290 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4291 amdgpu_ring_init_mqd(ring); 4292 gfx_v11_0_kiq_init_register(ring); 4293 soc21_grbm_select(adev, 0, 0, 0, 0); 4294 mutex_unlock(&adev->srbm_mutex); 4295 4296 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4297 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4298 } 4299 4300 return 0; 4301 } 4302 4303 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring) 4304 { 4305 struct amdgpu_device *adev = ring->adev; 4306 struct v11_compute_mqd *mqd = ring->mqd_ptr; 4307 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 4308 4309 if (!amdgpu_in_reset(adev) && !adev->in_suspend) { 4310 memset((void *)mqd, 0, sizeof(*mqd)); 4311 mutex_lock(&adev->srbm_mutex); 4312 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4313 amdgpu_ring_init_mqd(ring); 4314 soc21_grbm_select(adev, 0, 0, 0, 0); 4315 mutex_unlock(&adev->srbm_mutex); 4316 4317 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4318 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 4319 } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */ 4320 /* reset MQD to a clean status */ 4321 if (adev->gfx.mec.mqd_backup[mqd_idx]) 4322 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 4323 4324 /* reset ring buffer */ 4325 ring->wptr = 0; 4326 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); 4327 amdgpu_ring_clear_ring(ring); 4328 } else { 4329 amdgpu_ring_clear_ring(ring); 4330 } 4331 4332 return 0; 4333 } 4334 4335 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) 4336 { 4337 struct amdgpu_ring *ring; 4338 int r; 4339 4340 ring = &adev->gfx.kiq.ring; 4341 4342 r = amdgpu_bo_reserve(ring->mqd_obj, false); 4343 if (unlikely(r != 0)) 4344 return r; 4345 4346 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4347 if (unlikely(r != 0)) { 4348 amdgpu_bo_unreserve(ring->mqd_obj); 4349 return r; 4350 } 4351 4352 gfx_v11_0_kiq_init_queue(ring); 4353 amdgpu_bo_kunmap(ring->mqd_obj); 4354 ring->mqd_ptr = NULL; 4355 amdgpu_bo_unreserve(ring->mqd_obj); 4356 ring->sched.ready = true; 4357 return 0; 4358 } 4359 4360 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) 4361 { 4362 struct amdgpu_ring *ring = NULL; 4363 int r = 0, i; 4364 4365 if (!amdgpu_async_gfx_ring) 4366 gfx_v11_0_cp_compute_enable(adev, true); 4367 4368 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4369 ring = &adev->gfx.compute_ring[i]; 4370 4371 r = amdgpu_bo_reserve(ring->mqd_obj, false); 4372 if (unlikely(r != 0)) 4373 goto done; 4374 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4375 if (!r) { 4376 r = gfx_v11_0_kcq_init_queue(ring); 4377 amdgpu_bo_kunmap(ring->mqd_obj); 4378 ring->mqd_ptr = NULL; 4379 } 4380 amdgpu_bo_unreserve(ring->mqd_obj); 4381 if (r) 4382 goto done; 4383 } 4384 4385 r = amdgpu_gfx_enable_kcq(adev); 4386 done: 4387 return r; 4388 } 4389 4390 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) 4391 { 4392 int r, i; 4393 struct amdgpu_ring *ring; 4394 4395 if (!(adev->flags & AMD_IS_APU)) 4396 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4397 4398 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4399 /* legacy firmware loading */ 4400 r = gfx_v11_0_cp_gfx_load_microcode(adev); 4401 if 
(r) 4402 return r; 4403 4404 if (adev->gfx.rs64_enable) 4405 r = gfx_v11_0_cp_compute_load_microcode_rs64(adev); 4406 else 4407 r = gfx_v11_0_cp_compute_load_microcode(adev); 4408 if (r) 4409 return r; 4410 } 4411 4412 gfx_v11_0_cp_set_doorbell_range(adev); 4413 4414 if (amdgpu_async_gfx_ring) { 4415 gfx_v11_0_cp_compute_enable(adev, true); 4416 gfx_v11_0_cp_gfx_enable(adev, true); 4417 } 4418 4419 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init) 4420 r = amdgpu_mes_kiq_hw_init(adev); 4421 else 4422 r = gfx_v11_0_kiq_resume(adev); 4423 if (r) 4424 return r; 4425 4426 r = gfx_v11_0_kcq_resume(adev); 4427 if (r) 4428 return r; 4429 4430 if (!amdgpu_async_gfx_ring) { 4431 r = gfx_v11_0_cp_gfx_resume(adev); 4432 if (r) 4433 return r; 4434 } else { 4435 r = gfx_v11_0_cp_async_gfx_ring_resume(adev); 4436 if (r) 4437 return r; 4438 } 4439 4440 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4441 ring = &adev->gfx.gfx_ring[i]; 4442 r = amdgpu_ring_test_helper(ring); 4443 if (r) 4444 return r; 4445 } 4446 4447 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4448 ring = &adev->gfx.compute_ring[i]; 4449 r = amdgpu_ring_test_helper(ring); 4450 if (r) 4451 return r; 4452 } 4453 4454 return 0; 4455 } 4456 4457 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable) 4458 { 4459 gfx_v11_0_cp_gfx_enable(adev, enable); 4460 gfx_v11_0_cp_compute_enable(adev, enable); 4461 } 4462 4463 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) 4464 { 4465 int r; 4466 bool value; 4467 4468 r = adev->gfxhub.funcs->gart_enable(adev); 4469 if (r) 4470 return r; 4471 4472 adev->hdp.funcs->flush_hdp(adev, NULL); 4473 4474 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 4475 false : true; 4476 4477 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 4478 amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); 4479 4480 return 0; 4481 } 4482 4483 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev) 4484 { 4485 u32 tmp; 4486 4487 /* select RS64 */ 4488 if (adev->gfx.rs64_enable) { 4489 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL); 4490 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1); 4491 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp); 4492 4493 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL); 4494 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1); 4495 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp); 4496 } 4497 4498 if (amdgpu_emu_mode == 1) 4499 msleep(100); 4500 } 4501 4502 static int get_gb_addr_config(struct amdgpu_device * adev) 4503 { 4504 u32 gb_addr_config; 4505 4506 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 4507 if (gb_addr_config == 0) 4508 return -EINVAL; 4509 4510 adev->gfx.config.gb_addr_config_fields.num_pkrs = 4511 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 4512 4513 adev->gfx.config.gb_addr_config = gb_addr_config; 4514 4515 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 4516 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4517 GB_ADDR_CONFIG, NUM_PIPES); 4518 4519 adev->gfx.config.max_tile_pipes = 4520 adev->gfx.config.gb_addr_config_fields.num_pipes; 4521 4522 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 4523 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4524 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS); 4525 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 4526 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4527 GB_ADDR_CONFIG, NUM_RB_PER_SE); 4528 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 4529 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4530 GB_ADDR_CONFIG, 
NUM_SHADER_ENGINES); 4531 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 4532 REG_GET_FIELD(adev->gfx.config.gb_addr_config, 4533 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); 4534 4535 return 0; 4536 } 4537 4538 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev) 4539 { 4540 uint32_t data; 4541 4542 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG); 4543 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK; 4544 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data); 4545 4546 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG); 4547 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK; 4548 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data); 4549 } 4550 4551 static int gfx_v11_0_hw_init(void *handle) 4552 { 4553 int r; 4554 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4555 4556 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { 4557 if (adev->gfx.imu.funcs) { 4558 /* RLC autoload sequence 1: Program rlc ram */ 4559 if (adev->gfx.imu.funcs->program_rlc_ram) 4560 adev->gfx.imu.funcs->program_rlc_ram(adev); 4561 } 4562 /* rlc autoload firmware */ 4563 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev); 4564 if (r) 4565 return r; 4566 } else { 4567 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 4568 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) { 4569 if (adev->gfx.imu.funcs->load_microcode) 4570 adev->gfx.imu.funcs->load_microcode(adev); 4571 if (adev->gfx.imu.funcs->setup_imu) 4572 adev->gfx.imu.funcs->setup_imu(adev); 4573 if (adev->gfx.imu.funcs->start_imu) 4574 adev->gfx.imu.funcs->start_imu(adev); 4575 } 4576 4577 /* disable gpa mode in backdoor loading */ 4578 gfx_v11_0_disable_gpa_mode(adev); 4579 } 4580 } 4581 4582 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) || 4583 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { 4584 r = gfx_v11_0_wait_for_rlc_autoload_complete(adev); 4585 if (r) { 4586 dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r); 4587 return r; 4588 } 4589 } 4590 4591 adev->gfx.is_poweron = true; 4592 4593 if(get_gb_addr_config(adev)) 4594 DRM_WARN("Invalid gb_addr_config !\n"); 4595 4596 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && 4597 adev->gfx.rs64_enable) 4598 gfx_v11_0_config_gfx_rs64(adev); 4599 4600 r = gfx_v11_0_gfxhub_enable(adev); 4601 if (r) 4602 return r; 4603 4604 if (!amdgpu_emu_mode) 4605 gfx_v11_0_init_golden_registers(adev); 4606 4607 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || 4608 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { 4609 /** 4610 * For gfx 11, rlc firmware loading relies on smu firmware is 4611 * loaded firstly, so in direct type, it has to load smc ucode 4612 * here before rlc. 
4613 */ 4614 if (!(adev->flags & AMD_IS_APU)) { 4615 r = amdgpu_pm_load_smu_firmware(adev, NULL); 4616 if (r) 4617 return r; 4618 } 4619 } 4620 4621 gfx_v11_0_constants_init(adev); 4622 4623 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 4624 gfx_v11_0_select_cp_fw_arch(adev); 4625 4626 if (adev->nbio.funcs->gc_doorbell_init) 4627 adev->nbio.funcs->gc_doorbell_init(adev); 4628 4629 r = gfx_v11_0_rlc_resume(adev); 4630 if (r) 4631 return r; 4632 4633 /* 4634 * init golden registers and rlc resume may override some registers, 4635 * reconfig them here 4636 */ 4637 gfx_v11_0_tcp_harvest(adev); 4638 4639 r = gfx_v11_0_cp_resume(adev); 4640 if (r) 4641 return r; 4642 4643 return r; 4644 } 4645 4646 #ifndef BRING_UP_DEBUG 4647 static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev) 4648 { 4649 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 4650 struct amdgpu_ring *kiq_ring = &kiq->ring; 4651 int i, r = 0; 4652 4653 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 4654 return -EINVAL; 4655 4656 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * 4657 adev->gfx.num_gfx_rings)) 4658 return -ENOMEM; 4659 4660 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 4661 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], 4662 PREEMPT_QUEUES, 0, 0); 4663 4664 if (adev->gfx.kiq.ring.sched.ready) 4665 r = amdgpu_ring_test_helper(kiq_ring); 4666 4667 return r; 4668 } 4669 #endif 4670 4671 static int gfx_v11_0_hw_fini(void *handle) 4672 { 4673 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4674 int r; 4675 uint32_t tmp; 4676 4677 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 4678 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 4679 4680 if (!adev->no_hw_access) { 4681 #ifndef BRING_UP_DEBUG 4682 if (amdgpu_async_gfx_ring) { 4683 r = gfx_v11_0_kiq_disable_kgq(adev); 4684 if (r) 4685 DRM_ERROR("KGQ disable failed\n"); 4686 } 4687 #endif 4688 if (amdgpu_gfx_disable_kcq(adev)) 4689 DRM_ERROR("KCQ disable failed\n"); 4690 4691 amdgpu_mes_kiq_hw_fini(adev); 4692 } 4693 4694 if (amdgpu_sriov_vf(adev)) { 4695 gfx_v11_0_cp_gfx_enable(adev, false); 4696 /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ 4697 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS); 4698 tmp &= 0xffffff00; 4699 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp); 4700 4701 return 0; 4702 } 4703 gfx_v11_0_cp_enable(adev, false); 4704 gfx_v11_0_enable_gui_idle_interrupt(adev, false); 4705 4706 adev->gfxhub.funcs->gart_disable(adev); 4707 4708 adev->gfx.is_poweron = false; 4709 4710 return 0; 4711 } 4712 4713 static int gfx_v11_0_suspend(void *handle) 4714 { 4715 return gfx_v11_0_hw_fini(handle); 4716 } 4717 4718 static int gfx_v11_0_resume(void *handle) 4719 { 4720 return gfx_v11_0_hw_init(handle); 4721 } 4722 4723 static bool gfx_v11_0_is_idle(void *handle) 4724 { 4725 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4726 4727 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 4728 GRBM_STATUS, GUI_ACTIVE)) 4729 return false; 4730 else 4731 return true; 4732 } 4733 4734 static int gfx_v11_0_wait_for_idle(void *handle) 4735 { 4736 unsigned i; 4737 u32 tmp; 4738 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4739 4740 for (i = 0; i < adev->usec_timeout; i++) { 4741 /* read MC_STATUS */ 4742 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) & 4743 GRBM_STATUS__GUI_ACTIVE_MASK; 4744 4745 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) 4746 return 0; 4747 udelay(1); 4748 } 4749 return -ETIMEDOUT; 4750 } 4751 4752 static int gfx_v11_0_soft_reset(void *handle) 4753 { 4754 u32 
grbm_soft_reset = 0;
	u32 tmp;
	int i, j, k;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
	WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);

	gfx_v11_0_set_safe_mode(adev);

	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
				WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);

				WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
				WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
			}
		}
	}
	for (i = 0; i < adev->gfx.me.num_me; ++i) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
				tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
				WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);

				WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
			}
		}
	}

	WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);

	/*
	 * Read the CP_VMID_RESET register back three times to give
	 * GFX_HQD_ACTIVE enough time to reach 0.
	 */
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout) {
		printk("Failed to wait for all pipes to become idle\n");
		return -EINVAL;
	}

	/********** trigger soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 1);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
	/********** exit soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 0);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET,
grbm_soft_reset); 4841 4842 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL); 4843 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1); 4844 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp); 4845 4846 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0); 4847 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0); 4848 4849 for (i = 0; i < adev->usec_timeout; i++) { 4850 if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET)) 4851 break; 4852 udelay(1); 4853 } 4854 if (i >= adev->usec_timeout) { 4855 printk("Failed to wait CP_VMID_RESET to 0\n"); 4856 return -EINVAL; 4857 } 4858 4859 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 4860 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 4861 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1); 4862 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 4863 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 4864 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp); 4865 4866 gfx_v11_0_unset_safe_mode(adev); 4867 4868 return gfx_v11_0_cp_resume(adev); 4869 } 4870 4871 static bool gfx_v11_0_check_soft_reset(void *handle) 4872 { 4873 int i, r; 4874 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4875 struct amdgpu_ring *ring; 4876 long tmo = msecs_to_jiffies(1000); 4877 4878 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4879 ring = &adev->gfx.gfx_ring[i]; 4880 r = amdgpu_ring_test_ib(ring, tmo); 4881 if (r) 4882 return true; 4883 } 4884 4885 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4886 ring = &adev->gfx.compute_ring[i]; 4887 r = amdgpu_ring_test_ib(ring, tmo); 4888 if (r) 4889 return true; 4890 } 4891 4892 return false; 4893 } 4894 4895 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) 4896 { 4897 uint64_t clock; 4898 4899 amdgpu_gfx_off_ctrl(adev, false); 4900 mutex_lock(&adev->gfx.gpu_clock_mutex); 4901 clock = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER) | 4902 ((uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER) << 32ULL); 4903 mutex_unlock(&adev->gfx.gpu_clock_mutex); 4904 amdgpu_gfx_off_ctrl(adev, true); 4905 return clock; 4906 } 4907 4908 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 4909 uint32_t vmid, 4910 uint32_t gds_base, uint32_t gds_size, 4911 uint32_t gws_base, uint32_t gws_size, 4912 uint32_t oa_base, uint32_t oa_size) 4913 { 4914 struct amdgpu_device *adev = ring->adev; 4915 4916 /* GDS Base */ 4917 gfx_v11_0_write_data_to_reg(ring, 0, false, 4918 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid, 4919 gds_base); 4920 4921 /* GDS Size */ 4922 gfx_v11_0_write_data_to_reg(ring, 0, false, 4923 SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid, 4924 gds_size); 4925 4926 /* GWS */ 4927 gfx_v11_0_write_data_to_reg(ring, 0, false, 4928 SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid, 4929 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 4930 4931 /* OA */ 4932 gfx_v11_0_write_data_to_reg(ring, 0, false, 4933 SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid, 4934 (1 << (oa_size + oa_base)) - (1 << oa_base)); 4935 } 4936 4937 static int gfx_v11_0_early_init(void *handle) 4938 { 4939 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4940 4941 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS; 4942 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 4943 AMDGPU_MAX_COMPUTE_RINGS); 4944 4945 gfx_v11_0_set_kiq_pm4_funcs(adev); 4946 gfx_v11_0_set_ring_funcs(adev); 4947 gfx_v11_0_set_irq_funcs(adev); 4948 gfx_v11_0_set_gds_init(adev); 4949 gfx_v11_0_set_rlc_funcs(adev); 4950 gfx_v11_0_set_mqd_funcs(adev); 
4951 gfx_v11_0_set_imu_funcs(adev); 4952 4953 gfx_v11_0_init_rlcg_reg_access_ctrl(adev); 4954 4955 return 0; 4956 } 4957 4958 static int gfx_v11_0_late_init(void *handle) 4959 { 4960 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4961 int r; 4962 4963 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 4964 if (r) 4965 return r; 4966 4967 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 4968 if (r) 4969 return r; 4970 4971 return 0; 4972 } 4973 4974 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev) 4975 { 4976 uint32_t rlc_cntl; 4977 4978 /* if RLC is not enabled, do nothing */ 4979 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 4980 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; 4981 } 4982 4983 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev) 4984 { 4985 uint32_t data; 4986 unsigned i; 4987 4988 data = RLC_SAFE_MODE__CMD_MASK; 4989 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 4990 4991 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 4992 4993 /* wait for RLC_SAFE_MODE */ 4994 for (i = 0; i < adev->usec_timeout; i++) { 4995 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 4996 RLC_SAFE_MODE, CMD)) 4997 break; 4998 udelay(1); 4999 } 5000 } 5001 5002 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev) 5003 { 5004 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 5005 } 5006 5007 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev, 5008 bool enable) 5009 { 5010 uint32_t def, data; 5011 5012 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK)) 5013 return; 5014 5015 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5016 5017 if (enable) 5018 data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5019 else 5020 data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK; 5021 5022 if (def != data) 5023 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5024 } 5025 5026 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev, 5027 bool enable) 5028 { 5029 uint32_t def, data; 5030 5031 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) 5032 return; 5033 5034 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5035 5036 if (enable) 5037 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5038 else 5039 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; 5040 5041 if (def != data) 5042 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5043 } 5044 5045 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev, 5046 bool enable) 5047 { 5048 uint32_t def, data; 5049 5050 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) 5051 return; 5052 5053 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5054 5055 if (enable) 5056 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5057 else 5058 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK; 5059 5060 if (def != data) 5061 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5062 } 5063 5064 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 5065 bool enable) 5066 { 5067 uint32_t data, def; 5068 5069 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS))) 5070 return; 5071 5072 /* It is disabled by HW by default */ 5073 if (enable) { 5074 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5075 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 5076 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5077 5078 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5079 
RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5080 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5081 5082 if (def != data) 5083 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5084 } 5085 } else { 5086 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { 5087 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5088 5089 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 5090 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 5091 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK); 5092 5093 if (def != data) 5094 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5095 } 5096 } 5097 } 5098 5099 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 5100 bool enable) 5101 { 5102 uint32_t def, data; 5103 5104 if (!(adev->cg_flags & 5105 (AMD_CG_SUPPORT_GFX_CGCG | 5106 AMD_CG_SUPPORT_GFX_CGLS | 5107 AMD_CG_SUPPORT_GFX_3D_CGCG | 5108 AMD_CG_SUPPORT_GFX_3D_CGLS))) 5109 return; 5110 5111 if (enable) { 5112 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5113 5114 /* unset CGCG override */ 5115 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5116 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 5117 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5118 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 5119 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG || 5120 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5121 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 5122 5123 /* update CGCG override bits */ 5124 if (def != data) 5125 WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data); 5126 5127 /* enable cgcg FSM(0x0000363F) */ 5128 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5129 5130 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { 5131 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK; 5132 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5133 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5134 } 5135 5136 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { 5137 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK; 5138 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5139 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5140 } 5141 5142 if (def != data) 5143 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5144 5145 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5146 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5147 5148 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { 5149 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK; 5150 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 5151 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5152 } 5153 5154 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { 5155 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK; 5156 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 5157 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5158 } 5159 5160 if (def != data) 5161 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5162 5163 /* set IDLE_POLL_COUNT(0x00900100) */ 5164 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL); 5165 5166 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK); 5167 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 5168 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 5169 5170 if (def != data) 5171 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data); 5172 5173 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL); 5174 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 
1); 5175 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1); 5176 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1); 5177 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1); 5178 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data); 5179 5180 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 5181 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 5182 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 5183 5184 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 5185 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); 5186 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 5187 } else { 5188 /* Program RLC_CGCG_CGLS_CTRL */ 5189 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5190 5191 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) 5192 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 5193 5194 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 5195 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 5196 5197 if (def != data) 5198 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data); 5199 5200 /* Program RLC_CGCG_CGLS_CTRL_3D */ 5201 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5202 5203 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) 5204 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 5205 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 5206 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 5207 5208 if (def != data) 5209 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data); 5210 5211 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL); 5212 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5213 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); 5214 5215 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); 5216 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; 5217 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); 5218 } 5219 } 5220 5221 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev, 5222 bool enable) 5223 { 5224 amdgpu_gfx_rlc_enter_safe_mode(adev); 5225 5226 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable); 5227 5228 gfx_v11_0_update_medium_grain_clock_gating(adev, enable); 5229 5230 gfx_v11_0_update_repeater_fgcg(adev, enable); 5231 5232 gfx_v11_0_update_sram_fgcg(adev, enable); 5233 5234 gfx_v11_0_update_perf_clk(adev, enable); 5235 5236 if (adev->cg_flags & 5237 (AMD_CG_SUPPORT_GFX_MGCG | 5238 AMD_CG_SUPPORT_GFX_CGLS | 5239 AMD_CG_SUPPORT_GFX_CGCG | 5240 AMD_CG_SUPPORT_GFX_3D_CGCG | 5241 AMD_CG_SUPPORT_GFX_3D_CGLS)) 5242 gfx_v11_0_enable_gui_idle_interrupt(adev, enable); 5243 5244 amdgpu_gfx_rlc_exit_safe_mode(adev); 5245 5246 return 0; 5247 } 5248 5249 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) 5250 { 5251 u32 reg, data; 5252 5253 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 5254 if (amdgpu_sriov_is_pp_one_vf(adev)) 5255 data = RREG32_NO_KIQ(reg); 5256 else 5257 data = RREG32(reg); 5258 5259 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; 5260 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 5261 5262 if (amdgpu_sriov_is_pp_one_vf(adev)) 5263 WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); 5264 else 5265 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); 5266 } 5267 5268 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = { 5269 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled, 5270 .set_safe_mode = gfx_v11_0_set_safe_mode, 5271 .unset_safe_mode = gfx_v11_0_unset_safe_mode, 5272 .init = gfx_v11_0_rlc_init, 5273 .get_csb_size = gfx_v11_0_get_csb_size, 5274 .get_csb_buffer = 
gfx_v11_0_get_csb_buffer, 5275 .resume = gfx_v11_0_rlc_resume, 5276 .stop = gfx_v11_0_rlc_stop, 5277 .reset = gfx_v11_0_rlc_reset, 5278 .start = gfx_v11_0_rlc_start, 5279 .update_spm_vmid = gfx_v11_0_update_spm_vmid, 5280 }; 5281 5282 static int gfx_v11_0_set_powergating_state(void *handle, 5283 enum amd_powergating_state state) 5284 { 5285 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5286 bool enable = (state == AMD_PG_STATE_GATE); 5287 5288 if (amdgpu_sriov_vf(adev)) 5289 return 0; 5290 5291 switch (adev->ip_versions[GC_HWIP][0]) { 5292 case IP_VERSION(11, 0, 0): 5293 case IP_VERSION(11, 0, 2): 5294 amdgpu_gfx_off_ctrl(adev, enable); 5295 break; 5296 default: 5297 break; 5298 } 5299 5300 return 0; 5301 } 5302 5303 static int gfx_v11_0_set_clockgating_state(void *handle, 5304 enum amd_clockgating_state state) 5305 { 5306 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5307 5308 if (amdgpu_sriov_vf(adev)) 5309 return 0; 5310 5311 switch (adev->ip_versions[GC_HWIP][0]) { 5312 case IP_VERSION(11, 0, 0): 5313 case IP_VERSION(11, 0, 2): 5314 gfx_v11_0_update_gfx_clock_gating(adev, 5315 state == AMD_CG_STATE_GATE); 5316 break; 5317 default: 5318 break; 5319 } 5320 5321 return 0; 5322 } 5323 5324 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags) 5325 { 5326 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5327 int data; 5328 5329 /* AMD_CG_SUPPORT_GFX_MGCG */ 5330 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE); 5331 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 5332 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 5333 5334 /* AMD_CG_SUPPORT_REPEATER_FGCG */ 5335 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK)) 5336 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG; 5337 5338 /* AMD_CG_SUPPORT_GFX_FGCG */ 5339 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK)) 5340 *flags |= AMD_CG_SUPPORT_GFX_FGCG; 5341 5342 /* AMD_CG_SUPPORT_GFX_PERF_CLK */ 5343 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK)) 5344 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK; 5345 5346 /* AMD_CG_SUPPORT_GFX_CGCG */ 5347 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); 5348 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 5349 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 5350 5351 /* AMD_CG_SUPPORT_GFX_CGLS */ 5352 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 5353 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 5354 5355 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 5356 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D); 5357 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 5358 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 5359 5360 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 5361 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 5362 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 5363 } 5364 5365 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 5366 { 5367 /* gfx11 is 32bit rptr*/ 5368 return *(uint32_t *)ring->rptr_cpu_addr; 5369 } 5370 5371 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 5372 { 5373 struct amdgpu_device *adev = ring->adev; 5374 u64 wptr; 5375 5376 /* XXX check if swapping is necessary on BE */ 5377 if (ring->use_doorbell) { 5378 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5379 } else { 5380 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); 5381 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; 5382 } 5383 5384 return wptr; 5385 } 5386 5387 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 5388 { 5389 struct amdgpu_device *adev = ring->adev; 5390 uint32_t *wptr_saved; 5391 uint32_t *is_queue_unmap; 5392 
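/* For a MES-managed queue the MQD is followed by two extra dwords, a shadow copy of the write pointer and an is_queue_unmap flag (see the offsets computed below). The aggregated doorbell fetched here appears to be a per-priority doorbell that is rung alongside the queue's own doorbell once the queue has been unmapped, so the MES scheduler notices the newly queued work. */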
uint64_t aggregated_db_index; 5393 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size; 5394 uint64_t wptr_tmp; 5395 5396 if (ring->is_mes_queue) { 5397 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 5398 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 5399 sizeof(uint32_t)); 5400 aggregated_db_index = 5401 amdgpu_mes_get_aggregated_doorbell_index(adev, 5402 ring->hw_prio); 5403 5404 wptr_tmp = ring->wptr & ring->buf_mask; 5405 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 5406 *wptr_saved = wptr_tmp; 5407 /* assume doorbell always being used by mes mapped queue */ 5408 if (*is_queue_unmap) { 5409 WDOORBELL64(aggregated_db_index, wptr_tmp); 5410 WDOORBELL64(ring->doorbell_index, wptr_tmp); 5411 } else { 5412 WDOORBELL64(ring->doorbell_index, wptr_tmp); 5413 5414 if (*is_queue_unmap) 5415 WDOORBELL64(aggregated_db_index, wptr_tmp); 5416 } 5417 } else { 5418 if (ring->use_doorbell) { 5419 /* XXX check if swapping is necessary on BE */ 5420 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5421 ring->wptr); 5422 WDOORBELL64(ring->doorbell_index, ring->wptr); 5423 } else { 5424 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, 5425 lower_32_bits(ring->wptr)); 5426 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, 5427 upper_32_bits(ring->wptr)); 5428 } 5429 } 5430 } 5431 5432 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 5433 { 5434 /* gfx11 hardware is 32bit rptr */ 5435 return *(uint32_t *)ring->rptr_cpu_addr; 5436 } 5437 5438 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 5439 { 5440 u64 wptr; 5441 5442 /* XXX check if swapping is necessary on BE */ 5443 if (ring->use_doorbell) 5444 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 5445 else 5446 BUG(); 5447 return wptr; 5448 } 5449 5450 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 5451 { 5452 struct amdgpu_device *adev = ring->adev; 5453 uint32_t *wptr_saved; 5454 uint32_t *is_queue_unmap; 5455 uint64_t aggregated_db_index; 5456 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size; 5457 uint64_t wptr_tmp; 5458 5459 if (ring->is_mes_queue) { 5460 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 5461 is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 5462 sizeof(uint32_t)); 5463 aggregated_db_index = 5464 amdgpu_mes_get_aggregated_doorbell_index(adev, 5465 ring->hw_prio); 5466 5467 wptr_tmp = ring->wptr & ring->buf_mask; 5468 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 5469 *wptr_saved = wptr_tmp; 5470 /* assume doorbell always used by mes mapped queue */ 5471 if (*is_queue_unmap) { 5472 WDOORBELL64(aggregated_db_index, wptr_tmp); 5473 WDOORBELL64(ring->doorbell_index, wptr_tmp); 5474 } else { 5475 WDOORBELL64(ring->doorbell_index, wptr_tmp); 5476 5477 if (*is_queue_unmap) 5478 WDOORBELL64(aggregated_db_index, wptr_tmp); 5479 } 5480 } else { 5481 /* XXX check if swapping is necessary on BE */ 5482 if (ring->use_doorbell) { 5483 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 5484 ring->wptr); 5485 WDOORBELL64(ring->doorbell_index, ring->wptr); 5486 } else { 5487 BUG(); /* only DOORBELL method supported on gfx11 now */ 5488 } 5489 } 5490 } 5491 5492 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 5493 { 5494 struct amdgpu_device *adev = ring->adev; 5495 u32 ref_and_mask, reg_mem_engine; 5496 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 5497 5498 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 5499 switch (ring->me) { 5500 case 1: 5501 ref_and_mask = 
nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 5502 break; 5503 case 2: 5504 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 5505 break; 5506 default: 5507 return; 5508 } 5509 reg_mem_engine = 0; 5510 } else { 5511 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 5512 reg_mem_engine = 1; /* pfp */ 5513 } 5514 5515 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 5516 adev->nbio.funcs->get_hdp_flush_req_offset(adev), 5517 adev->nbio.funcs->get_hdp_flush_done_offset(adev), 5518 ref_and_mask, ref_and_mask, 0x20); 5519 } 5520 5521 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 5522 struct amdgpu_job *job, 5523 struct amdgpu_ib *ib, 5524 uint32_t flags) 5525 { 5526 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5527 u32 header, control = 0; 5528 5529 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); 5530 5531 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 5532 5533 control |= ib->length_dw | (vmid << 24); 5534 5535 if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 5536 control |= INDIRECT_BUFFER_PRE_ENB(1); 5537 5538 if (flags & AMDGPU_IB_PREEMPTED) 5539 control |= INDIRECT_BUFFER_PRE_RESUME(1); 5540 5541 if (vmid) 5542 gfx_v11_0_ring_emit_de_meta(ring, 5543 (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false); 5544 } 5545 5546 if (ring->is_mes_queue) 5547 /* inherit vmid from mqd */ 5548 control |= 0x400000; 5549 5550 amdgpu_ring_write(ring, header); 5551 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5552 amdgpu_ring_write(ring, 5553 #ifdef __BIG_ENDIAN 5554 (2 << 0) | 5555 #endif 5556 lower_32_bits(ib->gpu_addr)); 5557 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5558 amdgpu_ring_write(ring, control); 5559 } 5560 5561 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 5562 struct amdgpu_job *job, 5563 struct amdgpu_ib *ib, 5564 uint32_t flags) 5565 { 5566 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 5567 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 5568 5569 if (ring->is_mes_queue) 5570 /* inherit vmid from mqd */ 5571 control |= 0x40000000; 5572 5573 /* Currently, there is a high possibility to get wave ID mismatch 5574 * between ME and GDS, leading to a hw deadlock, because ME generates 5575 * different wave IDs than the GDS expects. This situation happens 5576 * randomly when at least 5 compute pipes use GDS ordered append. 5577 * The wave IDs generated by ME are also wrong after suspend/resume. 5578 * Those are probably bugs somewhere else in the kernel driver. 5579 * 5580 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 5581 * GDS to 0 for this ring (me/pipe). 
5582 */ 5583 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 5584 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 5585 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); 5586 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 5587 } 5588 5589 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 5590 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 5591 amdgpu_ring_write(ring, 5592 #ifdef __BIG_ENDIAN 5593 (2 << 0) | 5594 #endif 5595 lower_32_bits(ib->gpu_addr)); 5596 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 5597 amdgpu_ring_write(ring, control); 5598 } 5599 5600 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 5601 u64 seq, unsigned flags) 5602 { 5603 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 5604 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 5605 5606 /* RELEASE_MEM - flush caches, send int */ 5607 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 5608 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | 5609 PACKET3_RELEASE_MEM_GCR_GL2_WB | 5610 PACKET3_RELEASE_MEM_GCR_GL2_INV | 5611 PACKET3_RELEASE_MEM_GCR_GL2_US | 5612 PACKET3_RELEASE_MEM_GCR_GL1_INV | 5613 PACKET3_RELEASE_MEM_GCR_GLV_INV | 5614 PACKET3_RELEASE_MEM_GCR_GLM_INV | 5615 PACKET3_RELEASE_MEM_GCR_GLM_WB | 5616 PACKET3_RELEASE_MEM_CACHE_POLICY(3) | 5617 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 5618 PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 5619 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 5620 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0))); 5621 5622 /* 5623 * the address should be Qword aligned if 64bit write, Dword 5624 * aligned if only send 32bit data low (discard data high) 5625 */ 5626 if (write64bit) 5627 BUG_ON(addr & 0x7); 5628 else 5629 BUG_ON(addr & 0x3); 5630 amdgpu_ring_write(ring, lower_32_bits(addr)); 5631 amdgpu_ring_write(ring, upper_32_bits(addr)); 5632 amdgpu_ring_write(ring, lower_32_bits(seq)); 5633 amdgpu_ring_write(ring, upper_32_bits(seq)); 5634 amdgpu_ring_write(ring, ring->is_mes_queue ? 
5635 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0); 5636 } 5637 5638 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 5639 { 5640 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 5641 uint32_t seq = ring->fence_drv.sync_seq; 5642 uint64_t addr = ring->fence_drv.gpu_addr; 5643 5644 gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), 5645 upper_32_bits(addr), seq, 0xffffffff, 4); 5646 } 5647 5648 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 5649 uint16_t pasid, uint32_t flush_type, 5650 bool all_hub, uint8_t dst_sel) 5651 { 5652 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); 5653 amdgpu_ring_write(ring, 5654 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) | 5655 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | 5656 PACKET3_INVALIDATE_TLBS_PASID(pasid) | 5657 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); 5658 } 5659 5660 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 5661 unsigned vmid, uint64_t pd_addr) 5662 { 5663 if (ring->is_mes_queue) 5664 gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); 5665 else 5666 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 5667 5668 /* compute doesn't have PFP */ 5669 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 5670 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 5671 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 5672 amdgpu_ring_write(ring, 0x0); 5673 } 5674 } 5675 5676 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 5677 u64 seq, unsigned int flags) 5678 { 5679 struct amdgpu_device *adev = ring->adev; 5680 5681 /* we only allocate 32bit for each seq wb address */ 5682 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 5683 5684 /* write fence seq to the "addr" */ 5685 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5686 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5687 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 5688 amdgpu_ring_write(ring, lower_32_bits(addr)); 5689 amdgpu_ring_write(ring, upper_32_bits(addr)); 5690 amdgpu_ring_write(ring, lower_32_bits(seq)); 5691 5692 if (flags & AMDGPU_FENCE_FLAG_INT) { 5693 /* set register to trigger INT */ 5694 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5695 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5696 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 5697 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); 5698 amdgpu_ring_write(ring, 0); 5699 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 5700 } 5701 } 5702 5703 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, 5704 uint32_t flags) 5705 { 5706 uint32_t dw2 = 0; 5707 5708 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ 5709 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 5710 /* set load_global_config & load_global_uconfig */ 5711 dw2 |= 0x8001; 5712 /* set load_cs_sh_regs */ 5713 dw2 |= 0x01000000; 5714 /* set load_per_context_state & load_gfx_sh_regs for GFX */ 5715 dw2 |= 0x10002; 5716 } 5717 5718 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 5719 amdgpu_ring_write(ring, dw2); 5720 amdgpu_ring_write(ring, 0); 5721 } 5722 5723 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) 5724 { 5725 unsigned ret; 5726 5727 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 5728 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); 5729 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); 5730 amdgpu_ring_write(ring, 0); /* discard following DWs if 
*cond_exec_gpu_addr==0 */ 5731 ret = ring->wptr & ring->buf_mask; 5732 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */ 5733 5734 return ret; 5735 } 5736 5737 static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) 5738 { 5739 unsigned cur; 5740 BUG_ON(offset > ring->buf_mask); 5741 BUG_ON(ring->ring[offset] != 0x55aa55aa); 5742 5743 cur = (ring->wptr - 1) & ring->buf_mask; 5744 if (likely(cur > offset)) 5745 ring->ring[offset] = cur - offset; 5746 else 5747 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur; 5748 } 5749 5750 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring) 5751 { 5752 int i, r = 0; 5753 struct amdgpu_device *adev = ring->adev; 5754 struct amdgpu_kiq *kiq = &adev->gfx.kiq; 5755 struct amdgpu_ring *kiq_ring = &kiq->ring; 5756 unsigned long flags; 5757 5758 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 5759 return -EINVAL; 5760 5761 spin_lock_irqsave(&kiq->ring_lock, flags); 5762 5763 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 5764 spin_unlock_irqrestore(&kiq->ring_lock, flags); 5765 return -ENOMEM; 5766 } 5767 5768 /* assert preemption condition */ 5769 amdgpu_ring_set_preempt_cond_exec(ring, false); 5770 5771 /* assert IB preemption, emit the trailing fence */ 5772 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 5773 ring->trail_fence_gpu_addr, 5774 ++ring->trail_seq); 5775 amdgpu_ring_commit(kiq_ring); 5776 5777 spin_unlock_irqrestore(&kiq->ring_lock, flags); 5778 5779 /* poll the trailing fence */ 5780 for (i = 0; i < adev->usec_timeout; i++) { 5781 if (ring->trail_seq == 5782 le32_to_cpu(*(ring->trail_fence_cpu_addr))) 5783 break; 5784 udelay(1); 5785 } 5786 5787 if (i >= adev->usec_timeout) { 5788 r = -EINVAL; 5789 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 5790 } 5791 5792 /* deassert preemption condition */ 5793 amdgpu_ring_set_preempt_cond_exec(ring, true); 5794 return r; 5795 } 5796 5797 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) 5798 { 5799 struct amdgpu_device *adev = ring->adev; 5800 struct v10_de_ib_state de_payload = {0}; 5801 uint64_t offset, gds_addr, de_payload_gpu_addr; 5802 void *de_payload_cpu_addr; 5803 int cnt; 5804 5805 if (ring->is_mes_queue) { 5806 offset = offsetof(struct amdgpu_mes_ctx_meta_data, 5807 gfx[0].gfx_meta_data) + 5808 offsetof(struct v10_gfx_meta_data, de_payload); 5809 de_payload_gpu_addr = 5810 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 5811 de_payload_cpu_addr = 5812 amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 5813 5814 offset = offsetof(struct amdgpu_mes_ctx_meta_data, 5815 gfx[0].gds_backup) + 5816 offsetof(struct v10_gfx_meta_data, de_payload); 5817 gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 5818 } else { 5819 offset = offsetof(struct v10_gfx_meta_data, de_payload); 5820 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset; 5821 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset; 5822 5823 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) + 5824 AMDGPU_CSA_SIZE - adev->gds.gds_size, 5825 PAGE_SIZE); 5826 } 5827 5828 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); 5829 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 5830 5831 cnt = (sizeof(de_payload) >> 2) + 4 - 2; 5832 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 5833 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 5834 WRITE_DATA_DST_SEL(8) | 5835 WR_CONFIRM) | 5836 WRITE_DATA_CACHE_POLICY(0)); 5837 amdgpu_ring_write(ring, 
lower_32_bits(de_payload_gpu_addr)); 5838 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr)); 5839 5840 if (resume) 5841 amdgpu_ring_write_multiple(ring, de_payload_cpu_addr, 5842 sizeof(de_payload) >> 2); 5843 else 5844 amdgpu_ring_write_multiple(ring, (void *)&de_payload, 5845 sizeof(de_payload) >> 2); 5846 } 5847 5848 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, 5849 bool secure) 5850 { 5851 uint32_t v = secure ? FRAME_TMZ : 0; 5852 5853 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 5854 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1)); 5855 } 5856 5857 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 5858 uint32_t reg_val_offs) 5859 { 5860 struct amdgpu_device *adev = ring->adev; 5861 5862 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 5863 amdgpu_ring_write(ring, 0 | /* src: register*/ 5864 (5 << 8) | /* dst: memory */ 5865 (1 << 20)); /* write confirm */ 5866 amdgpu_ring_write(ring, reg); 5867 amdgpu_ring_write(ring, 0); 5868 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 5869 reg_val_offs * 4)); 5870 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 5871 reg_val_offs * 4)); 5872 } 5873 5874 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 5875 uint32_t val) 5876 { 5877 uint32_t cmd = 0; 5878 5879 switch (ring->funcs->type) { 5880 case AMDGPU_RING_TYPE_GFX: 5881 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 5882 break; 5883 case AMDGPU_RING_TYPE_KIQ: 5884 cmd = (1 << 16); /* no inc addr */ 5885 break; 5886 default: 5887 cmd = WR_CONFIRM; 5888 break; 5889 } 5890 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5891 amdgpu_ring_write(ring, cmd); 5892 amdgpu_ring_write(ring, reg); 5893 amdgpu_ring_write(ring, 0); 5894 amdgpu_ring_write(ring, val); 5895 } 5896 5897 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 5898 uint32_t val, uint32_t mask) 5899 { 5900 gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 5901 } 5902 5903 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 5904 uint32_t reg0, uint32_t reg1, 5905 uint32_t ref, uint32_t mask) 5906 { 5907 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 5908 5909 gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 5910 ref, mask, 0x20); 5911 } 5912 5913 static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, 5914 unsigned vmid) 5915 { 5916 struct amdgpu_device *adev = ring->adev; 5917 uint32_t value = 0; 5918 5919 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 5920 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 5921 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 5922 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 5923 WREG32_SOC15(GC, 0, regSQ_CMD, value); 5924 } 5925 5926 static void 5927 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 5928 uint32_t me, uint32_t pipe, 5929 enum amdgpu_interrupt_state state) 5930 { 5931 uint32_t cp_int_cntl, cp_int_cntl_reg; 5932 5933 if (!me) { 5934 switch (pipe) { 5935 case 0: 5936 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 5937 break; 5938 case 1: 5939 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1); 5940 break; 5941 default: 5942 DRM_DEBUG("invalid pipe %d\n", pipe); 5943 return; 5944 } 5945 } else { 5946 DRM_DEBUG("invalid me %d\n", me); 5947 return; 5948 } 5949 5950 switch (state) { 5951 case AMDGPU_IRQ_STATE_DISABLE: 5952 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 5953 
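/* Clearing both TIME_STAMP_INT_ENABLE and GENERIC0_INT_ENABLE keeps this gfx pipe from raising CP EOP interrupts while the source is disabled. */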
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 5954 TIME_STAMP_INT_ENABLE, 0); 5955 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 5956 GENERIC0_INT_ENABLE, 0); 5957 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 5958 break; 5959 case AMDGPU_IRQ_STATE_ENABLE: 5960 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 5961 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 5962 TIME_STAMP_INT_ENABLE, 1); 5963 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 5964 GENERIC0_INT_ENABLE, 1); 5965 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 5966 break; 5967 default: 5968 break; 5969 } 5970 } 5971 5972 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 5973 int me, int pipe, 5974 enum amdgpu_interrupt_state state) 5975 { 5976 u32 mec_int_cntl, mec_int_cntl_reg; 5977 5978 /* 5979 * amdgpu controls only the first MEC. That's why this function only 5980 * handles the setting of interrupts for this specific MEC. All other 5981 * pipes' interrupts are set by amdkfd. 5982 */ 5983 5984 if (me == 1) { 5985 switch (pipe) { 5986 case 0: 5987 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 5988 break; 5989 case 1: 5990 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 5991 break; 5992 case 2: 5993 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL); 5994 break; 5995 case 3: 5996 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL); 5997 break; 5998 default: 5999 DRM_DEBUG("invalid pipe %d\n", pipe); 6000 return; 6001 } 6002 } else { 6003 DRM_DEBUG("invalid me %d\n", me); 6004 return; 6005 } 6006 6007 switch (state) { 6008 case AMDGPU_IRQ_STATE_DISABLE: 6009 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6010 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6011 TIME_STAMP_INT_ENABLE, 0); 6012 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6013 GENERIC0_INT_ENABLE, 0); 6014 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6015 break; 6016 case AMDGPU_IRQ_STATE_ENABLE: 6017 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 6018 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6019 TIME_STAMP_INT_ENABLE, 1); 6020 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 6021 GENERIC0_INT_ENABLE, 1); 6022 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 6023 break; 6024 default: 6025 break; 6026 } 6027 } 6028 6029 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev, 6030 struct amdgpu_irq_src *src, 6031 unsigned type, 6032 enum amdgpu_interrupt_state state) 6033 { 6034 switch (type) { 6035 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 6036 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 6037 break; 6038 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 6039 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 6040 break; 6041 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 6042 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 6043 break; 6044 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 6045 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 6046 break; 6047 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 6048 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 6049 break; 6050 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 6051 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 6052 break; 6053 default: 6054 break; 6055 } 6056 return 0; 6057 } 6058 6059 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev, 6060 struct 
amdgpu_irq_src *source, 6061 struct amdgpu_iv_entry *entry) 6062 { 6063 int i; 6064 u8 me_id, pipe_id, queue_id; 6065 struct amdgpu_ring *ring; 6066 uint32_t mes_queue_id = entry->src_data[0]; 6067 6068 DRM_DEBUG("IH: CP EOP\n"); 6069 6070 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { 6071 struct amdgpu_mes_queue *queue; 6072 6073 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; 6074 6075 spin_lock(&adev->mes.queue_id_lock); 6076 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); 6077 if (queue) { 6078 DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); 6079 amdgpu_fence_process(queue->ring); 6080 } 6081 spin_unlock(&adev->mes.queue_id_lock); 6082 } else { 6083 me_id = (entry->ring_id & 0x0c) >> 2; 6084 pipe_id = (entry->ring_id & 0x03) >> 0; 6085 queue_id = (entry->ring_id & 0x70) >> 4; 6086 6087 switch (me_id) { 6088 case 0: 6089 if (pipe_id == 0) 6090 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 6091 else 6092 amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 6093 break; 6094 case 1: 6095 case 2: 6096 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6097 ring = &adev->gfx.compute_ring[i]; 6098 /* Per-queue interrupt is supported for MEC starting from VI. 6099 * The interrupt can only be enabled/disabled per pipe instead 6100 * of per queue. 6101 */ 6102 if ((ring->me == me_id) && 6103 (ring->pipe == pipe_id) && 6104 (ring->queue == queue_id)) 6105 amdgpu_fence_process(ring); 6106 } 6107 break; 6108 } 6109 } 6110 6111 return 0; 6112 } 6113 6114 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 6115 struct amdgpu_irq_src *source, 6116 unsigned type, 6117 enum amdgpu_interrupt_state state) 6118 { 6119 switch (state) { 6120 case AMDGPU_IRQ_STATE_DISABLE: 6121 case AMDGPU_IRQ_STATE_ENABLE: 6122 WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, 6123 PRIV_REG_INT_ENABLE, 6124 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 6125 break; 6126 default: 6127 break; 6128 } 6129 6130 return 0; 6131 } 6132 6133 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 6134 struct amdgpu_irq_src *source, 6135 unsigned type, 6136 enum amdgpu_interrupt_state state) 6137 { 6138 switch (state) { 6139 case AMDGPU_IRQ_STATE_DISABLE: 6140 case AMDGPU_IRQ_STATE_ENABLE: 6141 WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, 6142 PRIV_INSTR_INT_ENABLE, 6143 state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); 6144 break; 6145 default: 6146 break; 6147 } 6148 6149 return 0; 6150 } 6151 6152 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, 6153 struct amdgpu_iv_entry *entry) 6154 { 6155 u8 me_id, pipe_id, queue_id; 6156 struct amdgpu_ring *ring; 6157 int i; 6158 6159 me_id = (entry->ring_id & 0x0c) >> 2; 6160 pipe_id = (entry->ring_id & 0x03) >> 0; 6161 queue_id = (entry->ring_id & 0x70) >> 4; 6162 6163 switch (me_id) { 6164 case 0: 6165 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 6166 ring = &adev->gfx.gfx_ring[i]; 6167 /* we only enabled 1 gfx queue per pipe for now */ 6168 if (ring->me == me_id && ring->pipe == pipe_id) 6169 drm_sched_fault(&ring->sched); 6170 } 6171 break; 6172 case 1: 6173 case 2: 6174 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 6175 ring = &adev->gfx.compute_ring[i]; 6176 if (ring->me == me_id && ring->pipe == pipe_id && 6177 ring->queue == queue_id) 6178 drm_sched_fault(&ring->sched); 6179 } 6180 break; 6181 default: 6182 BUG(); 6183 break; 6184 } 6185 } 6186 6187 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev, 6188 struct amdgpu_irq_src *source, 6189 struct amdgpu_iv_entry *entry) 6190 { 6191 DRM_ERROR("Illegal register access in command stream\n"); 6192 gfx_v11_0_handle_priv_fault(adev, entry); 6193 return 0; 6194 } 6195 6196 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev, 6197 struct amdgpu_irq_src *source, 6198 struct amdgpu_iv_entry *entry) 6199 { 6200 DRM_ERROR("Illegal instruction in command stream\n"); 6201 gfx_v11_0_handle_priv_fault(adev, entry); 6202 return 0; 6203 } 6204 6205 #if 0 6206 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev, 6207 struct amdgpu_irq_src *src, 6208 unsigned int type, 6209 enum amdgpu_interrupt_state state) 6210 { 6211 uint32_t tmp, target; 6212 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); 6213 6214 target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 6215 target += ring->pipe; 6216 6217 switch (type) { 6218 case AMDGPU_CP_KIQ_IRQ_DRIVER0: 6219 if (state == AMDGPU_IRQ_STATE_DISABLE) { 6220 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6221 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6222 GENERIC2_INT_ENABLE, 0); 6223 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6224 6225 tmp = RREG32_SOC15_IP(GC, target); 6226 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6227 GENERIC2_INT_ENABLE, 0); 6228 WREG32_SOC15_IP(GC, target, tmp); 6229 } else { 6230 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL); 6231 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 6232 GENERIC2_INT_ENABLE, 1); 6233 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp); 6234 6235 tmp = RREG32_SOC15_IP(GC, target); 6236 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, 6237 GENERIC2_INT_ENABLE, 1); 6238 WREG32_SOC15_IP(GC, target, tmp); 6239 } 6240 break; 6241 default: 6242 BUG(); /* kiq only support GENERIC2_INT now */ 6243 break; 6244 } 6245 return 0; 6246 } 6247 #endif 6248 6249 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring) 6250 { 6251 const unsigned int gcr_cntl = 6252 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | 6253 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | 6254 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | 6255 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | 6256 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | 6257 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | 6258 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | 6259 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); 6260 6261 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ 6262 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); 
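/* The seven payload dwords that follow request a full-range flush: a zero CP_COHER_BASE with CP_COHER_SIZE/SIZE_HI of all ones covers the whole address space, and the gcr_cntl value built above asks the GCR to write back and invalidate GL2 and GLM and to invalidate the GL1, GLV, GLK and GLI caches. */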
6263 amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ 6264 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 6265 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 6266 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 6267 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 6268 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 6269 amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ 6270 } 6271 6272 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { 6273 .name = "gfx_v11_0", 6274 .early_init = gfx_v11_0_early_init, 6275 .late_init = gfx_v11_0_late_init, 6276 .sw_init = gfx_v11_0_sw_init, 6277 .sw_fini = gfx_v11_0_sw_fini, 6278 .hw_init = gfx_v11_0_hw_init, 6279 .hw_fini = gfx_v11_0_hw_fini, 6280 .suspend = gfx_v11_0_suspend, 6281 .resume = gfx_v11_0_resume, 6282 .is_idle = gfx_v11_0_is_idle, 6283 .wait_for_idle = gfx_v11_0_wait_for_idle, 6284 .soft_reset = gfx_v11_0_soft_reset, 6285 .check_soft_reset = gfx_v11_0_check_soft_reset, 6286 .set_clockgating_state = gfx_v11_0_set_clockgating_state, 6287 .set_powergating_state = gfx_v11_0_set_powergating_state, 6288 .get_clockgating_state = gfx_v11_0_get_clockgating_state, 6289 }; 6290 6291 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { 6292 .type = AMDGPU_RING_TYPE_GFX, 6293 .align_mask = 0xff, 6294 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 6295 .support_64bit_ptrs = true, 6296 .vmhub = AMDGPU_GFXHUB_0, 6297 .get_rptr = gfx_v11_0_ring_get_rptr_gfx, 6298 .get_wptr = gfx_v11_0_ring_get_wptr_gfx, 6299 .set_wptr = gfx_v11_0_ring_set_wptr_gfx, 6300 .emit_frame_size = /* totally 242 maximum if 16 IBs */ 6301 5 + /* COND_EXEC */ 6302 7 + /* PIPELINE_SYNC */ 6303 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 6304 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 6305 2 + /* VM_FLUSH */ 6306 8 + /* FENCE for VM_FLUSH */ 6307 20 + /* GDS switch */ 6308 5 + /* COND_EXEC */ 6309 7 + /* HDP_flush */ 6310 4 + /* VGT_flush */ 6311 31 + /* DE_META */ 6312 3 + /* CNTX_CTRL */ 6313 5 + /* HDP_INVL */ 6314 8 + 8 + /* FENCE x2 */ 6315 8, /* gfx_v11_0_emit_mem_sync */ 6316 .emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */ 6317 .emit_ib = gfx_v11_0_ring_emit_ib_gfx, 6318 .emit_fence = gfx_v11_0_ring_emit_fence, 6319 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, 6320 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, 6321 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, 6322 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, 6323 .test_ring = gfx_v11_0_ring_test_ring, 6324 .test_ib = gfx_v11_0_ring_test_ib, 6325 .insert_nop = amdgpu_ring_insert_nop, 6326 .pad_ib = amdgpu_ring_generic_pad_ib, 6327 .emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl, 6328 .init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec, 6329 .patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec, 6330 .preempt_ib = gfx_v11_0_ring_preempt_ib, 6331 .emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl, 6332 .emit_wreg = gfx_v11_0_ring_emit_wreg, 6333 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 6334 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 6335 .soft_recovery = gfx_v11_0_ring_soft_recovery, 6336 .emit_mem_sync = gfx_v11_0_emit_mem_sync, 6337 }; 6338 6339 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { 6340 .type = AMDGPU_RING_TYPE_COMPUTE, 6341 .align_mask = 0xff, 6342 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 6343 .support_64bit_ptrs = true, 6344 .vmhub = AMDGPU_GFXHUB_0, 6345 .get_rptr = gfx_v11_0_ring_get_rptr_compute, 6346 .get_wptr = gfx_v11_0_ring_get_wptr_compute, 6347 .set_wptr = gfx_v11_0_ring_set_wptr_compute, 6348 .emit_frame_size 
= 6349 20 + /* gfx_v11_0_ring_emit_gds_switch */ 6350 7 + /* gfx_v11_0_ring_emit_hdp_flush */ 6351 5 + /* hdp invalidate */ 6352 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ 6353 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 6354 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 6355 2 + /* gfx_v11_0_ring_emit_vm_flush */ 6356 8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */ 6357 8, /* gfx_v11_0_emit_mem_sync */ 6358 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ 6359 .emit_ib = gfx_v11_0_ring_emit_ib_compute, 6360 .emit_fence = gfx_v11_0_ring_emit_fence, 6361 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync, 6362 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush, 6363 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch, 6364 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, 6365 .test_ring = gfx_v11_0_ring_test_ring, 6366 .test_ib = gfx_v11_0_ring_test_ib, 6367 .insert_nop = amdgpu_ring_insert_nop, 6368 .pad_ib = amdgpu_ring_generic_pad_ib, 6369 .emit_wreg = gfx_v11_0_ring_emit_wreg, 6370 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 6371 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 6372 .emit_mem_sync = gfx_v11_0_emit_mem_sync, 6373 }; 6374 6375 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { 6376 .type = AMDGPU_RING_TYPE_KIQ, 6377 .align_mask = 0xff, 6378 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 6379 .support_64bit_ptrs = true, 6380 .vmhub = AMDGPU_GFXHUB_0, 6381 .get_rptr = gfx_v11_0_ring_get_rptr_compute, 6382 .get_wptr = gfx_v11_0_ring_get_wptr_compute, 6383 .set_wptr = gfx_v11_0_ring_set_wptr_compute, 6384 .emit_frame_size = 6385 20 + /* gfx_v11_0_ring_emit_gds_switch */ 6386 7 + /* gfx_v11_0_ring_emit_hdp_flush */ 6387 5 + /*hdp invalidate */ 6388 7 + /* gfx_v11_0_ring_emit_pipeline_sync */ 6389 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 6390 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 6391 2 + /* gfx_v11_0_ring_emit_vm_flush */ 6392 8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 6393 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */ 6394 .emit_ib = gfx_v11_0_ring_emit_ib_compute, 6395 .emit_fence = gfx_v11_0_ring_emit_fence_kiq, 6396 .test_ring = gfx_v11_0_ring_test_ring, 6397 .test_ib = gfx_v11_0_ring_test_ib, 6398 .insert_nop = amdgpu_ring_insert_nop, 6399 .pad_ib = amdgpu_ring_generic_pad_ib, 6400 .emit_rreg = gfx_v11_0_ring_emit_rreg, 6401 .emit_wreg = gfx_v11_0_ring_emit_wreg, 6402 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, 6403 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, 6404 }; 6405 6406 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) 6407 { 6408 int i; 6409 6410 adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq; 6411 6412 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 6413 adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx; 6414 6415 for (i = 0; i < adev->gfx.num_compute_rings; i++) 6416 adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute; 6417 } 6418 6419 static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = { 6420 .set = gfx_v11_0_set_eop_interrupt_state, 6421 .process = gfx_v11_0_eop_irq, 6422 }; 6423 6424 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = { 6425 .set = gfx_v11_0_set_priv_reg_fault_state, 6426 .process = gfx_v11_0_priv_reg_irq, 6427 }; 6428 6429 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = { 6430 .set = gfx_v11_0_set_priv_inst_fault_state, 6431 .process = gfx_v11_0_priv_inst_irq, 6432 }; 6433 6434 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev) 
6435 { 6436 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 6437 adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs; 6438 6439 adev->gfx.priv_reg_irq.num_types = 1; 6440 adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs; 6441 6442 adev->gfx.priv_inst_irq.num_types = 1; 6443 adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs; 6444 } 6445 6446 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev) 6447 { 6448 if (adev->flags & AMD_IS_APU) 6449 adev->gfx.imu.mode = MISSION_MODE; 6450 else 6451 adev->gfx.imu.mode = DEBUG_MODE; 6452 6453 adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs; 6454 } 6455 6456 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev) 6457 { 6458 adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs; 6459 } 6460 6461 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev) 6462 { 6463 unsigned total_cu = adev->gfx.config.max_cu_per_sh * 6464 adev->gfx.config.max_sh_per_se * 6465 adev->gfx.config.max_shader_engines; 6466 6467 adev->gds.gds_size = 0x1000; 6468 adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1; 6469 adev->gds.gws_size = 64; 6470 adev->gds.oa_size = 16; 6471 } 6472 6473 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev) 6474 { 6475 /* set gfx eng mqd */ 6476 adev->mqds[AMDGPU_HW_IP_GFX].mqd_size = 6477 sizeof(struct v11_gfx_mqd); 6478 adev->mqds[AMDGPU_HW_IP_GFX].init_mqd = 6479 gfx_v11_0_gfx_mqd_init; 6480 /* set compute eng mqd */ 6481 adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size = 6482 sizeof(struct v11_compute_mqd); 6483 adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd = 6484 gfx_v11_0_compute_mqd_init; 6485 } 6486 6487 static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev, 6488 u32 bitmap) 6489 { 6490 u32 data; 6491 6492 if (!bitmap) 6493 return; 6494 6495 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 6496 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 6497 6498 WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data); 6499 } 6500 6501 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev) 6502 { 6503 u32 data, wgp_bitmask; 6504 data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG); 6505 data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG); 6506 6507 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 6508 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 6509 6510 wgp_bitmask = 6511 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); 6512 6513 return (~data) & wgp_bitmask; 6514 } 6515 6516 static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev) 6517 { 6518 u32 wgp_idx, wgp_active_bitmap; 6519 u32 cu_bitmap_per_wgp, cu_active_bitmap; 6520 6521 wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev); 6522 cu_active_bitmap = 0; 6523 6524 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) { 6525 /* if there is one WGP enabled, it means 2 CUs will be enabled */ 6526 cu_bitmap_per_wgp = 3 << (2 * wgp_idx); 6527 if (wgp_active_bitmap & (1 << wgp_idx)) 6528 cu_active_bitmap |= cu_bitmap_per_wgp; 6529 } 6530 6531 return cu_active_bitmap; 6532 } 6533 6534 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev, 6535 struct amdgpu_cu_info *cu_info) 6536 { 6537 int i, j, k, counter, active_cu_number = 0; 6538 u32 mask, bitmap; 6539 unsigned disable_masks[8 * 2]; 6540 6541 if (!adev || !cu_info) 6542 return -EINVAL; 6543 6544 amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2); 6545 6546 mutex_lock(&adev->grbm_idx_mutex); 6547 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 
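/* For each shader engine/array pair: steer GRBM at that SA, apply any user-requested WGP disable mask, then read back the active-CU bitmap and count its set bits. */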
6548 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 6549 mask = 1; 6550 counter = 0; 6551 gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff); 6552 if (i < 8 && j < 2) 6553 gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh( 6554 adev, disable_masks[i * 2 + j]); 6555 bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev); 6556 6557 /** 6558 * GFX11 could support more than 4 SEs, while the bitmap 6559 * in cu_info struct is 4x4 and ioctl interface struct 6560 * drm_amdgpu_info_device should keep stable. 6561 * So we use last two columns of bitmap to store cu mask for 6562 * SEs 4 to 7, the layout of the bitmap is as below: 6563 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]} 6564 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]} 6565 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]} 6566 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]} 6567 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]} 6568 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]} 6569 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]} 6570 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]} 6571 */ 6572 cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap; 6573 6574 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 6575 if (bitmap & mask) 6576 counter++; 6577 6578 mask <<= 1; 6579 } 6580 active_cu_number += counter; 6581 } 6582 } 6583 gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 6584 mutex_unlock(&adev->grbm_idx_mutex); 6585 6586 cu_info->number = active_cu_number; 6587 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 6588 6589 return 0; 6590 } 6591 6592 const struct amdgpu_ip_block_version gfx_v11_0_ip_block = 6593 { 6594 .type = AMD_IP_BLOCK_TYPE_GFX, 6595 .major = 11, 6596 .minor = 0, 6597 .rev = 0, 6598 .funcs = &gfx_v11_0_ip_funcs, 6599 }; 6600