1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2020-2023 Intel Corporation 4 */ 5 6 #include "ivpu_drv.h" 7 #include "ivpu_fw.h" 8 #include "ivpu_hw.h" 9 #include "ivpu_hw_40xx_reg.h" 10 #include "ivpu_hw_reg_io.h" 11 #include "ivpu_ipc.h" 12 #include "ivpu_mmu.h" 13 #include "ivpu_pm.h" 14 15 #include <linux/dmi.h> 16 17 #define TILE_MAX_NUM 6 18 #define TILE_MAX_MASK 0x3f 19 20 #define LNL_HW_ID 0x4040 21 22 #define SKU_TILE_SHIFT 0u 23 #define SKU_TILE_MASK 0x0000ffffu 24 #define SKU_HW_ID_SHIFT 16u 25 #define SKU_HW_ID_MASK 0xffff0000u 26 27 #define PLL_CONFIG_DEFAULT 0x1 28 #define PLL_CDYN_DEFAULT 0x80 29 #define PLL_EPP_DEFAULT 0x80 30 #define PLL_REF_CLK_FREQ (50 * 1000000) 31 #define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ) 32 33 #define PLL_PROFILING_FREQ_DEFAULT 38400000 34 #define PLL_PROFILING_FREQ_HIGH 400000000 35 36 #define TIM_SAFE_ENABLE 0xf1d0dead 37 #define TIM_WATCHDOG_RESET_VALUE 0xffffffff 38 39 #define TIMEOUT_US (150 * USEC_PER_MSEC) 40 #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) 41 #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) 42 43 #define WEIGHTS_DEFAULT 0xf711f711u 44 #define WEIGHTS_ATS_DEFAULT 0x0000f711u 45 46 #define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \ 47 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \ 48 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \ 49 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \ 50 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \ 51 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \ 52 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT))) 53 54 #define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \ 55 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \ 56 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT))) 57 58 #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK) 59 60 #define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \ 61 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \ 62 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \ 63 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \ 64 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR)) | \ 65 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR))) 66 67 #define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK) 68 #define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1) 69 70 #define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \ 71 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \ 72 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \ 73 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \ 74 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \ 75 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \ 76 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX))) 77 78 static char *ivpu_platform_to_str(u32 platform) 79 { 80 switch (platform) { 81 case IVPU_PLATFORM_SILICON: 82 return "IVPU_PLATFORM_SILICON"; 83 case IVPU_PLATFORM_SIMICS: 84 return "IVPU_PLATFORM_SIMICS"; 85 case IVPU_PLATFORM_FPGA: 86 return "IVPU_PLATFORM_FPGA"; 87 default: 88 return "Invalid platform"; 89 } 90 } 91 92 static const struct dmi_system_id ivpu_dmi_platform_simulation[] = { 93 { 94 .ident = "Intel Simics", 95 .matches = { 96 DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"), 97 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 98 DMI_MATCH(DMI_BOARD_SERIAL, "123456789"), 99 }, 100 }, 101 { 102 .ident = "Intel Simics", 103 .matches = { 104 DMI_MATCH(DMI_BOARD_NAME, "Simics"), 105 }, 106 }, 107 { } 108 }; 109 110 static void ivpu_hw_read_platform(struct ivpu_device *vdev) 111 { 112 if (dmi_check_system(ivpu_dmi_platform_simulation)) 113 vdev->platform = IVPU_PLATFORM_SIMICS; 114 else 115 vdev->platform = IVPU_PLATFORM_SILICON; 116 117 ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", 118 ivpu_platform_to_str(vdev->platform), vdev->platform); 119 } 120 121 static void ivpu_hw_wa_init(struct ivpu_device *vdev) 122 { 123 vdev->wa.punit_disabled = ivpu_is_fpga(vdev); 124 vdev->wa.clear_runtime_mem = false; 125 126 if (ivpu_hw_gen(vdev) == IVPU_HW_40XX) 127 vdev->wa.disable_clock_relinquish = true; 128 } 129 130 static void ivpu_hw_timeouts_init(struct ivpu_device *vdev) 131 { 132 if (ivpu_is_fpga(vdev)) { 133 vdev->timeout.boot = 100000; 134 vdev->timeout.jsm = 50000; 135 vdev->timeout.tdr = 2000000; 136 vdev->timeout.reschedule_suspend = 1000; 137 } else if (ivpu_is_simics(vdev)) { 138 vdev->timeout.boot = 50; 139 vdev->timeout.jsm = 500; 140 vdev->timeout.tdr = 10000; 141 vdev->timeout.reschedule_suspend = 10; 142 } else { 143 vdev->timeout.boot = 1000; 144 vdev->timeout.jsm = 500; 145 vdev->timeout.tdr = 2000; 146 vdev->timeout.reschedule_suspend = 10; 147 } 148 } 149 150 static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev) 151 { 152 return REGB_POLL_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); 153 } 154 155 static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio, 156 u16 target_ratio, u16 epp, u16 config, u16 cdyn) 157 { 158 int ret; 159 u32 val; 160 161 ret = ivpu_pll_wait_for_cmd_send(vdev); 162 if (ret) { 163 ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret); 164 return ret; 165 } 166 167 val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0); 168 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val); 169 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val); 170 REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, val); 171 172 val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1); 173 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val); 174 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, epp, val); 175 REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, val); 176 177 val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2); 178 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val); 179 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CDYN, cdyn, val); 180 REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, val); 181 182 val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_CMD); 183 val = REG_SET_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, val); 184 REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_CMD, val); 185 186 ret = ivpu_pll_wait_for_cmd_send(vdev); 187 if (ret) 188 ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret); 189 190 return ret; 191 } 192 193 static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev) 194 { 195 return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US); 196 } 197 198 static int ivpu_wait_for_clock_own_resource_ack(struct ivpu_device *vdev) 199 { 200 if (ivpu_is_simics(vdev)) 201 return 0; 202 203 return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US); 204 } 205 206 static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev) 207 { 208 struct ivpu_hw_info *hw = vdev->hw; 209 u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio; 210 u32 fmin_fuse, fmax_fuse; 211 212 fmin_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMIN_FUSE); 213 fuse_min_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse); 214 fuse_pn_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse); 215 216 fmax_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMAX_FUSE); 217 fuse_max_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse); 218 219 hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio); 220 hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio); 221 hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 222 } 223 224 static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable) 225 { 226 u16 config = enable ? PLL_CONFIG_DEFAULT : 0; 227 u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0; 228 u16 epp = enable ? PLL_EPP_DEFAULT : 0; 229 struct ivpu_hw_info *hw = vdev->hw; 230 u16 target_ratio = hw->pll.pn_ratio; 231 int ret; 232 233 ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n", 234 PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn); 235 236 ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, 237 target_ratio, epp, config, cdyn); 238 if (ret) { 239 ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret); 240 return ret; 241 } 242 243 if (enable) { 244 ret = ivpu_pll_wait_for_status_ready(vdev); 245 if (ret) { 246 ivpu_err(vdev, "Timed out waiting for PLL ready status\n"); 247 return ret; 248 } 249 } 250 251 return 0; 252 } 253 254 static int ivpu_pll_enable(struct ivpu_device *vdev) 255 { 256 return ivpu_pll_drive(vdev, true); 257 } 258 259 static int ivpu_pll_disable(struct ivpu_device *vdev) 260 { 261 return ivpu_pll_drive(vdev, false); 262 } 263 264 static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable) 265 { 266 u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN); 267 268 if (enable) { 269 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val); 270 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val); 271 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val); 272 } else { 273 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val); 274 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val); 275 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val); 276 } 277 278 REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val); 279 } 280 281 static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable) 282 { 283 u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN); 284 285 if (enable) { 286 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val); 287 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val); 288 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val); 289 } else { 290 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val); 291 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val); 292 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val); 293 } 294 295 REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val); 296 } 297 298 static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val) 299 { 300 u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN); 301 302 if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val)) 303 return -EIO; 304 305 return 0; 306 } 307 308 static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 309 { 310 u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN); 311 312 if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val)) 313 return -EIO; 314 315 return 0; 316 } 317 318 static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 319 { 320 u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY); 321 322 if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val)) 323 return -EIO; 324 325 return 0; 326 } 327 328 static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val) 329 { 330 u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN); 331 332 if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) || 333 !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val)) 334 return -EIO; 335 336 return 0; 337 } 338 339 static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 340 { 341 u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN); 342 343 if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) || 344 !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val)) 345 return -EIO; 346 347 return 0; 348 } 349 350 static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 351 { 352 u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY); 353 354 if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) || 355 !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val)) 356 return -EIO; 357 358 return 0; 359 } 360 361 static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable) 362 { 363 u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN); 364 365 if (enable) 366 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val); 367 else 368 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val); 369 370 REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val); 371 } 372 373 static int ivpu_boot_host_ss_check(struct ivpu_device *vdev) 374 { 375 int ret; 376 377 ret = ivpu_boot_noc_qreqn_check(vdev, 0x0); 378 if (ret) { 379 ivpu_err(vdev, "Failed qreqn check: %d\n", ret); 380 return ret; 381 } 382 383 ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0); 384 if (ret) { 385 ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 386 return ret; 387 } 388 389 ret = ivpu_boot_noc_qdeny_check(vdev, 0x0); 390 if (ret) 391 ivpu_err(vdev, "Failed qdeny check %d\n", ret); 392 393 return ret; 394 } 395 396 static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable) 397 { 398 int ret; 399 u32 val; 400 401 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN); 402 if (enable) 403 val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 404 else 405 val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 406 REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val); 407 408 ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0); 409 if (ret) { 410 ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 411 return ret; 412 } 413 414 ret = ivpu_boot_noc_qdeny_check(vdev, 0x0); 415 if (ret) { 416 ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 417 return ret; 418 } 419 420 if (enable) { 421 REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT); 422 REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT); 423 } 424 425 return ret; 426 } 427 428 static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev) 429 { 430 return ivpu_boot_host_ss_axi_drive(vdev, true); 431 } 432 433 static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable) 434 { 435 int ret; 436 u32 val; 437 438 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN); 439 if (enable) { 440 val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val); 441 val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 442 } else { 443 val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val); 444 val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 445 } 446 REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val); 447 448 ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0); 449 if (ret) { 450 ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 451 return ret; 452 } 453 454 ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0); 455 if (ret) 456 ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 457 458 return ret; 459 } 460 461 static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev) 462 { 463 return ivpu_boot_host_ss_top_noc_drive(vdev, true); 464 } 465 466 static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable) 467 { 468 u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0); 469 470 if (enable) 471 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); 472 else 473 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); 474 475 REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val); 476 477 if (enable) 478 ndelay(500); 479 } 480 481 static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable) 482 { 483 u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0); 484 485 if (enable) 486 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); 487 else 488 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); 489 490 REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val); 491 492 if (!enable) 493 ndelay(500); 494 } 495 496 static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val) 497 { 498 if (ivpu_is_fpga(vdev)) 499 return 0; 500 501 return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, 502 exp_val, PWR_ISLAND_STATUS_TIMEOUT_US); 503 } 504 505 static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable) 506 { 507 u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0); 508 509 if (enable) 510 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); 511 else 512 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); 513 514 REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val); 515 } 516 517 static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) 518 { 519 u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES); 520 521 val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val); 522 val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); 523 val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); 524 525 REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val); 526 } 527 528 static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev) 529 { 530 u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV); 531 532 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val); 533 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val); 534 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val); 535 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val); 536 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val); 537 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val); 538 539 REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val); 540 } 541 542 static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 543 { 544 u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN); 545 546 if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val)) 547 return -EIO; 548 549 return 0; 550 } 551 552 static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 553 { 554 u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY); 555 556 if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val)) 557 return -EIO; 558 559 return 0; 560 } 561 562 static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) 563 { 564 int ret; 565 566 ret = ivpu_wait_for_clock_own_resource_ack(vdev); 567 if (ret) { 568 ivpu_err(vdev, "Timed out waiting for clock own resource ACK\n"); 569 return ret; 570 } 571 572 ivpu_boot_pwr_island_trickle_drive(vdev, true); 573 ivpu_boot_pwr_island_drive(vdev, true); 574 575 ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1); 576 if (ret) { 577 ivpu_err(vdev, "Timed out waiting for power island status\n"); 578 return ret; 579 } 580 581 ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0); 582 if (ret) { 583 ivpu_err(vdev, "Failed qrenqn check %d\n", ret); 584 return ret; 585 } 586 587 ivpu_boot_host_ss_clk_drive(vdev, true); 588 ivpu_boot_host_ss_rst_drive(vdev, true); 589 ivpu_boot_pwr_island_isolation_drive(vdev, false); 590 591 return ret; 592 } 593 594 static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable) 595 { 596 int ret; 597 u32 val; 598 599 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN); 600 if (enable) 601 val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val); 602 else 603 val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val); 604 REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val); 605 606 ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0); 607 if (ret) { 608 ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 609 return ret; 610 } 611 612 ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0); 613 if (ret) 614 ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 615 616 return ret; 617 } 618 619 static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev) 620 { 621 return ivpu_boot_soc_cpu_drive(vdev, true); 622 } 623 624 static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev) 625 { 626 int ret; 627 u32 val; 628 u64 val64; 629 630 ret = ivpu_boot_soc_cpu_enable(vdev); 631 if (ret) { 632 ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret); 633 return ret; 634 } 635 636 val64 = vdev->fw->entry_point; 637 val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1; 638 REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64); 639 640 val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO); 641 val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val); 642 REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val); 643 644 ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n", 645 ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume"); 646 647 return 0; 648 } 649 650 static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable) 651 { 652 int ret; 653 u32 val; 654 655 ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 656 if (ret) { 657 ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); 658 return ret; 659 } 660 661 val = REGB_RD32(VPU_40XX_BUTTRESS_D0I3_CONTROL); 662 if (enable) 663 val = REG_SET_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val); 664 else 665 val = REG_CLR_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val); 666 REGB_WR32(VPU_40XX_BUTTRESS_D0I3_CONTROL, val); 667 668 ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 669 if (ret) { 670 ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); 671 return ret; 672 } 673 674 return 0; 675 } 676 677 static bool ivpu_tile_disable_check(u32 config) 678 { 679 /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */ 680 if (config == 0) 681 return true; 682 683 if (config > BIT(TILE_MAX_NUM - 1)) 684 return false; 685 686 if ((config & (config - 1)) == 0) 687 return true; 688 689 return false; 690 } 691 692 static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev) 693 { 694 struct ivpu_hw_info *hw = vdev->hw; 695 u32 tile_disable; 696 u32 tile_enable; 697 u32 fuse; 698 699 fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE); 700 if (!REG_TEST_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, VALID, fuse)) { 701 ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse); 702 return -EIO; 703 } 704 705 tile_disable = REG_GET_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, CONFIG, fuse); 706 if (!ivpu_tile_disable_check(tile_disable)) { 707 ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable); 708 return -EIO; 709 } 710 711 if (tile_disable) 712 ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n", 713 TILE_MAX_NUM - 1, ffs(tile_disable) - 1); 714 else 715 ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM); 716 717 tile_enable = (~tile_disable) & TILE_MAX_MASK; 718 719 hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku); 720 hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku); 721 hw->tile_fuse = tile_disable; 722 hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; 723 724 ivpu_pll_init_frequency_ratios(vdev); 725 726 ivpu_hw_init_range(&vdev->hw->ranges.global, 0x80000000, SZ_512M); 727 ivpu_hw_init_range(&vdev->hw->ranges.user, 0x80000000, SZ_256M); 728 ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M); 729 ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G); 730 731 return 0; 732 } 733 734 static int ivpu_hw_40xx_reset(struct ivpu_device *vdev) 735 { 736 int ret; 737 u32 val; 738 739 ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US); 740 if (ret) { 741 ivpu_err(vdev, "Wait for *_TRIGGER timed out\n"); 742 return ret; 743 } 744 745 val = REGB_RD32(VPU_40XX_BUTTRESS_IP_RESET); 746 val = REG_SET_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, val); 747 REGB_WR32(VPU_40XX_BUTTRESS_IP_RESET, val); 748 749 ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US); 750 if (ret) 751 ivpu_err(vdev, "Timed out waiting for RESET completion\n"); 752 753 return ret; 754 } 755 756 static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev) 757 { 758 int ret; 759 760 if (IVPU_WA(punit_disabled)) 761 return 0; 762 763 ret = ivpu_boot_d0i3_drive(vdev, true); 764 if (ret) 765 ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret); 766 767 udelay(5); /* VPU requires 5 us to complete the transition */ 768 769 return ret; 770 } 771 772 static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev) 773 { 774 int ret; 775 776 if (IVPU_WA(punit_disabled)) 777 return 0; 778 779 ret = ivpu_boot_d0i3_drive(vdev, false); 780 if (ret) 781 ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret); 782 783 return ret; 784 } 785 786 static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev) 787 { 788 u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS); 789 790 if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) 791 val = REG_CLR_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val); 792 else 793 val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val); 794 795 REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val); 796 } 797 798 static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev) 799 { 800 ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n", 801 REGB_RD32(VPU_40XX_BUTTRESS_HM_ATS) ? "Enable" : "Disable"); 802 } 803 804 static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev) 805 { 806 u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS); 807 808 val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, DISABLE_CLK_RELINQUISH, val); 809 REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val); 810 } 811 812 static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev) 813 { 814 int ret; 815 816 ret = ivpu_hw_40xx_reset(vdev); 817 if (ret) { 818 ivpu_err(vdev, "Failed to reset HW: %d\n", ret); 819 return ret; 820 } 821 822 ivpu_hw_read_platform(vdev); 823 ivpu_hw_wa_init(vdev); 824 ivpu_hw_timeouts_init(vdev); 825 826 ret = ivpu_hw_40xx_d0i3_disable(vdev); 827 if (ret) 828 ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret); 829 830 ret = ivpu_pll_enable(vdev); 831 if (ret) { 832 ivpu_err(vdev, "Failed to enable PLL: %d\n", ret); 833 return ret; 834 } 835 836 if (IVPU_WA(disable_clock_relinquish)) 837 ivpu_hw_40xx_clock_relinquish_disable(vdev); 838 ivpu_hw_40xx_profiling_freq_reg_set(vdev); 839 ivpu_hw_40xx_ats_print(vdev); 840 841 ret = ivpu_boot_host_ss_check(vdev); 842 if (ret) { 843 ivpu_err(vdev, "Failed to configure host SS: %d\n", ret); 844 return ret; 845 } 846 847 ivpu_boot_idle_gen_drive(vdev, false); 848 849 ret = ivpu_boot_pwr_domain_enable(vdev); 850 if (ret) { 851 ivpu_err(vdev, "Failed to enable power domain: %d\n", ret); 852 return ret; 853 } 854 855 ret = ivpu_boot_host_ss_axi_enable(vdev); 856 if (ret) { 857 ivpu_err(vdev, "Failed to enable AXI: %d\n", ret); 858 return ret; 859 } 860 861 ret = ivpu_boot_host_ss_top_noc_enable(vdev); 862 if (ret) 863 ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret); 864 865 return ret; 866 } 867 868 static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev) 869 { 870 int ret; 871 872 ivpu_boot_no_snoop_enable(vdev); 873 ivpu_boot_tbu_mmu_enable(vdev); 874 875 ret = ivpu_boot_soc_cpu_boot(vdev); 876 if (ret) 877 ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret); 878 879 return ret; 880 } 881 882 static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev) 883 { 884 u32 val; 885 886 if (IVPU_WA(punit_disabled)) 887 return true; 888 889 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS); 890 return REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, val) && 891 REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val); 892 } 893 894 static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev) 895 { 896 int ret = 0; 897 898 if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev)) 899 ivpu_warn(vdev, "Failed to reset the VPU\n"); 900 901 if (ivpu_pll_disable(vdev)) { 902 ivpu_err(vdev, "Failed to disable PLL\n"); 903 ret = -EIO; 904 } 905 906 if (ivpu_hw_40xx_d0i3_enable(vdev)) { 907 ivpu_err(vdev, "Failed to enter D0I3\n"); 908 ret = -EIO; 909 } 910 911 return ret; 912 } 913 914 static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev) 915 { 916 u32 val; 917 918 REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 919 REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE); 920 921 REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 922 REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0); 923 924 val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG); 925 val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val); 926 REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val); 927 } 928 929 /* Register indirect accesses */ 930 static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev) 931 { 932 u32 pll_curr_ratio; 933 934 pll_curr_ratio = REGB_RD32(VPU_40XX_BUTTRESS_PLL_FREQ); 935 pll_curr_ratio &= VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK; 936 937 return PLL_RATIO_TO_FREQ(pll_curr_ratio); 938 } 939 940 static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev) 941 { 942 return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET); 943 } 944 945 static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev) 946 { 947 return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE); 948 } 949 950 static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev) 951 { 952 return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE); 953 } 954 955 static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id) 956 { 957 u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0; 958 u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET); 959 960 REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val); 961 } 962 963 static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev) 964 { 965 return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM); 966 } 967 968 static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev) 969 { 970 u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT); 971 972 return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count); 973 } 974 975 static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr) 976 { 977 REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr); 978 } 979 980 static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev) 981 { 982 REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK); 983 } 984 985 static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev) 986 { 987 REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK); 988 REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK); 989 REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK); 990 REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); 991 } 992 993 static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev) 994 { 995 REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); 996 REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK); 997 REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull); 998 REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul); 999 } 1000 1001 static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev) 1002 { 1003 /* TODO: For LNN hang consider engine reset instead of full recovery */ 1004 ivpu_pm_schedule_recovery(vdev); 1005 } 1006 1007 static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev) 1008 { 1009 ivpu_hw_wdt_disable(vdev); 1010 ivpu_pm_schedule_recovery(vdev); 1011 } 1012 1013 static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev) 1014 { 1015 ivpu_pm_schedule_recovery(vdev); 1016 } 1017 1018 /* Handler for IRQs from VPU core (irqV) */ 1019 static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq) 1020 { 1021 u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; 1022 irqreturn_t ret = IRQ_NONE; 1023 1024 if (!status) 1025 return IRQ_NONE; 1026 1027 REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status); 1028 1029 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status)) 1030 ivpu_mmu_irq_evtq_handler(vdev); 1031 1032 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) 1033 ret |= ivpu_ipc_irq_handler(vdev); 1034 1035 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) 1036 ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); 1037 1038 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status)) 1039 ivpu_mmu_irq_gerr_handler(vdev); 1040 1041 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status)) 1042 ivpu_hw_40xx_irq_wdt_mss_handler(vdev); 1043 1044 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status)) 1045 ivpu_hw_40xx_irq_wdt_nce_handler(vdev); 1046 1047 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) 1048 ivpu_hw_40xx_irq_noc_firewall_handler(vdev); 1049 1050 return ret; 1051 } 1052 1053 /* Handler for IRQs from Buttress core (irqB) */ 1054 static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq) 1055 { 1056 bool schedule_recovery = false; 1057 u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; 1058 1059 if (status == 0) 1060 return IRQ_NONE; 1061 1062 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status)) 1063 ivpu_dbg(vdev, IRQ, "FREQ_CHANGE"); 1064 1065 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) { 1066 ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 1067 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1), 1068 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2)); 1069 REGB_WR32(VPU_40XX_BUTTRESS_ATS_ERR_CLEAR, 0x1); 1070 schedule_recovery = true; 1071 } 1072 1073 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, status)) { 1074 ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG)); 1075 REGB_WR32(VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR, 0x1); 1076 schedule_recovery = true; 1077 } 1078 1079 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, status)) { 1080 ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG)); 1081 REGB_WR32(VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR, 0x1); 1082 schedule_recovery = true; 1083 } 1084 1085 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, status)) { 1086 ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x", 1087 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW), 1088 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH)); 1089 REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR, 0x1); 1090 schedule_recovery = true; 1091 } 1092 1093 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, status)) { 1094 ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x", 1095 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW), 1096 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH)); 1097 REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR, 0x1); 1098 schedule_recovery = true; 1099 } 1100 1101 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, status)) { 1102 ivpu_err(vdev, "Survivability error detected\n"); 1103 schedule_recovery = true; 1104 } 1105 1106 /* This must be done after interrupts are cleared at the source. */ 1107 REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status); 1108 1109 if (schedule_recovery) 1110 ivpu_pm_schedule_recovery(vdev); 1111 1112 return IRQ_HANDLED; 1113 } 1114 1115 static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr) 1116 { 1117 struct ivpu_device *vdev = ptr; 1118 irqreturn_t ret = IRQ_NONE; 1119 1120 REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); 1121 1122 ret |= ivpu_hw_40xx_irqv_handler(vdev, irq); 1123 ret |= ivpu_hw_40xx_irqb_handler(vdev, irq); 1124 1125 /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ 1126 REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); 1127 1128 if (ret & IRQ_WAKE_THREAD) 1129 return IRQ_WAKE_THREAD; 1130 1131 return ret; 1132 } 1133 1134 static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev) 1135 { 1136 u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; 1137 u32 irqb = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; 1138 1139 if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev)) 1140 ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ"); 1141 1142 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv)) 1143 ivpu_err(vdev, "WDT MSS timeout detected\n"); 1144 1145 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv)) 1146 ivpu_err(vdev, "WDT NCE timeout detected\n"); 1147 1148 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv)) 1149 ivpu_err(vdev, "NOC Firewall irq detected\n"); 1150 1151 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb)) { 1152 ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 1153 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1), 1154 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2)); 1155 } 1156 1157 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, irqb)) 1158 ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG)); 1159 1160 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, irqb)) 1161 ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG)); 1162 1163 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, irqb)) 1164 ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n", 1165 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW), 1166 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH)); 1167 1168 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, irqb)) 1169 ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n", 1170 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW), 1171 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH)); 1172 1173 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, irqb)) 1174 ivpu_err(vdev, "Survivability error detected\n"); 1175 } 1176 1177 const struct ivpu_hw_ops ivpu_hw_40xx_ops = { 1178 .info_init = ivpu_hw_40xx_info_init, 1179 .power_up = ivpu_hw_40xx_power_up, 1180 .is_idle = ivpu_hw_40xx_is_idle, 1181 .power_down = ivpu_hw_40xx_power_down, 1182 .reset = ivpu_hw_40xx_reset, 1183 .boot_fw = ivpu_hw_40xx_boot_fw, 1184 .wdt_disable = ivpu_hw_40xx_wdt_disable, 1185 .diagnose_failure = ivpu_hw_40xx_diagnose_failure, 1186 .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get, 1187 .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get, 1188 .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get, 1189 .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get, 1190 .reg_db_set = ivpu_hw_40xx_reg_db_set, 1191 .reg_ipc_rx_addr_get = ivpu_hw_40xx_reg_ipc_rx_addr_get, 1192 .reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get, 1193 .reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set, 1194 .irq_clear = ivpu_hw_40xx_irq_clear, 1195 .irq_enable = ivpu_hw_40xx_irq_enable, 1196 .irq_disable = ivpu_hw_40xx_irq_disable, 1197 .irq_handler = ivpu_hw_40xx_irq_handler, 1198 }; 1199