1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2020-2023 Intel Corporation 4 */ 5 6 #include "ivpu_drv.h" 7 #include "ivpu_fw.h" 8 #include "ivpu_hw.h" 9 #include "ivpu_hw_40xx_reg.h" 10 #include "ivpu_hw_reg_io.h" 11 #include "ivpu_ipc.h" 12 #include "ivpu_mmu.h" 13 #include "ivpu_pm.h" 14 15 #include <linux/dmi.h> 16 17 #define TILE_MAX_NUM 6 18 #define TILE_MAX_MASK 0x3f 19 20 #define LNL_HW_ID 0x4040 21 22 #define SKU_TILE_SHIFT 0u 23 #define SKU_TILE_MASK 0x0000ffffu 24 #define SKU_HW_ID_SHIFT 16u 25 #define SKU_HW_ID_MASK 0xffff0000u 26 27 #define PLL_CONFIG_DEFAULT 0x1 28 #define PLL_CDYN_DEFAULT 0x80 29 #define PLL_EPP_DEFAULT 0x80 30 #define PLL_REF_CLK_FREQ (50 * 1000000) 31 #define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ) 32 33 #define PLL_PROFILING_FREQ_DEFAULT 38400000 34 #define PLL_PROFILING_FREQ_HIGH 400000000 35 36 #define TIM_SAFE_ENABLE 0xf1d0dead 37 #define TIM_WATCHDOG_RESET_VALUE 0xffffffff 38 39 #define TIMEOUT_US (150 * USEC_PER_MSEC) 40 #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) 41 #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) 42 43 #define WEIGHTS_DEFAULT 0xf711f711u 44 #define WEIGHTS_ATS_DEFAULT 0x0000f711u 45 46 #define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \ 47 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \ 48 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \ 49 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \ 50 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \ 51 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \ 52 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT))) 53 54 #define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \ 55 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \ 56 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT))) 57 58 #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK) 59 60 #define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \ 61 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \ 62 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \ 63 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \ 64 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \ 65 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR)) | \ 66 (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR))) 67 68 #define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK) 69 #define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1) 70 71 #define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \ 72 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \ 73 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \ 74 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \ 75 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \ 76 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \ 77 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX))) 78 79 static char *ivpu_platform_to_str(u32 platform) 80 { 81 switch (platform) { 82 case IVPU_PLATFORM_SILICON: 83 return "IVPU_PLATFORM_SILICON"; 84 case IVPU_PLATFORM_SIMICS: 85 return "IVPU_PLATFORM_SIMICS"; 86 case IVPU_PLATFORM_FPGA: 87 return "IVPU_PLATFORM_FPGA"; 88 default: 89 return "Invalid platform"; 90 } 91 } 92 93 static const struct dmi_system_id ivpu_dmi_platform_simulation[] = { 94 { 95 .ident = "Intel Simics", 96 .matches = { 97 DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"), 98 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 99 DMI_MATCH(DMI_BOARD_SERIAL, "123456789"), 100 }, 101 }, 102 { 103 .ident = "Intel Simics", 104 .matches = { 105 DMI_MATCH(DMI_BOARD_NAME, "Simics"), 106 }, 107 }, 108 { } 109 }; 110 111 static void ivpu_hw_read_platform(struct ivpu_device *vdev) 112 { 113 if (dmi_check_system(ivpu_dmi_platform_simulation)) 114 vdev->platform = IVPU_PLATFORM_SIMICS; 115 else 116 vdev->platform = IVPU_PLATFORM_SILICON; 117 118 ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", 119 ivpu_platform_to_str(vdev->platform), vdev->platform); 120 } 121 122 static void ivpu_hw_wa_init(struct ivpu_device *vdev) 123 { 124 vdev->wa.punit_disabled = ivpu_is_fpga(vdev); 125 vdev->wa.clear_runtime_mem = false; 126 127 if (ivpu_hw_gen(vdev) == IVPU_HW_40XX) 128 vdev->wa.disable_clock_relinquish = true; 129 } 130 131 static void ivpu_hw_timeouts_init(struct ivpu_device *vdev) 132 { 133 if (ivpu_is_fpga(vdev)) { 134 vdev->timeout.boot = 100000; 135 vdev->timeout.jsm = 50000; 136 vdev->timeout.tdr = 2000000; 137 vdev->timeout.reschedule_suspend = 1000; 138 } else if (ivpu_is_simics(vdev)) { 139 vdev->timeout.boot = 50; 140 vdev->timeout.jsm = 500; 141 vdev->timeout.tdr = 10000; 142 vdev->timeout.reschedule_suspend = 10; 143 } else { 144 vdev->timeout.boot = 1000; 145 vdev->timeout.jsm = 500; 146 vdev->timeout.tdr = 2000; 147 vdev->timeout.reschedule_suspend = 10; 148 } 149 } 150 151 static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev) 152 { 153 return REGB_POLL_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); 154 } 155 156 static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio, 157 u16 target_ratio, u16 epp, u16 config, u16 cdyn) 158 { 159 int ret; 160 u32 val; 161 162 ret = ivpu_pll_wait_for_cmd_send(vdev); 163 if (ret) { 164 ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret); 165 return ret; 166 } 167 168 val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0); 169 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val); 170 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val); 171 REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0, val); 172 173 val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1); 174 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val); 175 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, epp, val); 176 REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1, val); 177 178 val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2); 179 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val); 180 val = REG_SET_FLD_NUM(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, CDYN, cdyn, val); 181 REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2, val); 182 183 val = REGB_RD32(VPU_40XX_BUTTRESS_WP_REQ_CMD); 184 val = REG_SET_FLD(VPU_40XX_BUTTRESS_WP_REQ_CMD, SEND, val); 185 REGB_WR32(VPU_40XX_BUTTRESS_WP_REQ_CMD, val); 186 187 ret = ivpu_pll_wait_for_cmd_send(vdev); 188 if (ret) 189 ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret); 190 191 return ret; 192 } 193 194 static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev) 195 { 196 return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US); 197 } 198 199 static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev) 200 { 201 struct ivpu_hw_info *hw = vdev->hw; 202 u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio; 203 u32 fmin_fuse, fmax_fuse; 204 205 fmin_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMIN_FUSE); 206 fuse_min_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse); 207 fuse_pn_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse); 208 209 fmax_fuse = REGB_RD32(VPU_40XX_BUTTRESS_FMAX_FUSE); 210 fuse_max_ratio = REG_GET_FLD(VPU_40XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse); 211 212 hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio); 213 hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio); 214 hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 215 } 216 217 static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable) 218 { 219 u16 config = enable ? PLL_CONFIG_DEFAULT : 0; 220 u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0; 221 u16 epp = enable ? PLL_EPP_DEFAULT : 0; 222 struct ivpu_hw_info *hw = vdev->hw; 223 u16 target_ratio = hw->pll.pn_ratio; 224 int ret; 225 226 ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n", 227 PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn); 228 229 ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, 230 target_ratio, epp, config, cdyn); 231 if (ret) { 232 ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret); 233 return ret; 234 } 235 236 if (enable) { 237 ret = ivpu_pll_wait_for_status_ready(vdev); 238 if (ret) { 239 ivpu_err(vdev, "Timed out waiting for PLL ready status\n"); 240 return ret; 241 } 242 } 243 244 return 0; 245 } 246 247 static int ivpu_pll_enable(struct ivpu_device *vdev) 248 { 249 return ivpu_pll_drive(vdev, true); 250 } 251 252 static int ivpu_pll_disable(struct ivpu_device *vdev) 253 { 254 return ivpu_pll_drive(vdev, false); 255 } 256 257 static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable) 258 { 259 u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN); 260 261 if (enable) { 262 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val); 263 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val); 264 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val); 265 } else { 266 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val); 267 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val); 268 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val); 269 } 270 271 REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val); 272 } 273 274 static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable) 275 { 276 u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN); 277 278 if (enable) { 279 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val); 280 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val); 281 val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val); 282 } else { 283 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val); 284 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val); 285 val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val); 286 } 287 288 REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val); 289 } 290 291 static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val) 292 { 293 u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN); 294 295 if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val)) 296 return -EIO; 297 298 return 0; 299 } 300 301 static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 302 { 303 u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN); 304 305 if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val)) 306 return -EIO; 307 308 return 0; 309 } 310 311 static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 312 { 313 u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY); 314 315 if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val)) 316 return -EIO; 317 318 return 0; 319 } 320 321 static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val) 322 { 323 u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN); 324 325 if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) || 326 !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val)) 327 return -EIO; 328 329 return 0; 330 } 331 332 static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 333 { 334 u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN); 335 336 if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) || 337 !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val)) 338 return -EIO; 339 340 return 0; 341 } 342 343 static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 344 { 345 u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY); 346 347 if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) || 348 !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val)) 349 return -EIO; 350 351 return 0; 352 } 353 354 static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable) 355 { 356 u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN); 357 358 if (enable) 359 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val); 360 else 361 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val); 362 363 REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val); 364 } 365 366 static int ivpu_boot_host_ss_check(struct ivpu_device *vdev) 367 { 368 int ret; 369 370 ret = ivpu_boot_noc_qreqn_check(vdev, 0x0); 371 if (ret) { 372 ivpu_err(vdev, "Failed qreqn check: %d\n", ret); 373 return ret; 374 } 375 376 ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0); 377 if (ret) { 378 ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 379 return ret; 380 } 381 382 ret = ivpu_boot_noc_qdeny_check(vdev, 0x0); 383 if (ret) 384 ivpu_err(vdev, "Failed qdeny check %d\n", ret); 385 386 return ret; 387 } 388 389 static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable) 390 { 391 int ret; 392 u32 val; 393 394 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN); 395 if (enable) 396 val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 397 else 398 val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 399 REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val); 400 401 ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0); 402 if (ret) { 403 ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 404 return ret; 405 } 406 407 ret = ivpu_boot_noc_qdeny_check(vdev, 0x0); 408 if (ret) { 409 ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 410 return ret; 411 } 412 413 if (enable) { 414 REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT); 415 REGB_WR32(VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT); 416 } 417 418 return ret; 419 } 420 421 static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev) 422 { 423 return ivpu_boot_host_ss_axi_drive(vdev, true); 424 } 425 426 static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable) 427 { 428 int ret; 429 u32 val; 430 431 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN); 432 if (enable) { 433 val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val); 434 val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 435 } else { 436 val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val); 437 val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 438 } 439 REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val); 440 441 ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0); 442 if (ret) { 443 ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 444 return ret; 445 } 446 447 ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0); 448 if (ret) 449 ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 450 451 return ret; 452 } 453 454 static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev) 455 { 456 return ivpu_boot_host_ss_top_noc_drive(vdev, true); 457 } 458 459 static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable) 460 { 461 u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0); 462 463 if (enable) 464 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); 465 else 466 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); 467 468 REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val); 469 470 if (enable) 471 ndelay(500); 472 } 473 474 static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable) 475 { 476 u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0); 477 478 if (enable) 479 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); 480 else 481 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); 482 483 REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val); 484 485 if (!enable) 486 ndelay(500); 487 } 488 489 static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val) 490 { 491 if (ivpu_is_fpga(vdev)) 492 return 0; 493 494 return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, 495 exp_val, PWR_ISLAND_STATUS_TIMEOUT_US); 496 } 497 498 static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable) 499 { 500 u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0); 501 502 if (enable) 503 val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); 504 else 505 val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); 506 507 REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val); 508 } 509 510 static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) 511 { 512 u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES); 513 514 val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val); 515 val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); 516 val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); 517 518 REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val); 519 } 520 521 static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev) 522 { 523 u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV); 524 525 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val); 526 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val); 527 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val); 528 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val); 529 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val); 530 val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val); 531 532 REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val); 533 } 534 535 static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 536 { 537 u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN); 538 539 if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val)) 540 return -EIO; 541 542 return 0; 543 } 544 545 static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 546 { 547 u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY); 548 549 if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val)) 550 return -EIO; 551 552 return 0; 553 } 554 555 static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) 556 { 557 int ret; 558 559 ivpu_boot_pwr_island_trickle_drive(vdev, true); 560 ivpu_boot_pwr_island_drive(vdev, true); 561 562 ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1); 563 if (ret) { 564 ivpu_err(vdev, "Timed out waiting for power island status\n"); 565 return ret; 566 } 567 568 ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0); 569 if (ret) { 570 ivpu_err(vdev, "Failed qrenqn check %d\n", ret); 571 return ret; 572 } 573 574 ivpu_boot_host_ss_clk_drive(vdev, true); 575 ivpu_boot_host_ss_rst_drive(vdev, true); 576 ivpu_boot_pwr_island_isolation_drive(vdev, false); 577 578 return ret; 579 } 580 581 static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable) 582 { 583 int ret; 584 u32 val; 585 586 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN); 587 if (enable) 588 val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val); 589 else 590 val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val); 591 REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val); 592 593 ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0); 594 if (ret) { 595 ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 596 return ret; 597 } 598 599 ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0); 600 if (ret) 601 ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 602 603 return ret; 604 } 605 606 static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev) 607 { 608 return ivpu_boot_soc_cpu_drive(vdev, true); 609 } 610 611 static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev) 612 { 613 int ret; 614 u32 val; 615 u64 val64; 616 617 ret = ivpu_boot_soc_cpu_enable(vdev); 618 if (ret) { 619 ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret); 620 return ret; 621 } 622 623 val64 = vdev->fw->entry_point; 624 val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1; 625 REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64); 626 627 val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO); 628 val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val); 629 REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val); 630 631 ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n", 632 ivpu_fw_is_cold_boot(vdev) ? "cold boot" : "resume"); 633 634 return 0; 635 } 636 637 static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable) 638 { 639 int ret; 640 u32 val; 641 642 ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 643 if (ret) { 644 ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); 645 return ret; 646 } 647 648 val = REGB_RD32(VPU_40XX_BUTTRESS_D0I3_CONTROL); 649 if (enable) 650 val = REG_SET_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val); 651 else 652 val = REG_CLR_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, I3, val); 653 REGB_WR32(VPU_40XX_BUTTRESS_D0I3_CONTROL, val); 654 655 ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 656 if (ret) { 657 ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); 658 return ret; 659 } 660 661 return 0; 662 } 663 664 static bool ivpu_tile_disable_check(u32 config) 665 { 666 /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */ 667 if (config == 0) 668 return true; 669 670 if (config > BIT(TILE_MAX_NUM - 1)) 671 return false; 672 673 if ((config & (config - 1)) == 0) 674 return true; 675 676 return false; 677 } 678 679 static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev) 680 { 681 struct ivpu_hw_info *hw = vdev->hw; 682 u32 tile_disable; 683 u32 tile_enable; 684 u32 fuse; 685 686 fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE); 687 if (!REG_TEST_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, VALID, fuse)) { 688 ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse); 689 return -EIO; 690 } 691 692 tile_disable = REG_GET_FLD(VPU_40XX_BUTTRESS_TILE_FUSE, CONFIG, fuse); 693 if (!ivpu_tile_disable_check(tile_disable)) { 694 ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable); 695 return -EIO; 696 } 697 698 if (tile_disable) 699 ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n", 700 TILE_MAX_NUM - 1, ffs(tile_disable) - 1); 701 else 702 ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM); 703 704 tile_enable = (~tile_disable) & TILE_MAX_MASK; 705 706 hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku); 707 hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku); 708 hw->tile_fuse = tile_disable; 709 hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; 710 711 ivpu_pll_init_frequency_ratios(vdev); 712 713 ivpu_hw_init_range(&vdev->hw->ranges.global, 0x80000000, SZ_512M); 714 ivpu_hw_init_range(&vdev->hw->ranges.user, 0x80000000, SZ_256M); 715 ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M); 716 ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G); 717 718 return 0; 719 } 720 721 static int ivpu_hw_40xx_reset(struct ivpu_device *vdev) 722 { 723 int ret; 724 u32 val; 725 726 ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US); 727 if (ret) { 728 ivpu_err(vdev, "Wait for *_TRIGGER timed out\n"); 729 return ret; 730 } 731 732 val = REGB_RD32(VPU_40XX_BUTTRESS_IP_RESET); 733 val = REG_SET_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, val); 734 REGB_WR32(VPU_40XX_BUTTRESS_IP_RESET, val); 735 736 ret = REGB_POLL_FLD(VPU_40XX_BUTTRESS_IP_RESET, TRIGGER, 0, TIMEOUT_US); 737 if (ret) 738 ivpu_err(vdev, "Timed out waiting for RESET completion\n"); 739 740 return ret; 741 } 742 743 static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev) 744 { 745 int ret; 746 747 if (IVPU_WA(punit_disabled)) 748 return 0; 749 750 ret = ivpu_boot_d0i3_drive(vdev, true); 751 if (ret) 752 ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret); 753 754 udelay(5); /* VPU requires 5 us to complete the transition */ 755 756 return ret; 757 } 758 759 static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev) 760 { 761 int ret; 762 763 if (IVPU_WA(punit_disabled)) 764 return 0; 765 766 ret = ivpu_boot_d0i3_drive(vdev, false); 767 if (ret) 768 ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret); 769 770 return ret; 771 } 772 773 static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev) 774 { 775 u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS); 776 777 if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) 778 val = REG_CLR_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val); 779 else 780 val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, PERF_CLK, val); 781 782 REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val); 783 } 784 785 static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev) 786 { 787 ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n", 788 REGB_RD32(VPU_40XX_BUTTRESS_HM_ATS) ? "Enable" : "Disable"); 789 } 790 791 static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev) 792 { 793 u32 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS); 794 795 val = REG_SET_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, DISABLE_CLK_RELINQUISH, val); 796 REGB_WR32(VPU_40XX_BUTTRESS_VPU_STATUS, val); 797 } 798 799 static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev) 800 { 801 int ret; 802 803 ret = ivpu_hw_40xx_reset(vdev); 804 if (ret) { 805 ivpu_err(vdev, "Failed to reset HW: %d\n", ret); 806 return ret; 807 } 808 809 ivpu_hw_read_platform(vdev); 810 ivpu_hw_wa_init(vdev); 811 ivpu_hw_timeouts_init(vdev); 812 813 ret = ivpu_hw_40xx_d0i3_disable(vdev); 814 if (ret) 815 ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret); 816 817 ret = ivpu_pll_enable(vdev); 818 if (ret) { 819 ivpu_err(vdev, "Failed to enable PLL: %d\n", ret); 820 return ret; 821 } 822 823 if (IVPU_WA(disable_clock_relinquish)) 824 ivpu_hw_40xx_clock_relinquish_disable(vdev); 825 ivpu_hw_40xx_profiling_freq_reg_set(vdev); 826 ivpu_hw_40xx_ats_print(vdev); 827 828 ret = ivpu_boot_host_ss_check(vdev); 829 if (ret) { 830 ivpu_err(vdev, "Failed to configure host SS: %d\n", ret); 831 return ret; 832 } 833 834 ivpu_boot_idle_gen_drive(vdev, false); 835 836 ret = ivpu_boot_pwr_domain_enable(vdev); 837 if (ret) { 838 ivpu_err(vdev, "Failed to enable power domain: %d\n", ret); 839 return ret; 840 } 841 842 ret = ivpu_boot_host_ss_axi_enable(vdev); 843 if (ret) { 844 ivpu_err(vdev, "Failed to enable AXI: %d\n", ret); 845 return ret; 846 } 847 848 ret = ivpu_boot_host_ss_top_noc_enable(vdev); 849 if (ret) 850 ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret); 851 852 return ret; 853 } 854 855 static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev) 856 { 857 int ret; 858 859 ivpu_boot_no_snoop_enable(vdev); 860 ivpu_boot_tbu_mmu_enable(vdev); 861 862 ret = ivpu_boot_soc_cpu_boot(vdev); 863 if (ret) 864 ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret); 865 866 return ret; 867 } 868 869 static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev) 870 { 871 u32 val; 872 873 if (IVPU_WA(punit_disabled)) 874 return true; 875 876 val = REGB_RD32(VPU_40XX_BUTTRESS_VPU_STATUS); 877 return REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, val) && 878 REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val); 879 } 880 881 static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev) 882 { 883 int ret = 0; 884 885 if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev)) 886 ivpu_warn(vdev, "Failed to reset the VPU\n"); 887 888 if (ivpu_pll_disable(vdev)) { 889 ivpu_err(vdev, "Failed to disable PLL\n"); 890 ret = -EIO; 891 } 892 893 if (ivpu_hw_40xx_d0i3_enable(vdev)) { 894 ivpu_err(vdev, "Failed to enter D0I3\n"); 895 ret = -EIO; 896 } 897 898 return ret; 899 } 900 901 static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev) 902 { 903 u32 val; 904 905 REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 906 REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE); 907 908 REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 909 REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0); 910 911 val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG); 912 val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val); 913 REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val); 914 } 915 916 /* Register indirect accesses */ 917 static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev) 918 { 919 u32 pll_curr_ratio; 920 921 pll_curr_ratio = REGB_RD32(VPU_40XX_BUTTRESS_PLL_FREQ); 922 pll_curr_ratio &= VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK; 923 924 return PLL_RATIO_TO_FREQ(pll_curr_ratio); 925 } 926 927 static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev) 928 { 929 return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET); 930 } 931 932 static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev) 933 { 934 return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE); 935 } 936 937 static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev) 938 { 939 return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE); 940 } 941 942 static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id) 943 { 944 u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0; 945 u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET); 946 947 REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val); 948 } 949 950 static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev) 951 { 952 return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM); 953 } 954 955 static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev) 956 { 957 u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT); 958 959 return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count); 960 } 961 962 static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr) 963 { 964 REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr); 965 } 966 967 static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev) 968 { 969 REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK); 970 } 971 972 static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev) 973 { 974 REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK); 975 REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK); 976 REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK); 977 REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); 978 } 979 980 static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev) 981 { 982 REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); 983 REGB_WR32(VPU_40XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK); 984 REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull); 985 REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul); 986 } 987 988 static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev) 989 { 990 /* TODO: For LNN hang consider engine reset instead of full recovery */ 991 ivpu_pm_schedule_recovery(vdev); 992 } 993 994 static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev) 995 { 996 ivpu_hw_wdt_disable(vdev); 997 ivpu_pm_schedule_recovery(vdev); 998 } 999 1000 static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev) 1001 { 1002 ivpu_pm_schedule_recovery(vdev); 1003 } 1004 1005 /* Handler for IRQs from VPU core (irqV) */ 1006 static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq) 1007 { 1008 u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; 1009 irqreturn_t ret = IRQ_NONE; 1010 1011 if (!status) 1012 return IRQ_NONE; 1013 1014 REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status); 1015 1016 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status)) 1017 ivpu_mmu_irq_evtq_handler(vdev); 1018 1019 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) 1020 ret |= ivpu_ipc_irq_handler(vdev); 1021 1022 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) 1023 ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); 1024 1025 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status)) 1026 ivpu_mmu_irq_gerr_handler(vdev); 1027 1028 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status)) 1029 ivpu_hw_40xx_irq_wdt_mss_handler(vdev); 1030 1031 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status)) 1032 ivpu_hw_40xx_irq_wdt_nce_handler(vdev); 1033 1034 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) 1035 ivpu_hw_40xx_irq_noc_firewall_handler(vdev); 1036 1037 return ret; 1038 } 1039 1040 /* Handler for IRQs from Buttress core (irqB) */ 1041 static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq) 1042 { 1043 bool schedule_recovery = false; 1044 u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; 1045 1046 if (status == 0) 1047 return IRQ_NONE; 1048 1049 REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status); 1050 1051 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status)) 1052 ivpu_dbg(vdev, IRQ, "FREQ_CHANGE"); 1053 1054 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) { 1055 ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 1056 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1), 1057 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2)); 1058 REGB_WR32(VPU_40XX_BUTTRESS_ATS_ERR_CLEAR, 0x1); 1059 schedule_recovery = true; 1060 } 1061 1062 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, status)) { 1063 ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG)); 1064 REGB_WR32(VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR, 0x1); 1065 schedule_recovery = true; 1066 } 1067 1068 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, status)) { 1069 ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG)); 1070 REGB_WR32(VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR, 0x1); 1071 schedule_recovery = true; 1072 } 1073 1074 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, status)) { 1075 ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x", 1076 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW), 1077 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH)); 1078 REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR, 0x1); 1079 schedule_recovery = true; 1080 } 1081 1082 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, status)) { 1083 ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x", 1084 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW), 1085 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH)); 1086 REGB_WR32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR, 0x1); 1087 schedule_recovery = true; 1088 } 1089 1090 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, status)) { 1091 ivpu_err(vdev, "Survivability error detected\n"); 1092 schedule_recovery = true; 1093 } 1094 1095 if (schedule_recovery) 1096 ivpu_pm_schedule_recovery(vdev); 1097 1098 return IRQ_HANDLED; 1099 } 1100 1101 static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr) 1102 { 1103 struct ivpu_device *vdev = ptr; 1104 irqreturn_t ret = IRQ_NONE; 1105 1106 ret |= ivpu_hw_40xx_irqv_handler(vdev, irq); 1107 ret |= ivpu_hw_40xx_irqb_handler(vdev, irq); 1108 1109 if (ret & IRQ_WAKE_THREAD) 1110 return IRQ_WAKE_THREAD; 1111 1112 return ret; 1113 } 1114 1115 static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev) 1116 { 1117 u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; 1118 u32 irqb = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; 1119 1120 if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev)) 1121 ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ"); 1122 1123 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv)) 1124 ivpu_err(vdev, "WDT MSS timeout detected\n"); 1125 1126 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv)) 1127 ivpu_err(vdev, "WDT NCE timeout detected\n"); 1128 1129 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv)) 1130 ivpu_err(vdev, "NOC Firewall irq detected\n"); 1131 1132 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb)) { 1133 ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 1134 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG1), 1135 REGB_RD32(VPU_40XX_BUTTRESS_ATS_ERR_LOG2)); 1136 } 1137 1138 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR, irqb)) 1139 ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI0_ERR_LOG)); 1140 1141 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR, irqb)) 1142 ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_40XX_BUTTRESS_CFI1_ERR_LOG)); 1143 1144 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR, irqb)) 1145 ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n", 1146 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW), 1147 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH)); 1148 1149 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR1_ERR, irqb)) 1150 ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n", 1151 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW), 1152 REGB_RD32(VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH)); 1153 1154 if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, SURV_ERR, irqb)) 1155 ivpu_err(vdev, "Survivability error detected\n"); 1156 } 1157 1158 const struct ivpu_hw_ops ivpu_hw_40xx_ops = { 1159 .info_init = ivpu_hw_40xx_info_init, 1160 .power_up = ivpu_hw_40xx_power_up, 1161 .is_idle = ivpu_hw_40xx_is_idle, 1162 .power_down = ivpu_hw_40xx_power_down, 1163 .boot_fw = ivpu_hw_40xx_boot_fw, 1164 .wdt_disable = ivpu_hw_40xx_wdt_disable, 1165 .diagnose_failure = ivpu_hw_40xx_diagnose_failure, 1166 .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get, 1167 .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get, 1168 .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get, 1169 .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get, 1170 .reg_db_set = ivpu_hw_40xx_reg_db_set, 1171 .reg_ipc_rx_addr_get = ivpu_hw_40xx_reg_ipc_rx_addr_get, 1172 .reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get, 1173 .reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set, 1174 .irq_clear = ivpu_hw_40xx_irq_clear, 1175 .irq_enable = ivpu_hw_40xx_irq_enable, 1176 .irq_disable = ivpu_hw_40xx_irq_disable, 1177 .irq_handler = ivpu_hw_40xx_irq_handler, 1178 }; 1179