/*
 * Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <fsl_immap.h>
#include <fsl_ifc.h>
#include <ahci.h>
#include <scsi.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/io.h>
#include <asm/global_data.h>
#include <asm/arch-fsl-layerscape/config.h>
#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
#include <fsl_csu.h>
#endif
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>
#endif
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

bool soc_has_dp_ddr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A, LS2088A and LS2048A have DP_DDR */
	if ((SVR_SOC_VER(svr) == SVR_LS2085A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2088A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2048A))
		return true;

	return false;
}

bool soc_has_aiop(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A has AIOP */
	if (SVR_SOC_VER(svr) == SVR_LS2085A)
		return true;

	return false;
}

static inline void set_usb_txvreftune(u32 __iomem *scfg, u32 offset)
{
	scfg_clrsetbits32(scfg + offset / 4,
			  0xF << 6,
			  SCFG_USB_TXVREFTUNE << 6);
}

static void erratum_a009008(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009008
	u32 __iomem *scfg = (u32 __iomem *)SCFG_BASE;

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB1);
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB2);
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB3);
#elif defined(CONFIG_ARCH_LS2080A)
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR);
#endif
#endif /* CONFIG_SYS_FSL_ERRATUM_A009008 */
}

static inline void set_usb_sqrxtune(u32 __iomem *scfg, u32 offset)
{
	scfg_clrbits32(scfg + offset / 4,
		       SCFG_USB_SQRXTUNE_MASK << 23);
}

static void erratum_a009798(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009798
	u32 __iomem *scfg = (u32 __iomem *)SCFG_BASE;

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB1);
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB2);
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB3);
#elif defined(CONFIG_ARCH_LS2080A)
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR);
#endif
#endif /* CONFIG_SYS_FSL_ERRATUM_A009798 */
}

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
static inline void set_usb_pcstxswingfull(u32 __iomem *scfg, u32 offset)
{
	scfg_clrsetbits32(scfg + offset / 4,
			  0x7F << 9,
			  SCFG_USB_PCSTXSWINGFULL << 9);
}
#endif

static void erratum_a008997(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008997
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	u32 __iomem *scfg = (u32 __iomem *)SCFG_BASE;

	set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB1);
	set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB2);
	set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB3);
#endif
#endif /* CONFIG_SYS_FSL_ERRATUM_A008997 */
}
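
/*
 * Erratum A009007 (below) writes a fixed four-value equalization sequence
 * to the USB PHY RX_OVRD_IN_HI register. The register sits in SCFG space
 * on LS1043A/LS1046A (big-endian accessors) and in DCSR space on
 * LS2080A/LS1088A (little-endian accessors), hence the two macro variants.
 */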
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)

#define PROGRAM_USB_PHY_RX_OVRD_IN_HI(phy)	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_1);	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_2);	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_3);	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_4)

#elif defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)

#define PROGRAM_USB_PHY_RX_OVRD_IN_HI(phy)	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_1);	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_2);	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_3);	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_4)

#endif

static void erratum_a009007(void)
{
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	void __iomem *usb_phy = (void __iomem *)SCFG_USB_PHY1;

	PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);

	usb_phy = (void __iomem *)SCFG_USB_PHY2;
	PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);

	usb_phy = (void __iomem *)SCFG_USB_PHY3;
	PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);
#elif defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	void __iomem *dcsr = (void __iomem *)DCSR_BASE;

	PROGRAM_USB_PHY_RX_OVRD_IN_HI(dcsr + DCSR_USB_PHY1);
	PROGRAM_USB_PHY_RX_OVRD_IN_HI(dcsr + DCSR_USB_PHY2);
#endif /* CONFIG_SYS_FSL_ERRATUM_A009007 */
}

#if defined(CONFIG_FSL_LSCH3)
/*
 * This erratum requires setting a value in eddrtqcr1 to
 * optimize DDR performance.
 */
static void erratum_a008336(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008336
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#ifdef CONFIG_SYS_FSL_DCSR_DDR2_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR2_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#endif
}

/*
 * This erratum requires a register write before
 * memory controller 3 is enabled.
 */
static void erratum_a008514(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008514
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR3_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR3_ADDR + 0x800;
	out_le32(eddrtqcr1, 0x63b20002);
#endif
#endif
}
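
/*
 * Erratum A009635: program the EPU to generate periodic wake-up events.
 * By default the interval below equals the platform frequency in MHz,
 * i.e. the number of platform cycles in one microsecond; it can be
 * overridden from the environment, e.g. "setenv a009635_interval_val 600".
 */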
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
#define PLATFORM_CYCLE_ENV_VAR "a009635_interval_val"

static unsigned long get_internval_val_mhz(void)
{
	char *interval = env_get(PLATFORM_CYCLE_ENV_VAR);
	/*
	 * interval is the number of platform cycles (MHz) between
	 * wake-up events generated by the EPU.
	 */
	ulong interval_mhz = get_bus_freq(0) / (1000 * 1000);

	if (interval)
		interval_mhz = simple_strtoul(interval, NULL, 10);

	return interval_mhz;
}

void erratum_a009635(void)
{
	u32 val;
	unsigned long interval_mhz = get_internval_val_mhz();

	if (!interval_mhz)
		return;

	val = in_le32(DCSR_CGACRE5);
	writel(val | 0x00000200, DCSR_CGACRE5);

	val = in_le32(EPU_EPCMPR5);
	writel(interval_mhz, EPU_EPCMPR5);
	val = in_le32(EPU_EPCCR5);
	writel(val | 0x82820000, EPU_EPCCR5);
	val = in_le32(EPU_EPSMCR5);
	writel(val | 0x002f0000, EPU_EPSMCR5);
	val = in_le32(EPU_EPECR5);
	writel(val | 0x20000000, EPU_EPECR5);
	val = in_le32(EPU_EPGCR);
	writel(val | 0x80000000, EPU_EPGCR);
}
#endif	/* CONFIG_SYS_FSL_ERRATUM_A009635 */

static void erratum_rcw_src(void)
{
#if defined(CONFIG_SPL) && defined(CONFIG_NAND_BOOT)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
	u32 __iomem *dcfg_dcsr = (u32 __iomem *)DCFG_DCSR_BASE;
	u32 val;

	val = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
	val &= ~DCFG_PORSR1_RCW_SRC;
	val |= DCFG_PORSR1_RCW_SRC_NOR;
	out_le32(dcfg_dcsr + DCFG_DCSR_PORCR1 / 4, val);
#endif
}

#define I2C_DEBUG_REG 0x6
#define I2C_GLITCH_EN 0x8
/*
 * This erratum requires setting the glitch_en bit to enable
 * the digital glitch filter, improving clock stability.
 */
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
static void erratum_a009203(void)
{
#ifdef CONFIG_SYS_I2C
	u8 __iomem *ptr;
#ifdef I2C1_BASE_ADDR
	ptr = (u8 __iomem *)(I2C1_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C2_BASE_ADDR
	ptr = (u8 __iomem *)(I2C2_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C3_BASE_ADDR
	ptr = (u8 __iomem *)(I2C3_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C4_BASE_ADDR
	ptr = (u8 __iomem *)(I2C4_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#endif
}
#endif
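
/*
 * Put the SMMU into bypass: set the CLIENTPD bit (client port disable,
 * so transactions bypass translation) and clear the USFCFG bit in both
 * the secure (SCR0) and non-secure (NSCR0) configuration registers.
 */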
void bypass_smmu(void)
{
	u32 val;

	val = (in_le32(SMMU_SCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_SCR0, val);
	val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_NSCR0, val);
}

void fsl_lsch3_early_init_f(void)
{
	erratum_rcw_src();
#ifdef CONFIG_FSL_IFC
	init_early_memctl_regs();	/* tighten IFC timing */
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
	erratum_a009203();
#endif
	erratum_a008514();
	erratum_a008336();
	erratum_a009008();
	erratum_a009798();
	erratum_a008997();
	erratum_a009007();
#ifdef CONFIG_CHAIN_OF_TRUST
	/*
	 * In case of Secure Boot, the IBR configures the SMMU
	 * to allow only secure transactions.
	 * The SMMU must be reset into bypass mode:
	 * set the ClientPD bit and clear the USFCFG bit.
	 */
	if (fsl_check_boot_mode_secure() == 1)
		bypass_smmu();
#endif
}

#ifdef CONFIG_SCSI_AHCI_PLAT
int sata_init(void)
{
	struct ccsr_ahci __iomem *ccsr_ahci;

#ifdef CONFIG_SYS_SATA2
	ccsr_ahci = (void *)CONFIG_SYS_SATA2;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);
#endif

#ifdef CONFIG_SYS_SATA1
	ccsr_ahci = (void *)CONFIG_SYS_SATA1;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA1);
	scsi_scan(false);
#endif

	return 0;
}
#endif

#elif defined(CONFIG_FSL_LSCH2)
#ifdef CONFIG_SCSI_AHCI_PLAT
int sata_init(void)
{
	struct ccsr_ahci __iomem *ccsr_ahci = (void *)CONFIG_SYS_SATA;

	/* Disable SATA ECC */
	out_le32((void *)CONFIG_SYS_DCSR_DCFG_ADDR + 0x520, 0x80000000);
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA);
	scsi_scan(false);

	return 0;
}
#endif

static void erratum_a009929(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009929
	struct ccsr_gur *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
	u32 __iomem *dcsr_cop_ccp = (void *)CONFIG_SYS_DCSR_COP_CCP_ADDR;
	u32 rstrqmr1 = gur_in32(&gur->rstrqmr1);

	rstrqmr1 |= 0x00000400;
	gur_out32(&gur->rstrqmr1, rstrqmr1);
	writel(0x01000000, dcsr_cop_ccp);
#endif
}

/*
 * This erratum requires setting a value in eddrtqcr1 to optimize
 * DDR performance. The eddrtqcr1 register is in the SCFG space
 * of LS1043A at offset 0x157_020c.
 */
#if defined(CONFIG_SYS_FSL_ERRATUM_A009660) \
	&& defined(CONFIG_SYS_FSL_ERRATUM_A008514)
#error A009660 and A008514 cannot both be enabled.
#endif

static void erratum_a009660(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009660
	u32 *eddrtqcr1 = (void *)CONFIG_SYS_FSL_SCFG_ADDR + 0x20c;

	out_be32(eddrtqcr1, 0x63b20042);
#endif
}
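
/*
 * Erratum A008850 is applied in two halves: the "early" half below
 * terminates barrier transactions at the CCI-400 and disables read/write
 * reordering in the DDR controller; erratum_a008850_post() is the
 * matching second half and re-enables both once it is safe to do so.
 */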
static void erratum_a008850_early(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 1 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)(CONFIG_SYS_IMMR +
						   CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;

	/* Skip if running at lower exception level */
	if (current_el() < 3)
		return;

	/* disable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_TERM_BARRIER);

	/* disable the re-ordering in DDRC */
	ddr_out32(&ddr->eor, DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
#endif
}

void erratum_a008850_post(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 2 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)(CONFIG_SYS_IMMR +
						   CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	/* Skip if running at lower exception level */
	if (current_el() < 3)
		return;

	/* enable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_EN_BARRIER);

	/* enable the re-ordering in DDRC */
	tmp = ddr_in32(&ddr->eor);
	tmp &= ~(DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
	ddr_out32(&ddr->eor, tmp);
#endif
}

#ifdef CONFIG_SYS_FSL_ERRATUM_A010315
void erratum_a010315(void)
{
	int i;

	for (i = PCIE1; i <= PCIE4; i++)
		if (!is_serdes_configured(i)) {
			debug("PCIe%d: disabled all R/W permission!\n", i);
			set_pcie_ns_access(i, 0);
		}
}
#endif

static void erratum_a010539(void)
{
#if defined(CONFIG_SYS_FSL_ERRATUM_A010539) && defined(CONFIG_QSPI_BOOT)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 porsr1;

	porsr1 = in_be32(&gur->porsr1);
	porsr1 &= ~FSL_CHASSIS2_CCSR_PORSR1_RCW_MASK;
	out_be32((void *)(CONFIG_SYS_DCSR_DCFG_ADDR + DCFG_DCSR_PORCR1),
		 porsr1);
#endif
}

/* Get VDD in mV from the voltage ID */
int get_core_volt_from_fuse(void)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int vdd;
	u32 fusesr;
	u8 vid;

	fusesr = in_be32(&gur->dcfg_fusesr);
	debug("%s: fusesr = 0x%x\n", __func__, fusesr);
	vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_ALTVID_SHIFT) &
		FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK;
	if ((vid == 0) || (vid == FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK)) {
		vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_VID_SHIFT) &
			FSL_CHASSIS2_DCFG_FUSESR_VID_MASK;
	}
	debug("%s: VID = 0x%x\n", __func__, vid);
	switch (vid) {
	case 0x00: /* VID isn't supported */
		vdd = -EINVAL;
		debug("%s: The VID feature is not supported\n", __func__);
		break;
	case 0x08: /* 0.9V silicon */
		vdd = 900;
		break;
	case 0x10: /* 1.0V silicon */
		vdd = 1000;
		break;
	default:  /* Other core voltage */
		vdd = -EINVAL;
		printf("%s: The VID(%x) isn't supported\n", __func__, vid);
		break;
	}
	debug("%s: The required minimum volt of CORE is %dmV\n", __func__, vdd);

	return vdd;
}

__weak int board_switch_core_volt(u32 vdd)
{
	return 0;
}

static int setup_core_volt(u32 vdd)
{
	return board_switch_core_volt(vdd);
}
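
/*
 * ddr_enable_0v9_volt() sets or clears the DDR_CDR1_V0PT9_EN bit in the
 * DDR control driver register; setup_chip_volt() below enables it when
 * the fuses report 0.9 V silicon.
 */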
#ifdef CONFIG_SYS_FSL_DDR
static void ddr_enable_0v9_volt(bool en)
{
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	tmp = ddr_in32(&ddr->ddr_cdr1);

	if (en)
		tmp |= DDR_CDR1_V0PT9_EN;
	else
		tmp &= ~DDR_CDR1_V0PT9_EN;

	ddr_out32(&ddr->ddr_cdr1, tmp);
}
#endif

int setup_chip_volt(void)
{
	int vdd;

	vdd = get_core_volt_from_fuse();
	/* Nothing to do for silicon that doesn't support VID */
	if (vdd < 0)
		return vdd;

	if (setup_core_volt(vdd))
		printf("%s: Switch core VDD to %dmV failed\n", __func__, vdd);
#ifdef CONFIG_SYS_HAS_SERDES
	if (setup_serdes_volt(vdd))
		printf("%s: Switch SVDD to %dmV failed\n", __func__, vdd);
#endif

#ifdef CONFIG_SYS_FSL_DDR
	if (vdd == 900)
		ddr_enable_0v9_volt(true);
#endif

	return 0;
}

void fsl_lsch2_early_init_f(void)
{
	struct ccsr_cci400 *cci = (struct ccsr_cci400 *)(CONFIG_SYS_IMMR +
					CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_scfg *scfg = (struct ccsr_scfg *)CONFIG_SYS_FSL_SCFG_ADDR;

#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
	enable_layerscape_ns_access();
#endif

#ifdef CONFIG_FSL_IFC
	init_early_memctl_regs();	/* tighten IFC timing */
#endif

#if defined(CONFIG_FSL_QSPI) && !defined(CONFIG_QSPI_BOOT)
	out_be32(&scfg->qspi_cfg, SCFG_QSPI_CLKSEL);
#endif
	/* Make SEC and SATA reads and writes snoopable */
	setbits_be32(&scfg->snpcnfgcr, SCFG_SNPCNFGCR_SECRDSNP |
		     SCFG_SNPCNFGCR_SECWRSNP |
		     SCFG_SNPCNFGCR_SATARDSNP |
		     SCFG_SNPCNFGCR_SATAWRSNP);

	/*
	 * Enable snoop requests and DVM message requests for
	 * slave interface S4 (A53 core cluster)
	 */
	if (current_el() == 3) {
		out_le32(&cci->slave[4].snoop_ctrl,
			 CCI400_DVM_MESSAGE_REQ_EN | CCI400_SNOOP_REQ_EN);
	}

	/* Erratum */
	erratum_a008850_early();	/* part 1 of 2 */
	erratum_a009929();
	erratum_a009660();
	erratum_a010539();
	erratum_a009008();
	erratum_a009798();
	erratum_a008997();
	erratum_a009007();
}
#endif
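
/*
 * qspi_ahb_init() below detects the controller's register endianness by
 * reading the LUT key register (the key value 0x5af05af0 only reads back
 * unswapped through big-endian accessors), then unlocks the LUT, programs
 * a 4-byte-address fast-read sequence and locks the LUT again.
 */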
#ifdef CONFIG_QSPI_AHB_INIT
/* Enable 4-byte address support and fast read */
int qspi_ahb_init(void)
{
	u32 *qspi_lut, lut_key, *qspi_key;

	qspi_key = (void *)SYS_FSL_QSPI_ADDR + 0x300;
	qspi_lut = (void *)SYS_FSL_QSPI_ADDR + 0x310;

	lut_key = in_be32(qspi_key);

	if (lut_key == 0x5af05af0) {
		/* That means the register is BE */
		out_be32(qspi_key, 0x5af05af0);
		/* Unlock the lut table */
		out_be32(qspi_key + 1, 0x00000002);
		out_be32(qspi_lut, 0x0820040c);
		out_be32(qspi_lut + 1, 0x1c080c08);
		out_be32(qspi_lut + 2, 0x00002400);
		/* Lock the lut table */
		out_be32(qspi_key, 0x5af05af0);
		out_be32(qspi_key + 1, 0x00000001);
	} else {
		/* That means the register is LE */
		out_le32(qspi_key, 0x5af05af0);
		/* Unlock the lut table */
		out_le32(qspi_key + 1, 0x00000002);
		out_le32(qspi_lut, 0x0820040c);
		out_le32(qspi_lut + 1, 0x1c080c08);
		out_le32(qspi_lut + 2, 0x00002400);
		/* Lock the lut table */
		out_le32(qspi_key, 0x5af05af0);
		out_le32(qspi_key + 1, 0x00000001);
	}

	return 0;
}
#endif

#ifdef CONFIG_BOARD_LATE_INIT
int board_late_init(void)
{
#ifdef CONFIG_SCSI_AHCI_PLAT
	sata_init();
#endif
#ifdef CONFIG_CHAIN_OF_TRUST
	fsl_setenv_chain_of_trust();
#endif
#ifdef CONFIG_QSPI_AHB_INIT
	qspi_ahb_init();
#endif

	return 0;
}
#endif