/*
 * Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <fsl_immap.h>
#include <fsl_ifc.h>
#include <ahci.h>
#include <scsi.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/io.h>
#include <asm/global_data.h>
#include <asm/arch-fsl-layerscape/config.h>
#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
#include <fsl_csu.h>
#endif
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>
#endif
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

bool soc_has_dp_ddr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A, LS2088A and LS2048A have DP_DDR */
	if ((SVR_SOC_VER(svr) == SVR_LS2085A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2088A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2048A))
		return true;

	return false;
}

bool soc_has_aiop(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A has AIOP */
	if (SVR_SOC_VER(svr) == SVR_LS2085A)
		return true;

	return false;
}

static inline void set_usb_txvreftune(u32 __iomem *scfg, u32 offset)
{
	scfg_clrsetbits32(scfg + offset / 4,
			  0xF << 6,
			  SCFG_USB_TXVREFTUNE << 6);
}

/* Erratum A-009008: program the USB3 PHY TXVREFTUNE field in SCFG_USB3PRM1CR */
static void erratum_a009008(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009008
	u32 __iomem *scfg = (u32 __iomem *)SCFG_BASE;

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB1);
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB2);
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB3);
#elif defined(CONFIG_ARCH_LS2080A)
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR);
#endif
#endif /* CONFIG_SYS_FSL_ERRATUM_A009008 */
}

static inline void set_usb_sqrxtune(u32 __iomem *scfg, u32 offset)
{
	scfg_clrbits32(scfg + offset / 4,
		       SCFG_USB_SQRXTUNE_MASK << 23);
}

/* Erratum A-009798: clear the USB3 PHY SQRXTUNE field in SCFG_USB3PRM1CR */
static void erratum_a009798(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009798
	u32 __iomem *scfg = (u32 __iomem *)SCFG_BASE;

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB1);
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB2);
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB3);
#elif defined(CONFIG_ARCH_LS2080A)
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR);
#endif
#endif /* CONFIG_SYS_FSL_ERRATUM_A009798 */
}

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
static inline void set_usb_pcstxswingfull(u32 __iomem *scfg, u32 offset)
{
	scfg_clrsetbits32(scfg + offset / 4,
			  0x7F << 9,
			  SCFG_USB_PCSTXSWINGFULL << 9);
}
#endif

/* Erratum A-008997: program the USB3 PHY PCSTXSWINGFULL field in SCFG_USB3PRM2CR */
static void erratum_a008997(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008997
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	u32 __iomem *scfg = (u32 __iomem *)SCFG_BASE;

	set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB1);
	set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB2);
	set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB3);
#endif
#endif /* CONFIG_SYS_FSL_ERRATUM_A008997 */
}

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
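/*
 * Helper for erratum A-009007 below: write a fixed sequence of receiver
 * equalization override values to a USB PHY's RX_OVRD_IN_HI register.
 * On LS1043A/LS1046A the PHY registers are reached through SCFG and
 * written big-endian; on LS2080A they are reached through DCSR and
 * written little-endian.
 */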
#define PROGRAM_USB_PHY_RX_OVRD_IN_HI(phy)	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_1);	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_2);	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_3);	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_4)

#elif defined(CONFIG_ARCH_LS2080A)

#define PROGRAM_USB_PHY_RX_OVRD_IN_HI(phy)	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_1);	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_2);	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_3);	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_4)

#endif

/* Erratum A-009007: apply the RX equalization override sequence to each USB PHY */
static void erratum_a009007(void)
{
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	void __iomem *usb_phy = (void __iomem *)SCFG_USB_PHY1;

	PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);

	usb_phy = (void __iomem *)SCFG_USB_PHY2;
	PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);

	usb_phy = (void __iomem *)SCFG_USB_PHY3;
	PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);
#elif defined(CONFIG_ARCH_LS2080A)
	void __iomem *dcsr = (void __iomem *)DCSR_BASE;

	PROGRAM_USB_PHY_RX_OVRD_IN_HI(dcsr + DCSR_USB_PHY1);
	PROGRAM_USB_PHY_RX_OVRD_IN_HI(dcsr + DCSR_USB_PHY2);
#endif /* CONFIG_ARCH_LS1043A || CONFIG_ARCH_LS1046A */
}

#if defined(CONFIG_FSL_LSCH3)
/*
 * This erratum requires setting a value in eddrtqcr1 to optimize
 * DDR performance.
 */
static void erratum_a008336(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008336
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#ifdef CONFIG_SYS_FSL_DCSR_DDR2_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR2_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#endif
}

/*
 * This erratum requires a register write before memory
 * controller 3 is enabled.
 */
static void erratum_a008514(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008514
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR3_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR3_ADDR + 0x800;
	out_le32(eddrtqcr1, 0x63b20002);
#endif
#endif
}

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
#define PLATFORM_CYCLE_ENV_VAR "a009635_interval_val"

static unsigned long get_internval_val_mhz(void)
{
	char *interval = env_get(PLATFORM_CYCLE_ENV_VAR);
	/*
	 * interval is the number of platform cycles (MHz) between
	 * wake-up events generated by the EPU.
	 */
	ulong interval_mhz = get_bus_freq(0) / (1000 * 1000);

	if (interval)
		interval_mhz = simple_strtoul(interval, NULL, 10);

	return interval_mhz;
}

void erratum_a009635(void)
{
	u32 val;
	unsigned long interval_mhz = get_internval_val_mhz();

	if (!interval_mhz)
		return;

	val = in_le32(DCSR_CGACRE5);
	writel(val | 0x00000200, DCSR_CGACRE5);

	val = in_le32(EPU_EPCMPR5);
	writel(interval_mhz, EPU_EPCMPR5);
	val = in_le32(EPU_EPCCR5);
	writel(val | 0x82820000, EPU_EPCCR5);
	val = in_le32(EPU_EPSMCR5);
	writel(val | 0x002f0000, EPU_EPSMCR5);
	val = in_le32(EPU_EPECR5);
	writel(val | 0x20000000, EPU_EPECR5);
	val = in_le32(EPU_EPGCR);
	writel(val | 0x80000000, EPU_EPGCR);
}
#endif /* CONFIG_SYS_FSL_ERRATUM_A009635 */

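/*
 * When booting from NAND through SPL, read DCFG_PORSR1, force the RCW
 * source field to the IFC NOR encoding and write the result into the
 * DCSR PORCR1 override register.
 */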
static void erratum_rcw_src(void)
{
#if defined(CONFIG_SPL) && defined(CONFIG_NAND_BOOT)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
	u32 __iomem *dcfg_dcsr = (u32 __iomem *)DCFG_DCSR_BASE;
	u32 val;

	val = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
	val &= ~DCFG_PORSR1_RCW_SRC;
	val |= DCFG_PORSR1_RCW_SRC_NOR;
	out_le32(dcfg_dcsr + DCFG_DCSR_PORCR1 / 4, val);
#endif
}

#define I2C_DEBUG_REG 0x6
#define I2C_GLITCH_EN 0x8
/*
 * This erratum requires setting the glitch_en bit to enable the
 * digital glitch filter and improve clock stability.
 */
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
static void erratum_a009203(void)
{
	u8 __iomem *ptr;
#ifdef CONFIG_SYS_I2C
#ifdef I2C1_BASE_ADDR
	ptr = (u8 __iomem *)(I2C1_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C2_BASE_ADDR
	ptr = (u8 __iomem *)(I2C2_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C3_BASE_ADDR
	ptr = (u8 __iomem *)(I2C3_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C4_BASE_ADDR
	ptr = (u8 __iomem *)(I2C4_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#endif
}
#endif

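/*
 * Force the SMMU into bypass: setting ClientPD disables translation for
 * client devices and clearing USFCFG lets unmatched stream IDs pass
 * through instead of faulting. Applied to both the secure (SCR0) and
 * non-secure (NSCR0) global configuration registers.
 */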
void bypass_smmu(void)
{
	u32 val;

	val = (in_le32(SMMU_SCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_SCR0, val);
	val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_NSCR0, val);
}

void fsl_lsch3_early_init_f(void)
{
	erratum_rcw_src();
	init_early_memctl_regs();	/* tighten IFC timing */
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
	erratum_a009203();
#endif
	erratum_a008514();
	erratum_a008336();
	erratum_a009008();
	erratum_a009798();
	erratum_a008997();
	erratum_a009007();
#ifdef CONFIG_CHAIN_OF_TRUST
	/*
	 * In case of Secure Boot, the IBR configures the SMMU to allow
	 * only secure transactions, so the SMMU must be reset into
	 * bypass mode: set the ClientPD bit and clear the USFCFG bit.
	 */
	if (fsl_check_boot_mode_secure() == 1)
		bypass_smmu();
#endif
}

#ifdef CONFIG_SCSI_AHCI_PLAT
int sata_init(void)
{
	struct ccsr_ahci __iomem *ccsr_ahci;

	ccsr_ahci = (void *)CONFIG_SYS_SATA2;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ccsr_ahci = (void *)CONFIG_SYS_SATA1;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA1);
	scsi_scan(false);

	return 0;
}
#endif

#elif defined(CONFIG_FSL_LSCH2)
#ifdef CONFIG_SCSI_AHCI_PLAT
int sata_init(void)
{
	struct ccsr_ahci __iomem *ccsr_ahci = (void *)CONFIG_SYS_SATA;

	/* Disable SATA ECC */
	out_le32((void *)CONFIG_SYS_DCSR_DCFG_ADDR + 0x520, 0x80000000);
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA);
	scsi_scan(false);

	return 0;
}
#endif

static void erratum_a009929(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009929
	struct ccsr_gur *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
	u32 __iomem *dcsr_cop_ccp = (void *)CONFIG_SYS_DCSR_COP_CCP_ADDR;
	u32 rstrqmr1 = gur_in32(&gur->rstrqmr1);

	rstrqmr1 |= 0x00000400;
	gur_out32(&gur->rstrqmr1, rstrqmr1);
	writel(0x01000000, dcsr_cop_ccp);
#endif
}

/*
 * This erratum requires setting a value in eddrtqcr1 to optimize
 * DDR performance. The eddrtqcr1 register is in the SCFG space of
 * LS1043A, at address 0x157_020c.
 */
#if defined(CONFIG_SYS_FSL_ERRATUM_A009660) \
	&& defined(CONFIG_SYS_FSL_ERRATUM_A008514)
#error A009660 and A008514 cannot both be enabled.
#endif

static void erratum_a009660(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009660
	u32 *eddrtqcr1 = (void *)CONFIG_SYS_FSL_SCFG_ADDR + 0x20c;

	out_be32(eddrtqcr1, 0x63b20042);
#endif
}

static void erratum_a008850_early(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 1 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)(CONFIG_SYS_IMMR +
						   CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;

	/* Skip if running at a lower exception level */
	if (current_el() < 3)
		return;

	/* disable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_TERM_BARRIER);

	/* disable the re-ordering in DDRC */
	ddr_out32(&ddr->eor, DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
#endif
}

void erratum_a008850_post(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 2 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)(CONFIG_SYS_IMMR +
						   CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	/* Skip if running at a lower exception level */
	if (current_el() < 3)
		return;

	/* enable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_EN_BARRIER);

	/* enable the re-ordering in DDRC */
	tmp = ddr_in32(&ddr->eor);
	tmp &= ~(DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
	ddr_out32(&ddr->eor, tmp);
#endif
}

#ifdef CONFIG_SYS_FSL_ERRATUM_A010315
void erratum_a010315(void)
{
	int i;

	for (i = PCIE1; i <= PCIE4; i++)
		if (!is_serdes_configured(i)) {
			debug("PCIe%d: disabled all R/W permission!\n", i);
			set_pcie_ns_access(i, 0);
		}
}
#endif

static void erratum_a010539(void)
{
#if defined(CONFIG_SYS_FSL_ERRATUM_A010539) && defined(CONFIG_QSPI_BOOT)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 porsr1;

	porsr1 = in_be32(&gur->porsr1);
	porsr1 &= ~FSL_CHASSIS2_CCSR_PORSR1_RCW_MASK;
	out_be32((void *)(CONFIG_SYS_DCSR_DCFG_ADDR + DCFG_DCSR_PORCR1),
		 porsr1);
#endif
}

/* Get VDD in mV from the voltage ID fuses */
int get_core_volt_from_fuse(void)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int vdd;
	u32 fusesr;
	u8 vid;

	fusesr = in_be32(&gur->dcfg_fusesr);
	debug("%s: fusesr = 0x%x\n", __func__, fusesr);
	vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_ALTVID_SHIFT) &
		FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK;
	if ((vid == 0) || (vid == FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK)) {
		vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_VID_SHIFT) &
			FSL_CHASSIS2_DCFG_FUSESR_VID_MASK;
	}
	debug("%s: VID = 0x%x\n", __func__, vid);
	switch (vid) {
	case 0x00: /* VID isn't supported */
		vdd = -EINVAL;
		debug("%s: The VID feature is not supported\n", __func__);
		break;
	case 0x08: /* 0.9V silicon */
		vdd = 900;
		break;
	case 0x10: /* 1.0V silicon */
		vdd = 1000;
		break;
	default: /* Other core voltage */
		vdd = -EINVAL;
		printf("%s: The VID (%x) isn't supported\n", __func__, vid);
		break;
	}
	debug("%s: The required minimum voltage of CORE is %dmV\n",
	      __func__, vdd);

	return vdd;
}

__weak int board_switch_core_volt(u32 vdd)
{
	return 0;
}

static int setup_core_volt(u32 vdd)
{
	return board_switch_core_volt(vdd);
}

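/*
 * Toggle the 0.9V operating point enable bit (DDR_CDR1_V0PT9_EN) in the
 * DDR controller driver register so the DDRC setting matches the core
 * voltage read back from the VID fuses.
 */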
#ifdef CONFIG_SYS_FSL_DDR
static void ddr_enable_0v9_volt(bool en)
{
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	tmp = ddr_in32(&ddr->ddr_cdr1);

	if (en)
		tmp |= DDR_CDR1_V0PT9_EN;
	else
		tmp &= ~DDR_CDR1_V0PT9_EN;

	ddr_out32(&ddr->ddr_cdr1, tmp);
}
#endif

int setup_chip_volt(void)
{
	int vdd;

	vdd = get_core_volt_from_fuse();
	/* Nothing to do for silicon that doesn't support VID */
	if (vdd < 0)
		return vdd;

	if (setup_core_volt(vdd))
		printf("%s: Switching core VDD to %dmV failed\n", __func__, vdd);
#ifdef CONFIG_SYS_HAS_SERDES
	if (setup_serdes_volt(vdd))
		printf("%s: Switching SVDD to %dmV failed\n", __func__, vdd);
#endif

#ifdef CONFIG_SYS_FSL_DDR
	if (vdd == 900)
		ddr_enable_0v9_volt(true);
#endif

	return 0;
}

void fsl_lsch2_early_init_f(void)
{
	struct ccsr_cci400 *cci = (struct ccsr_cci400 *)(CONFIG_SYS_IMMR +
					CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_scfg *scfg = (struct ccsr_scfg *)CONFIG_SYS_FSL_SCFG_ADDR;

#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
	enable_layerscape_ns_access();
#endif

#ifdef CONFIG_FSL_IFC
	init_early_memctl_regs();	/* tighten IFC timing */
#endif

#if defined(CONFIG_FSL_QSPI) && !defined(CONFIG_QSPI_BOOT)
	out_be32(&scfg->qspi_cfg, SCFG_QSPI_CLKSEL);
#endif
	/* Make SEC and SATA reads and writes snoopable */
	setbits_be32(&scfg->snpcnfgcr, SCFG_SNPCNFGCR_SECRDSNP |
		     SCFG_SNPCNFGCR_SECWRSNP |
		     SCFG_SNPCNFGCR_SATARDSNP |
		     SCFG_SNPCNFGCR_SATAWRSNP);

	/*
	 * Enable snoop requests and DVM message requests for
	 * slave interface S4 (A53 core cluster)
	 */
	if (current_el() == 3) {
		out_le32(&cci->slave[4].snoop_ctrl,
			 CCI400_DVM_MESSAGE_REQ_EN | CCI400_SNOOP_REQ_EN);
	}

	/* Errata workarounds */
	erratum_a008850_early();	/* part 1 of 2 */
	erratum_a009929();
	erratum_a009660();
	erratum_a010539();
	erratum_a009008();
	erratum_a009798();
	erratum_a008997();
	erratum_a009007();
}
#endif

#ifdef CONFIG_QSPI_AHB_INIT
/* Enable 4-byte address support and fast read */
int qspi_ahb_init(void)
{
	u32 *qspi_lut, lut_key, *qspi_key;

	qspi_key = (void *)SYS_FSL_QSPI_ADDR + 0x300;
	qspi_lut = (void *)SYS_FSL_QSPI_ADDR + 0x310;

	lut_key = in_be32(qspi_key);

	if (lut_key == 0x5af05af0) {
		/* That means the register is BE */
		out_be32(qspi_key, 0x5af05af0);
		/* Unlock the LUT table */
		out_be32(qspi_key + 1, 0x00000002);
		out_be32(qspi_lut, 0x0820040c);
		out_be32(qspi_lut + 1, 0x1c080c08);
		out_be32(qspi_lut + 2, 0x00002400);
		/* Lock the LUT table */
		out_be32(qspi_key, 0x5af05af0);
		out_be32(qspi_key + 1, 0x00000001);
	} else {
		/* That means the register is LE */
		out_le32(qspi_key, 0x5af05af0);
		/* Unlock the LUT table */
		out_le32(qspi_key + 1, 0x00000002);
		out_le32(qspi_lut, 0x0820040c);
		out_le32(qspi_lut + 1, 0x1c080c08);
		out_le32(qspi_lut + 2, 0x00002400);
		/* Lock the LUT table */
		out_le32(qspi_key, 0x5af05af0);
		out_le32(qspi_key + 1, 0x00000001);
	}

	return 0;
}
#endif

#ifdef CONFIG_BOARD_LATE_INIT
int board_late_init(void)
{
#ifdef CONFIG_SCSI_AHCI_PLAT
	sata_init();
#endif
#ifdef CONFIG_CHAIN_OF_TRUST
	fsl_setenv_chain_of_trust();
#endif
#ifdef CONFIG_QSPI_AHB_INIT
	qspi_ahb_init();
#endif

	return 0;
}
#endif