/*
 * Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <fsl_ifc.h>
#include <ahci.h>
#include <scsi.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/io.h>
#include <asm/global_data.h>
#include <asm/arch-fsl-layerscape/config.h>
#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
#include <fsl_csu.h>
#endif
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>
#endif
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

bool soc_has_dp_ddr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A, LS2088A and LS2048A have DP_DDR */
	if ((SVR_SOC_VER(svr) == SVR_LS2085A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2088A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2048A))
		return true;

	return false;
}

bool soc_has_aiop(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A has AIOP */
	if (SVR_SOC_VER(svr) == SVR_LS2085A)
		return true;

	return false;
}

#if defined(CONFIG_FSL_LSCH3)
/*
 * This erratum requires setting a value in eddrtqcr1 to
 * optimize DDR performance.
 */
static void erratum_a008336(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008336
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#ifdef CONFIG_SYS_FSL_DCSR_DDR2_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR2_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#endif
}

/*
 * This erratum requires a register write before
 * memory controller 3 is enabled.
 */
static void erratum_a008514(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008514
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR3_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR3_ADDR + 0x800;
	out_le32(eddrtqcr1, 0x63b20002);
#endif
#endif
}

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
#define PLATFORM_CYCLE_ENV_VAR "a009635_interval_val"
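
/*
 * Note: the wake-up interval used by erratum_a009635() below defaults to
 * the platform bus frequency expressed in MHz, but it can be overridden
 * from the U-Boot environment, e.g. (the value here is only an example):
 *
 *	=> setenv a009635_interval_val 600
 *
 * The variable is parsed as a base-10 number of platform cycles (MHz).
 */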

static unsigned long get_internval_val_mhz(void)
{
	char *interval = getenv(PLATFORM_CYCLE_ENV_VAR);
	/*
	 * interval is the number of platform cycles (in MHz) between
	 * wake-up events generated by the EPU.
	 */
	ulong interval_mhz = get_bus_freq(0) / (1000 * 1000);

	if (interval)
		interval_mhz = simple_strtoul(interval, NULL, 10);

	return interval_mhz;
}

void erratum_a009635(void)
{
	u32 val;
	unsigned long interval_mhz = get_internval_val_mhz();

	if (!interval_mhz)
		return;

	val = in_le32(DCSR_CGACRE5);
	writel(val | 0x00000200, DCSR_CGACRE5);

	val = in_le32(EPU_EPCMPR5);
	writel(interval_mhz, EPU_EPCMPR5);
	val = in_le32(EPU_EPCCR5);
	writel(val | 0x82820000, EPU_EPCCR5);
	val = in_le32(EPU_EPSMCR5);
	writel(val | 0x002f0000, EPU_EPSMCR5);
	val = in_le32(EPU_EPECR5);
	writel(val | 0x20000000, EPU_EPECR5);
	val = in_le32(EPU_EPGCR);
	writel(val | 0x80000000, EPU_EPGCR);
}
#endif	/* CONFIG_SYS_FSL_ERRATUM_A009635 */

static void erratum_rcw_src(void)
{
#if defined(CONFIG_SPL)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
	u32 __iomem *dcfg_dcsr = (u32 __iomem *)DCFG_DCSR_BASE;
	u32 val;

	val = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
	val &= ~DCFG_PORSR1_RCW_SRC;
	val |= DCFG_PORSR1_RCW_SRC_NOR;
	out_le32(dcfg_dcsr + DCFG_DCSR_PORCR1 / 4, val);
#endif
}

#define I2C_DEBUG_REG 0x6
#define I2C_GLITCH_EN 0x8
/*
 * This erratum requires setting the glitch_en bit to enable
 * the digital glitch filter and improve clock stability.
 */
static void erratum_a009203(void)
{
	u8 __iomem *ptr;
#ifdef CONFIG_SYS_I2C
#ifdef I2C1_BASE_ADDR
	ptr = (u8 __iomem *)(I2C1_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C2_BASE_ADDR
	ptr = (u8 __iomem *)(I2C2_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C3_BASE_ADDR
	ptr = (u8 __iomem *)(I2C3_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C4_BASE_ADDR
	ptr = (u8 __iomem *)(I2C4_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#endif
}
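
/*
 * Put the SMMU into bypass. Per the Arm SMMUv2 sCR0 bit definitions,
 * setting CLIENTPD disables translation for client transactions, and
 * clearing USFCFG lets transactions with unidentified stream IDs pass
 * through instead of faulting. Both global configuration registers
 * (SMMU_SCR0 and SMMU_NSCR0) are updated.
 */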
void bypass_smmu(void)
{
	u32 val;

	val = (in_le32(SMMU_SCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_SCR0, val);
	val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_NSCR0, val);
}

void fsl_lsch3_early_init_f(void)
{
	erratum_rcw_src();
	init_early_memctl_regs();	/* tighten IFC timing */
	erratum_a009203();
	erratum_a008514();
	erratum_a008336();
#ifdef CONFIG_CHAIN_OF_TRUST
	/*
	 * In case of secure boot, the IBR configures the SMMU
	 * to allow only secure transactions. The SMMU must be put
	 * back into bypass mode: set the ClientPD bit and clear
	 * the USFCFG bit.
	 */
	if (fsl_check_boot_mode_secure() == 1)
		bypass_smmu();
#endif
}

#ifdef CONFIG_SCSI_AHCI_PLAT
int sata_init(void)
{
	struct ccsr_ahci __iomem *ccsr_ahci;

	ccsr_ahci = (void *)CONFIG_SYS_SATA2;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ccsr_ahci = (void *)CONFIG_SYS_SATA1;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA1);
	scsi_scan(0);

	return 0;
}
#endif

#elif defined(CONFIG_FSL_LSCH2)
#ifdef CONFIG_SCSI_AHCI_PLAT
int sata_init(void)
{
	struct ccsr_ahci __iomem *ccsr_ahci = (void *)CONFIG_SYS_SATA;

	/* Disable SATA ECC */
	out_le32((void *)CONFIG_SYS_DCSR_DCFG_ADDR + 0x520, 0x80000000);
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA);
	scsi_scan(0);

	return 0;
}
#endif

static void erratum_a009929(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009929
	struct ccsr_gur *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
	u32 __iomem *dcsr_cop_ccp = (void *)CONFIG_SYS_DCSR_COP_CCP_ADDR;
	u32 rstrqmr1 = gur_in32(&gur->rstrqmr1);

	rstrqmr1 |= 0x00000400;
	gur_out32(&gur->rstrqmr1, rstrqmr1);
	writel(0x01000000, dcsr_cop_ccp);
#endif
}

/*
 * This erratum requires setting a value in eddrtqcr1 to optimize
 * DDR performance. The eddrtqcr1 register is in the SCFG space
 * of LS1043A at offset 0x157_020c.
 */
#if defined(CONFIG_SYS_FSL_ERRATUM_A009660) \
	&& defined(CONFIG_SYS_FSL_ERRATUM_A008514)
#error A009660 and A008514 cannot both be enabled.
#endif

static void erratum_a009660(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009660
	u32 *eddrtqcr1 = (void *)CONFIG_SYS_FSL_SCFG_ADDR + 0x20c;
	out_be32(eddrtqcr1, 0x63b20042);
#endif
}

static void erratum_a008850_early(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 1 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)CONFIG_SYS_CCI400_ADDR;
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;

	/* disable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_TERM_BARRIER);

	/* disable the re-ordering in DDRC */
	ddr_out32(&ddr->eor, DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
#endif
}

void erratum_a008850_post(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 2 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)CONFIG_SYS_CCI400_ADDR;
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	/* enable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_EN_BARRIER);

	/* enable the re-ordering in DDRC */
	tmp = ddr_in32(&ddr->eor);
	tmp &= ~(DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
	ddr_out32(&ddr->eor, tmp);
#endif
}

#ifdef CONFIG_SYS_FSL_ERRATUM_A010315
void erratum_a010315(void)
{
	int i;

	for (i = PCIE1; i <= PCIE4; i++)
		if (!is_serdes_configured(i)) {
			debug("PCIe%d: disabled all R/W permission!\n", i);
			set_pcie_ns_access(i, 0);
		}
}
#endif

static void erratum_a010539(void)
{
#if defined(CONFIG_SYS_FSL_ERRATUM_A010539) && defined(CONFIG_QSPI_BOOT)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 porsr1;

	porsr1 = in_be32(&gur->porsr1);
	porsr1 &= ~FSL_CHASSIS2_CCSR_PORSR1_RCW_MASK;
	out_be32((void *)(CONFIG_SYS_DCSR_DCFG_ADDR + DCFG_DCSR_PORCR1),
		 porsr1);
#endif
}

/* Get VDD in mV from the fused voltage ID */
int get_core_volt_from_fuse(void)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int vdd;
	u32 fusesr;
	u8 vid;

	fusesr = in_be32(&gur->dcfg_fusesr);
	debug("%s: fusesr = 0x%x\n", __func__, fusesr);
	vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_ALTVID_SHIFT) &
		FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK;
	if ((vid == 0) || (vid == FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK)) {
		vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_VID_SHIFT) &
			FSL_CHASSIS2_DCFG_FUSESR_VID_MASK;
	}
	debug("%s: VID = 0x%x\n", __func__, vid);
	switch (vid) {
	case 0x00: /* VID isn't supported */
		vdd = -EINVAL;
		debug("%s: The VID feature is not supported\n", __func__);
		break;
	case 0x08: /* 0.9V silicon */
		vdd = 900;
		break;
	case 0x10: /* 1.0V silicon */
		vdd = 1000;
		break;
	default: /* Other core voltage */
		vdd = -EINVAL;
		printf("%s: The VID(%x) isn't supported\n", __func__, vid);
		break;
	}
	debug("%s: The required minimum volt of CORE is %dmV\n", __func__, vdd);

	return vdd;
}

__weak int board_setup_core_volt(u32 vdd)
{
	return 0;
}

static int setup_core_volt(u32 vdd)
{
	return board_setup_core_volt(vdd);
}

#ifdef CONFIG_SYS_FSL_DDR
static void ddr_enable_0v9_volt(bool en)
{
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	tmp = ddr_in32(&ddr->ddr_cdr1);

	if (en)
		tmp |= DDR_CDR1_V0PT9_EN;
	else
		tmp &= ~DDR_CDR1_V0PT9_EN;

	ddr_out32(&ddr->ddr_cdr1, tmp);
}
#endif
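
/*
 * Boards that can actually switch the core supply are expected to override
 * the weak board_setup_core_volt() hook above. A minimal sketch, assuming a
 * hypothetical board-specific helper board_vdd_set_mv() that programs the
 * regulator (the helper is not part of this file):
 *
 *	int board_setup_core_volt(u32 vdd)
 *	{
 *		// vdd is the requested core voltage in mV (e.g. 900 or 1000)
 *		return board_vdd_set_mv(vdd);
 *	}
 *
 * A non-zero return makes setup_chip_volt() below print a failure message.
 */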

int setup_chip_volt(void)
{
	int vdd;

	vdd = get_core_volt_from_fuse();
	/* Nothing to do for silicon that doesn't support VID */
	if (vdd < 0)
		return vdd;

	if (setup_core_volt(vdd))
		printf("%s: Switch core VDD to %dmV failed\n", __func__, vdd);
#ifdef CONFIG_SYS_HAS_SERDES
	if (setup_serdes_volt(vdd))
		printf("%s: Switch SVDD to %dmV failed\n", __func__, vdd);
#endif

#ifdef CONFIG_SYS_FSL_DDR
	if (vdd == 900)
		ddr_enable_0v9_volt(true);
#endif

	return 0;
}

void fsl_lsch2_early_init_f(void)
{
	struct ccsr_cci400 *cci = (struct ccsr_cci400 *)CONFIG_SYS_CCI400_ADDR;
	struct ccsr_scfg *scfg = (struct ccsr_scfg *)CONFIG_SYS_FSL_SCFG_ADDR;

#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
	enable_layerscape_ns_access();
#endif

#ifdef CONFIG_FSL_IFC
	init_early_memctl_regs();	/* tighten IFC timing */
#endif

#if defined(CONFIG_FSL_QSPI) && !defined(CONFIG_QSPI_BOOT)
	out_be32(&scfg->qspi_cfg, SCFG_QSPI_CLKSEL);
#endif
	/* Make SEC and SATA reads and writes snoopable */
	setbits_be32(&scfg->snpcnfgcr, SCFG_SNPCNFGCR_SECRDSNP |
		     SCFG_SNPCNFGCR_SECWRSNP |
		     SCFG_SNPCNFGCR_SATARDSNP |
		     SCFG_SNPCNFGCR_SATAWRSNP);

	/*
	 * Enable snoop requests and DVM message requests for
	 * slave interface S4 (A53 core cluster)
	 */
	out_le32(&cci->slave[4].snoop_ctrl,
		 CCI400_DVM_MESSAGE_REQ_EN | CCI400_SNOOP_REQ_EN);

	/* Errata */
	erratum_a008850_early();	/* part 1 of 2 */
	erratum_a009929();
	erratum_a009660();
	erratum_a010539();
}
#endif

#ifdef CONFIG_QSPI_AHB_INIT
/* Enable 4-byte address support and fast read */
int qspi_ahb_init(void)
{
	u32 *qspi_lut, lut_key, *qspi_key;

	qspi_key = (void *)SYS_FSL_QSPI_ADDR + 0x300;
	qspi_lut = (void *)SYS_FSL_QSPI_ADDR + 0x310;

	lut_key = in_be32(qspi_key);

	if (lut_key == 0x5af05af0) {
		/* That means the register is BE */
		out_be32(qspi_key, 0x5af05af0);
		/* Unlock the LUT table */
		out_be32(qspi_key + 1, 0x00000002);
		out_be32(qspi_lut, 0x0820040c);
		out_be32(qspi_lut + 1, 0x1c080c08);
		out_be32(qspi_lut + 2, 0x00002400);
		/* Lock the LUT table */
		out_be32(qspi_key, 0x5af05af0);
		out_be32(qspi_key + 1, 0x00000001);
	} else {
		/* That means the register is LE */
		out_le32(qspi_key, 0x5af05af0);
		/* Unlock the LUT table */
		out_le32(qspi_key + 1, 0x00000002);
		out_le32(qspi_lut, 0x0820040c);
		out_le32(qspi_lut + 1, 0x1c080c08);
		out_le32(qspi_lut + 2, 0x00002400);
		/* Lock the LUT table */
		out_le32(qspi_key, 0x5af05af0);
		out_le32(qspi_key + 1, 0x00000001);
	}

	return 0;
}
#endif

#ifdef CONFIG_BOARD_LATE_INIT
int board_late_init(void)
{
#ifdef CONFIG_SCSI_AHCI_PLAT
	sata_init();
#endif
#ifdef CONFIG_CHAIN_OF_TRUST
	fsl_setenv_chain_of_trust();
#endif
#ifdef CONFIG_QSPI_AHB_INIT
	qspi_ahb_init();
#endif

	return 0;
}
#endif