// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2015 Freescale Semiconductor
 */

#include <common.h>
#include <fsl_immap.h>
#include <fsl_ifc.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/io.h>
#include <asm/global_data.h>
#include <asm/arch-fsl-layerscape/config.h>
#include <asm/arch-fsl-layerscape/ns_access.h>
#include <asm/arch-fsl-layerscape/fsl_icid.h>
#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
#include <fsl_csu.h>
#endif
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>
#endif
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#ifdef CONFIG_TFABOOT
#include <environment.h>
DECLARE_GLOBAL_DATA_PTR;
#endif

bool soc_has_dp_ddr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A, LS2088A and LS2048A have DP_DDR */
	if ((SVR_SOC_VER(svr) == SVR_LS2085A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2088A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2048A))
		return true;

	return false;
}

bool soc_has_aiop(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A has AIOP */
	if (SVR_SOC_VER(svr) == SVR_LS2085A)
		return true;

	return false;
}

static inline void set_usb_txvreftune(u32 __iomem *scfg, u32 offset)
{
	scfg_clrsetbits32(scfg + offset / 4,
			  0xF << 6,
			  SCFG_USB_TXVREFTUNE << 6);
}

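/*
 * Workaround for erratum A009008: program the TXVREFTUNE field
 * (bits 9:6) of the SCFG USB3PRM1CR register(s) for each USB
 * controller present on the SoC.
 */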
static void erratum_a009008(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009008
	u32 __iomem *scfg = (u32 __iomem *)SCFG_BASE;

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A) || \
	defined(CONFIG_ARCH_LS1012A)
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB1);
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB2);
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR_USB3);
#endif
#elif defined(CONFIG_ARCH_LS2080A)
	set_usb_txvreftune(scfg, SCFG_USB3PRM1CR);
#endif
#endif /* CONFIG_SYS_FSL_ERRATUM_A009008 */
}

static inline void set_usb_sqrxtune(u32 __iomem *scfg, u32 offset)
{
	scfg_clrbits32(scfg + offset / 4,
		       SCFG_USB_SQRXTUNE_MASK << 23);
}

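/*
 * Workaround for erratum A009798: clear the SQRXTUNE field in the
 * SCFG USB3PRM1CR register(s) for each USB controller present on
 * the SoC.
 */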
static void erratum_a009798(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009798
	u32 __iomem *scfg = (u32 __iomem *)SCFG_BASE;

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A) || \
	defined(CONFIG_ARCH_LS1012A)
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB1);
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB2);
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR_USB3);
#endif
#elif defined(CONFIG_ARCH_LS2080A)
	set_usb_sqrxtune(scfg, SCFG_USB3PRM1CR);
#endif
#endif /* CONFIG_SYS_FSL_ERRATUM_A009798 */
}

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A) || \
	defined(CONFIG_ARCH_LS1012A)
static inline void set_usb_pcstxswingfull(u32 __iomem *scfg, u32 offset)
{
	scfg_clrsetbits32(scfg + offset / 4,
			  0x7F << 9,
			  SCFG_USB_PCSTXSWINGFULL << 9);
}
#endif

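/*
 * Workaround for erratum A008997: program the PCSTXSWINGFULL field
 * (bits 15:9) of the SCFG USB3PRM2CR register(s) of the affected
 * USB controllers.
 */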
static void erratum_a008997(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008997
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A) || \
	defined(CONFIG_ARCH_LS1012A)
	u32 __iomem *scfg = (u32 __iomem *)SCFG_BASE;

	set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB1);
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB2);
	set_usb_pcstxswingfull(scfg, SCFG_USB3PRM2CR_USB3);
#endif
#endif
#endif /* CONFIG_SYS_FSL_ERRATUM_A008997 */
}

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A) || \
	defined(CONFIG_ARCH_LS1012A)

#define PROGRAM_USB_PHY_RX_OVRD_IN_HI(phy)	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_1);	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_2);	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_3);	\
	out_be16((phy) + SCFG_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_4)

#elif defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)

#define PROGRAM_USB_PHY_RX_OVRD_IN_HI(phy)	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_1);	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_2);	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_3);	\
	out_le16((phy) + DCSR_USB_PHY_RX_OVRD_IN_HI, USB_PHY_RX_EQ_VAL_4)

#endif

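/*
 * Workaround for erratum A009007: write a sequence of receiver
 * equalization values to the RX_OVRD_IN_HI register of each USB PHY,
 * through SCFG on LS1012A/LS1043A/LS1046A or through DCSR on
 * LS2080A/LS1088A.
 */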
static void erratum_a009007(void)
{
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A) || \
	defined(CONFIG_ARCH_LS1012A)
	void __iomem *usb_phy = (void __iomem *)SCFG_USB_PHY1;

	PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);
#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	usb_phy = (void __iomem *)SCFG_USB_PHY2;
	PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);

	usb_phy = (void __iomem *)SCFG_USB_PHY3;
	PROGRAM_USB_PHY_RX_OVRD_IN_HI(usb_phy);
#endif
#elif defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	void __iomem *dcsr = (void __iomem *)DCSR_BASE;

	PROGRAM_USB_PHY_RX_OVRD_IN_HI(dcsr + DCSR_USB_PHY1);
	PROGRAM_USB_PHY_RX_OVRD_IN_HI(dcsr + DCSR_USB_PHY2);
#endif /* CONFIG_SYS_FSL_ERRATUM_A009007 */
}

#if defined(CONFIG_FSL_LSCH3)
/*
 * This erratum requires setting a value in eddrtqcr1 to
 * optimize DDR performance.
 */
static void erratum_a008336(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008336
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#ifdef CONFIG_SYS_FSL_DCSR_DDR2_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR2_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#endif
}

/*
 * This erratum requires a register write before memory
 * controller 3 is enabled.
 */
static void erratum_a008514(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008514
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR3_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR3_ADDR + 0x800;
	out_le32(eddrtqcr1, 0x63b20002);
#endif
#endif
}

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
#define PLATFORM_CYCLE_ENV_VAR "a009635_interval_val"

static unsigned long get_internval_val_mhz(void)
{
	char *interval = env_get(PLATFORM_CYCLE_ENV_VAR);
	/*
	 * interval is the number of platform cycles (in MHz) between
	 * wake-up events generated by the EPU.
	 */
	ulong interval_mhz = get_bus_freq(0) / (1000 * 1000);

	if (interval)
		interval_mhz = simple_strtoul(interval, NULL, 10);

	return interval_mhz;
}

void erratum_a009635(void)
{
	u32 val;
	unsigned long interval_mhz = get_internval_val_mhz();

	if (!interval_mhz)
		return;

	val = in_le32(DCSR_CGACRE5);
	writel(val | 0x00000200, DCSR_CGACRE5);

	val = in_le32(EPU_EPCMPR5);
	writel(interval_mhz, EPU_EPCMPR5);
	val = in_le32(EPU_EPCCR5);
	writel(val | 0x82820000, EPU_EPCCR5);
	val = in_le32(EPU_EPSMCR5);
	writel(val | 0x002f0000, EPU_EPSMCR5);
	val = in_le32(EPU_EPECR5);
	writel(val | 0x20000000, EPU_EPECR5);
	val = in_le32(EPU_EPGCR);
	writel(val | 0x80000000, EPU_EPGCR);
}
#endif	/* CONFIG_SYS_FSL_ERRATUM_A009635 */

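/*
 * When booting from NAND with SPL, rewrite the RCW source field of
 * the PORSR1 value to report NOR and store the result in the DCSR
 * PORCR1 register.
 */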
static void erratum_rcw_src(void)
{
#if defined(CONFIG_SPL) && defined(CONFIG_NAND_BOOT)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
	u32 __iomem *dcfg_dcsr = (u32 __iomem *)DCFG_DCSR_BASE;
	u32 val;

	val = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
	val &= ~DCFG_PORSR1_RCW_SRC;
	val |= DCFG_PORSR1_RCW_SRC_NOR;
	out_le32(dcfg_dcsr + DCFG_DCSR_PORCR1 / 4, val);
#endif
}

#define I2C_DEBUG_REG 0x6
#define I2C_GLITCH_EN 0x8
/*
 * This erratum requires setting the glitch_en bit to enable the
 * digital glitch filter and improve clock stability.
 */
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
static void erratum_a009203(void)
{
#ifdef CONFIG_SYS_I2C
	u8 __iomem *ptr;
#ifdef I2C1_BASE_ADDR
	ptr = (u8 __iomem *)(I2C1_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C2_BASE_ADDR
	ptr = (u8 __iomem *)(I2C2_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C3_BASE_ADDR
	ptr = (u8 __iomem *)(I2C3_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C4_BASE_ADDR
	ptr = (u8 __iomem *)(I2C4_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#endif
}
#endif

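/*
 * Put the SMMU into bypass mode: set the ClientPD bit and clear the
 * USFCFG bit in both the secure (SCR0) and non-secure (NSCR0)
 * configuration registers.
 */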
void bypass_smmu(void)
{
	u32 val;

	val = (in_le32(SMMU_SCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_SCR0, val);
	val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_NSCR0, val);
}

void fsl_lsch3_early_init_f(void)
{
	erratum_rcw_src();
#ifdef CONFIG_FSL_IFC
	init_early_memctl_regs();	/* tighten IFC timing */
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
	erratum_a009203();
#endif
	erratum_a008514();
	erratum_a008336();
	erratum_a009008();
	erratum_a009798();
	erratum_a008997();
	erratum_a009007();
#ifdef CONFIG_CHAIN_OF_TRUST
	/*
	 * In case of secure boot, the IBR configures the SMMU to allow
	 * only secure transactions, so the SMMU must be put back into
	 * bypass mode: set the ClientPD bit and clear the USFCFG bit.
	 */
	if (fsl_check_boot_mode_secure() == 1)
		bypass_smmu();
#endif
}

/* Get VDD in the unit of mV from the voltage ID */
int get_core_volt_from_fuse(void)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int vdd;
	u32 fusesr;
	u8 vid;

	/* get the voltage ID from the fuse status register */
	fusesr = in_le32(&gur->dcfg_fusesr);
	debug("%s: fusesr = 0x%x\n", __func__, fusesr);
	vid = (fusesr >> FSL_CHASSIS3_DCFG_FUSESR_ALTVID_SHIFT) &
		FSL_CHASSIS3_DCFG_FUSESR_ALTVID_MASK;
	if ((vid == 0) || (vid == FSL_CHASSIS3_DCFG_FUSESR_ALTVID_MASK)) {
		vid = (fusesr >> FSL_CHASSIS3_DCFG_FUSESR_VID_SHIFT) &
			FSL_CHASSIS3_DCFG_FUSESR_VID_MASK;
	}
	debug("%s: VID = 0x%x\n", __func__, vid);
	switch (vid) {
	case 0x00: /* VID isn't supported */
		vdd = -EINVAL;
		debug("%s: The VID feature is not supported\n", __func__);
		break;
	case 0x08: /* 0.9V silicon */
		vdd = 900;
		break;
	case 0x10: /* 1.0V silicon */
		vdd = 1000;
		break;
	default: /* Other core voltage */
		vdd = -EINVAL;
		debug("%s: The VID (%x) is not supported\n", __func__, vid);
		break;
	}
	debug("%s: The required minimum voltage of CORE is %dmV\n", __func__, vdd);

	return vdd;
}

#elif defined(CONFIG_FSL_LSCH2)

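/*
 * Workaround for erratum A009929: set an additional bit in RSTRQMR1
 * and write the DCSR COP CCP register.
 */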
static void erratum_a009929(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009929
	struct ccsr_gur *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
	u32 __iomem *dcsr_cop_ccp = (void *)CONFIG_SYS_DCSR_COP_CCP_ADDR;
	u32 rstrqmr1 = gur_in32(&gur->rstrqmr1);

	rstrqmr1 |= 0x00000400;
	gur_out32(&gur->rstrqmr1, rstrqmr1);
	writel(0x01000000, dcsr_cop_ccp);
#endif
}

/*
 * This erratum requires setting a value in eddrtqcr1 to optimize
 * DDR performance. The eddrtqcr1 register is in the SCFG space
 * of LS1043A at offset 0x157_020c.
 */
#if defined(CONFIG_SYS_FSL_ERRATUM_A009660) \
	&& defined(CONFIG_SYS_FSL_ERRATUM_A008514)
#error A009660 and A008514 cannot both be enabled.
#endif

static void erratum_a009660(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009660
	u32 *eddrtqcr1 = (void *)CONFIG_SYS_FSL_SCFG_ADDR + 0x20c;

	out_be32(eddrtqcr1, 0x63b20042);
#endif
}

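/*
 * The erratum A008850 workaround is applied in two parts:
 * erratum_a008850_early() disables barrier propagation from CCI-400
 * to the DDR controller and read/write re-ordering in the DDR
 * controller; erratum_a008850_post() re-enables both later in boot.
 */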
static void erratum_a008850_early(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 1 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)(CONFIG_SYS_IMMR +
						   CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;

	/* Skip if running at lower exception level */
	if (current_el() < 3)
		return;

	/* disables propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_TERM_BARRIER);

	/* disable the re-ordering in DDRC */
	ddr_out32(&ddr->eor, DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
#endif
}

void erratum_a008850_post(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 2 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)(CONFIG_SYS_IMMR +
						   CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	/* Skip if running at lower exception level */
	if (current_el() < 3)
		return;

	/* enable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_EN_BARRIER);

	/* enable the re-ordering in DDRC */
	tmp = ddr_in32(&ddr->eor);
	tmp &= ~(DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
	ddr_out32(&ddr->eor, tmp);
#endif
}

#ifdef CONFIG_SYS_FSL_ERRATUM_A010315
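/*
 * Workaround for erratum A010315: remove all non-secure read/write
 * permissions for PCIe controllers whose SerDes lanes are not
 * configured.
 */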
void erratum_a010315(void)
{
	int i;

	for (i = PCIE1; i <= PCIE4; i++)
		if (!is_serdes_configured(i)) {
			debug("PCIe%d: disabled all R/W permission!\n", i);
			set_pcie_ns_access(i, 0);
		}
}
#endif

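/*
 * Workaround for erratum A010539: when booting from QSPI, clear the
 * RCW source bits of the PORSR1 value, write the result to the DCSR
 * PORCR1 register, and write 0xffffffff to SCFG offset 0x1a8.
 */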
static void erratum_a010539(void)
{
#if defined(CONFIG_SYS_FSL_ERRATUM_A010539) && defined(CONFIG_QSPI_BOOT)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 porsr1;

	porsr1 = in_be32(&gur->porsr1);
	porsr1 &= ~FSL_CHASSIS2_CCSR_PORSR1_RCW_MASK;
	out_be32((void *)(CONFIG_SYS_DCSR_DCFG_ADDR + DCFG_DCSR_PORCR1),
		 porsr1);
	out_be32((void *)(CONFIG_SYS_FSL_SCFG_ADDR + 0x1a8), 0xffffffff);
#endif
}

/* Get VDD in the unit of mV from the voltage ID */
int get_core_volt_from_fuse(void)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int vdd;
	u32 fusesr;
	u8 vid;

	fusesr = in_be32(&gur->dcfg_fusesr);
	debug("%s: fusesr = 0x%x\n", __func__, fusesr);
	vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_ALTVID_SHIFT) &
		FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK;
	if ((vid == 0) || (vid == FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK)) {
		vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_VID_SHIFT) &
			FSL_CHASSIS2_DCFG_FUSESR_VID_MASK;
	}
	debug("%s: VID = 0x%x\n", __func__, vid);
	switch (vid) {
	case 0x00: /* VID isn't supported */
		vdd = -EINVAL;
		debug("%s: The VID feature is not supported\n", __func__);
		break;
	case 0x08: /* 0.9V silicon */
		vdd = 900;
		break;
	case 0x10: /* 1.0V silicon */
		vdd = 1000;
		break;
	default: /* Other core voltage */
		vdd = -EINVAL;
		printf("%s: The VID (%x) is not supported\n", __func__, vid);
		break;
	}
	debug("%s: The required minimum voltage of CORE is %dmV\n", __func__, vdd);

	return vdd;
}

__weak int board_switch_core_volt(u32 vdd)
{
	return 0;
}

static int setup_core_volt(u32 vdd)
{
	return board_setup_core_volt(vdd);
}

#ifdef CONFIG_SYS_FSL_DDR
static void ddr_enable_0v9_volt(bool en)
{
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	tmp = ddr_in32(&ddr->ddr_cdr1);

	if (en)
		tmp |= DDR_CDR1_V0PT9_EN;
	else
		tmp &= ~DDR_CDR1_V0PT9_EN;

	ddr_out32(&ddr->ddr_cdr1, tmp);
}
#endif

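/*
 * Read the fused core voltage ID and, when VID is supported, switch
 * the core supply (and the SerDes supply, if SerDes is present) to
 * the fused voltage; enable the DDR controller's 0.9 V mode for
 * 900 mV parts.
 */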
int setup_chip_volt(void)
{
	int vdd;

	vdd = get_core_volt_from_fuse();
	/* Nothing to do for silicon that doesn't support VID */
	if (vdd < 0)
		return vdd;

	if (setup_core_volt(vdd))
		printf("%s: Switching core VDD to %dmV failed\n", __func__, vdd);
#ifdef CONFIG_SYS_HAS_SERDES
	if (setup_serdes_volt(vdd))
		printf("%s: Switching SVDD to %dmV failed\n", __func__, vdd);
#endif

#ifdef CONFIG_SYS_FSL_DDR
	if (vdd == 900)
		ddr_enable_0v9_volt(true);
#endif

	return 0;
}

#ifdef CONFIG_FSL_PFE
void init_pfe_scfg_dcfg_regs(void)
{
	struct ccsr_scfg *scfg = (struct ccsr_scfg *)CONFIG_SYS_FSL_SCFG_ADDR;
	u32 ecccr2;

	out_be32(&scfg->pfeasbcr,
		 in_be32(&scfg->pfeasbcr) | SCFG_PFEASBCR_AWCACHE0);
	out_be32(&scfg->pfebsbcr,
		 in_be32(&scfg->pfebsbcr) | SCFG_PFEASBCR_AWCACHE0);

	/* CCI-400 QoS settings for PFE */
	out_be32(&scfg->wr_qos1, (unsigned int)(SCFG_WR_QOS1_PFE1_QOS
		 | SCFG_WR_QOS1_PFE2_QOS));
	out_be32(&scfg->rd_qos1, (unsigned int)(SCFG_RD_QOS1_PFE1_QOS
		 | SCFG_RD_QOS1_PFE2_QOS));

	ecccr2 = in_be32(CONFIG_SYS_DCSR_DCFG_ADDR + DCFG_DCSR_ECCCR2);
	out_be32((void *)CONFIG_SYS_DCSR_DCFG_ADDR + DCFG_DCSR_ECCCR2,
		 ecccr2 | (unsigned int)DISABLE_PFE_ECC);
}
#endif

void fsl_lsch2_early_init_f(void)
{
	struct ccsr_cci400 *cci = (struct ccsr_cci400 *)(CONFIG_SYS_IMMR +
					CONFIG_SYS_CCI400_OFFSET);
	struct ccsr_scfg *scfg = (struct ccsr_scfg *)CONFIG_SYS_FSL_SCFG_ADDR;

#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
	enable_layerscape_ns_access();
#endif

#ifdef CONFIG_FSL_IFC
	init_early_memctl_regs();	/* tighten IFC timing */
#endif

#if defined(CONFIG_FSL_QSPI) && !defined(CONFIG_QSPI_BOOT)
	out_be32(&scfg->qspi_cfg, SCFG_QSPI_CLKSEL);
#endif
	/* Make SEC reads and writes snoopable */
	setbits_be32(&scfg->snpcnfgcr, SCFG_SNPCNFGCR_SECRDSNP |
		     SCFG_SNPCNFGCR_SECWRSNP |
		     SCFG_SNPCNFGCR_SATARDSNP |
		     SCFG_SNPCNFGCR_SATAWRSNP);

	/*
	 * Enable snoop requests and DVM message requests for
	 * slave interface S4 (A53 core cluster)
	 */
	if (current_el() == 3) {
		out_le32(&cci->slave[4].snoop_ctrl,
			 CCI400_DVM_MESSAGE_REQ_EN | CCI400_SNOOP_REQ_EN);
	}

	/*
	 * Program the Central Security Unit (CSU) to grant access
	 * permission for the USB 2.0 controller
	 */
#if defined(CONFIG_ARCH_LS1012A) && defined(CONFIG_USB_EHCI_FSL)
	if (current_el() == 3)
		set_devices_ns_access(CSU_CSLX_USB_2, CSU_ALL_RW);
#endif
	/* Erratum workarounds */
	erratum_a008850_early();	/* part 1 of 2 */
	erratum_a009929();
	erratum_a009660();
	erratum_a010539();
	erratum_a009008();
	erratum_a009798();
	erratum_a008997();
	erratum_a009007();

#if defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A)
	set_icids();
#endif
}
#endif

#ifdef CONFIG_QSPI_AHB_INIT
/* Enable 4-byte address support and fast read */
int qspi_ahb_init(void)
{
	u32 *qspi_lut, lut_key, *qspi_key;

	qspi_key = (void *)SYS_FSL_QSPI_ADDR + 0x300;
	qspi_lut = (void *)SYS_FSL_QSPI_ADDR + 0x310;

	lut_key = in_be32(qspi_key);

	if (lut_key == 0x5af05af0) {
		/* That means the register is big-endian */
		out_be32(qspi_key, 0x5af05af0);
		/* Unlock the LUT table */
		out_be32(qspi_key + 1, 0x00000002);
		out_be32(qspi_lut, 0x0820040c);
		out_be32(qspi_lut + 1, 0x1c080c08);
		out_be32(qspi_lut + 2, 0x00002400);
		/* Lock the LUT table */
		out_be32(qspi_key, 0x5af05af0);
		out_be32(qspi_key + 1, 0x00000001);
	} else {
		/* That means the register is little-endian */
		out_le32(qspi_key, 0x5af05af0);
		/* Unlock the LUT table */
		out_le32(qspi_key + 1, 0x00000002);
		out_le32(qspi_lut, 0x0820040c);
		out_le32(qspi_lut + 1, 0x1c080c08);
		out_le32(qspi_lut + 2, 0x00002400);
		/* Lock the LUT table */
		out_le32(qspi_key, 0x5af05af0);
		out_le32(qspi_key + 1, 0x00000001);
	}

	return 0;
}
#endif

#ifdef CONFIG_TFABOOT
#define MAX_BOOTCMD_SIZE 512

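/*
 * Set the "bootcmd" environment variable according to the detected
 * boot source, falling back to the QSPI NOR boot command when the
 * source has no dedicated command defined.
 */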
int fsl_setenv_bootcmd(void)
{
	int ret;
	enum boot_src src = get_boot_src();
	char bootcmd_str[MAX_BOOTCMD_SIZE];

	switch (src) {
#ifdef IFC_NOR_BOOTCOMMAND
	case BOOT_SOURCE_IFC_NOR:
		sprintf(bootcmd_str, IFC_NOR_BOOTCOMMAND);
		break;
#endif
#ifdef QSPI_NOR_BOOTCOMMAND
	case BOOT_SOURCE_QSPI_NOR:
		sprintf(bootcmd_str, QSPI_NOR_BOOTCOMMAND);
		break;
#endif
#ifdef XSPI_NOR_BOOTCOMMAND
	case BOOT_SOURCE_XSPI_NOR:
		sprintf(bootcmd_str, XSPI_NOR_BOOTCOMMAND);
		break;
#endif
#ifdef IFC_NAND_BOOTCOMMAND
	case BOOT_SOURCE_IFC_NAND:
		sprintf(bootcmd_str, IFC_NAND_BOOTCOMMAND);
		break;
#endif
#ifdef QSPI_NAND_BOOTCOMMAND
	case BOOT_SOURCE_QSPI_NAND:
		sprintf(bootcmd_str, QSPI_NAND_BOOTCOMMAND);
		break;
#endif
#ifdef XSPI_NAND_BOOTCOMMAND
	case BOOT_SOURCE_XSPI_NAND:
		sprintf(bootcmd_str, XSPI_NAND_BOOTCOMMAND);
		break;
#endif
#ifdef SD_BOOTCOMMAND
	case BOOT_SOURCE_SD_MMC:
		sprintf(bootcmd_str, SD_BOOTCOMMAND);
		break;
#endif
#ifdef SD2_BOOTCOMMAND
	case BOOT_SOURCE_SD_MMC2:
		sprintf(bootcmd_str, SD2_BOOTCOMMAND);
		break;
#endif
	default:
#ifdef QSPI_NOR_BOOTCOMMAND
		sprintf(bootcmd_str, QSPI_NOR_BOOTCOMMAND);
#endif
		break;
	}

	ret = env_set("bootcmd", bootcmd_str);
	if (ret) {
		printf("Failed to set bootcmd: ret = %d\n", ret);
		return ret;
	}
	return 0;
}

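/*
 * Set the "mcinitcmd" environment variable (the management complex
 * initialization command) according to the detected boot source,
 * falling back to the QSPI command by default.
 */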
int fsl_setenv_mcinitcmd(void)
{
	int ret = 0;
	enum boot_src src = get_boot_src();

	switch (src) {
#ifdef IFC_MC_INIT_CMD
	case BOOT_SOURCE_IFC_NAND:
	case BOOT_SOURCE_IFC_NOR:
		ret = env_set("mcinitcmd", IFC_MC_INIT_CMD);
		break;
#endif
#ifdef QSPI_MC_INIT_CMD
	case BOOT_SOURCE_QSPI_NAND:
	case BOOT_SOURCE_QSPI_NOR:
		ret = env_set("mcinitcmd", QSPI_MC_INIT_CMD);
		break;
#endif
#ifdef XSPI_MC_INIT_CMD
	case BOOT_SOURCE_XSPI_NAND:
	case BOOT_SOURCE_XSPI_NOR:
		ret = env_set("mcinitcmd", XSPI_MC_INIT_CMD);
		break;
#endif
#ifdef SD_MC_INIT_CMD
	case BOOT_SOURCE_SD_MMC:
		ret = env_set("mcinitcmd", SD_MC_INIT_CMD);
		break;
#endif
#ifdef SD2_MC_INIT_CMD
	case BOOT_SOURCE_SD_MMC2:
		ret = env_set("mcinitcmd", SD2_MC_INIT_CMD);
		break;
#endif
	default:
#ifdef QSPI_MC_INIT_CMD
		ret = env_set("mcinitcmd", QSPI_MC_INIT_CMD);
#endif
		break;
	}

	if (ret) {
		printf("Failed to set mcinitcmd: ret = %d\n", ret);
		return ret;
	}
	return 0;
}
#endif

#ifdef CONFIG_BOARD_LATE_INIT
int board_late_init(void)
{
#ifdef CONFIG_CHAIN_OF_TRUST
	fsl_setenv_chain_of_trust();
#endif
#ifdef CONFIG_TFABOOT
	/*
	 * If gd->env_addr still points at default_environment, set the
	 * bootcmd and mcinitcmd variables.
	 */
	if (gd->env_addr + gd->reloc_off == (ulong)&default_environment[0]) {
		fsl_setenv_bootcmd();
		fsl_setenv_mcinitcmd();
	}

	/*
	 * In secure boot mode the default environment is not used, so
	 * bootcmd and mcinitcmd always need to be set here.
	 */
#ifdef CONFIG_CHAIN_OF_TRUST
	if (fsl_check_boot_mode_secure() == 1) {
		fsl_setenv_bootcmd();
		fsl_setenv_mcinitcmd();
	}
#endif
#endif
#ifdef CONFIG_QSPI_AHB_INIT
	qspi_ahb_init();
#endif

	return 0;
}
#endif