// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2004,2007-2011 Freescale Semiconductor, Inc.
 * (C) Copyright 2002, 2003 Motorola Inc.
 * Xianghua Xiao (X.Xiao@motorola.com)
 *
 * (C) Copyright 2000
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

#include <config.h>
#include <common.h>
#include <watchdog.h>
#include <command.h>
#include <fsl_esdhc.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <fsl_ifc.h>
#include <asm/fsl_law.h>
#include <asm/fsl_lbc.h>
#include <post.h>
#include <asm/processor.h>
#include <fsl_ddr_sdram.h>
#include <asm/ppc.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Default board reset function
 */
static void
__board_reset(void)
{
	/* Do nothing */
}
void board_reset(void) __attribute__((weak, alias("__board_reset")));

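/*
 * Identify the CPU and print its core type, SVR/PVR revision and the
 * clock configuration of the SoC.
 */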
int checkcpu(void)
{
	sys_info_t sysinfo;
	uint pvr, svr;
	uint ver;
	uint major, minor;
	struct cpu_type *cpu;
	char buf1[32], buf2[32];
#if defined(CONFIG_DDR_CLK_FREQ) || defined(CONFIG_FSL_CORENET)
	ccsr_gur_t __iomem *gur =
		(void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif

	/*
	 * CoreNet platforms use the DDR sync bit in the RCW to indicate sync
	 * vs. async mode. Earlier platforms used the DDR ratio for the same
	 * purpose. This information is only displayed here.
	 */
#ifdef CONFIG_FSL_CORENET
#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
	u32 ddr_sync = 0;	/* only async mode is supported */
#else
	u32 ddr_sync = ((gur->rcwsr[5]) & FSL_CORENET_RCWSR5_DDR_SYNC)
		>> FSL_CORENET_RCWSR5_DDR_SYNC_SHIFT;
#endif /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */
#else	/* CONFIG_FSL_CORENET */
#ifdef CONFIG_DDR_CLK_FREQ
	u32 ddr_ratio = ((gur->porpllsr) & MPC85xx_PORPLLSR_DDR_RATIO)
		>> MPC85xx_PORPLLSR_DDR_RATIO_SHIFT;
#else
	u32 ddr_ratio = 0;
#endif /* CONFIG_DDR_CLK_FREQ */
#endif /* CONFIG_FSL_CORENET */

	unsigned int i, core, nr_cores = cpu_numcores();
	u32 mask = cpu_mask();

#ifdef CONFIG_HETROGENOUS_CLUSTERS
	unsigned int j, dsp_core, dsp_numcores = cpu_num_dspcores();
	u32 dsp_mask = cpu_dsp_mask();
#endif

	svr = get_svr();
	major = SVR_MAJ(svr);
	minor = SVR_MIN(svr);

#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
	if (SVR_SOC_VER(svr) == SVR_T4080) {
		ccsr_rcpm_t *rcpm =
			(void __iomem *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);

		setbits_be32(&gur->devdisr2, FSL_CORENET_DEVDISR2_DTSEC1_6 |
			     FSL_CORENET_DEVDISR2_DTSEC1_9);
		setbits_be32(&gur->devdisr3, FSL_CORENET_DEVDISR3_PCIE3);
		setbits_be32(&gur->devdisr5, FSL_CORENET_DEVDISR5_DDR3);

		/* Software must disable cores 4~7 on T4080 due to the
		 * hardware design.
		 */
		for (i = 4; i < 8; i++)
			cpu_disable(i);

		/* Request cores 4~7 to enter the PH20 state; before entering
		 * the PCL10 state, all cores in the cluster must be in PH20.
		 */
		setbits_be32(&rcpm->pcph20setr, 0xf0);

		/* put the 2nd cluster into PCL10 state */
		setbits_be32(&rcpm->clpcl10setr, 1 << 1);
	}
#endif

	if (cpu_numcores() > 1) {
#ifndef CONFIG_MP
		puts("Unicore software on multiprocessor system!!\n"
		     "To enable multicore build define CONFIG_MP\n");
#endif
		volatile ccsr_pic_t *pic = (void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
		printf("CPU%d: ", pic->whoami);
	} else {
		puts("CPU: ");
	}

	cpu = gd->arch.cpu;

	puts(cpu->name);
	if (IS_E_PROCESSOR(svr))
		puts("E");

	printf(", Version: %d.%d, (0x%08x)\n", major, minor, svr);

	pvr = get_pvr();
	ver = PVR_VER(pvr);
	major = PVR_MAJ(pvr);
	minor = PVR_MIN(pvr);

	printf("Core: ");
	switch (ver) {
	case PVR_VER_E500_V1:
	case PVR_VER_E500_V2:
		puts("e500");
		break;
	case PVR_VER_E500MC:
		puts("e500mc");
		break;
	case PVR_VER_E5500:
		puts("e5500");
		break;
	case PVR_VER_E6500:
		puts("e6500");
		break;
	default:
		puts("Unknown");
		break;
	}

	printf(", Version: %d.%d, (0x%08x)\n", major, minor, pvr);

	if (nr_cores > CONFIG_MAX_CPUS) {
		panic("\nUnexpected number of cores: %d, max is %d\n",
		      nr_cores, CONFIG_MAX_CPUS);
	}

	get_sys_info(&sysinfo);

#ifdef CONFIG_SYS_FSL_SINGLE_SOURCE_CLK
	if (sysinfo.diff_sysclk == 1)
		puts("Single Source Clock Configuration\n");
#endif

	puts("Clock Configuration:");
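	/* Print the core frequencies, four cores per line */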
	for_each_cpu(i, core, nr_cores, mask) {
		if (!(i & 3))
			printf("\n ");
		printf("CPU%d:%-4s MHz, ", core,
		       strmhz(buf1, sysinfo.freq_processor[core]));
	}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
	for_each_cpu(j, dsp_core, dsp_numcores, dsp_mask) {
		if (!(j & 3))
			printf("\n ");
		printf("DSP CPU%d:%-4s MHz, ", j,
		       strmhz(buf1, sysinfo.freq_processor_dsp[dsp_core]));
	}
#endif

	printf("\n CCB:%-4s MHz,", strmhz(buf1, sysinfo.freq_systembus));
	printf("\n");

#ifdef CONFIG_FSL_CORENET
	if (ddr_sync == 1) {
		printf(" DDR:%-4s MHz (%s MT/s data rate) "
		       "(Synchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
	} else {
		printf(" DDR:%-4s MHz (%s MT/s data rate) "
		       "(Asynchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
	}
#else
	switch (ddr_ratio) {
	case 0x0:
		printf(" DDR:%-4s MHz (%s MT/s data rate), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
		break;
	case 0x7:
		printf(" DDR:%-4s MHz (%s MT/s data rate) "
		       "(Synchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
		break;
	default:
		printf(" DDR:%-4s MHz (%s MT/s data rate) "
		       "(Asynchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus / 2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
		break;
	}
#endif

#if defined(CONFIG_FSL_LBC)
	if (sysinfo.freq_localbus > LCRR_CLKDIV) {
		printf("LBC:%-4s MHz\n", strmhz(buf1, sysinfo.freq_localbus));
	} else {
		printf("LBC: unknown (LCRR[CLKDIV] = 0x%02lx)\n",
		       sysinfo.freq_localbus);
	}
#endif

#if defined(CONFIG_FSL_IFC)
	printf("IFC:%-4s MHz\n", strmhz(buf1, sysinfo.freq_localbus));
#endif

#ifdef CONFIG_CPM2
	printf("CPM: %s MHz\n", strmhz(buf1, sysinfo.freq_systembus));
#endif

#ifdef CONFIG_QE
	printf(" QE:%-4s MHz\n", strmhz(buf1, sysinfo.freq_qe));
#endif

#if defined(CONFIG_SYS_CPRI)
	printf(" ");
	printf("CPRI:%-4s MHz", strmhz(buf1, sysinfo.freq_cpri));
#endif

#if defined(CONFIG_SYS_MAPLE)
	printf("\n ");
	printf("MAPLE:%-4s MHz, ", strmhz(buf1, sysinfo.freq_maple));
	printf("MAPLE-ULB:%-4s MHz, ", strmhz(buf1, sysinfo.freq_maple_ulb));
	printf("MAPLE-eTVPE:%-4s MHz\n",
	       strmhz(buf1, sysinfo.freq_maple_etvpe));
#endif

#ifdef CONFIG_SYS_DPAA_FMAN
	for (i = 0; i < CONFIG_SYS_NUM_FMAN; i++) {
		printf(" FMAN%d: %s MHz\n", i + 1,
		       strmhz(buf1, sysinfo.freq_fman[i]));
	}
#endif

#ifdef CONFIG_SYS_DPAA_QBMAN
	printf(" QMAN: %s MHz\n", strmhz(buf1, sysinfo.freq_qman));
#endif

#ifdef CONFIG_SYS_DPAA_PME
	printf(" PME: %s MHz\n", strmhz(buf1, sysinfo.freq_pme));
#endif

	puts("L1: D-cache 32 KiB enabled\n I-cache 32 KiB enabled\n");

#ifdef CONFIG_FSL_CORENET
	/* Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		u32 rcw = in_be32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");
#endif

	return 0;
}


/* ------------------------------------------------------------------------- */

int do_reset(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
/* Everything after the first generation of PQ3 parts has RSTCR */
#if defined(CONFIG_ARCH_MPC8540) || defined(CONFIG_ARCH_MPC8541) || \
	defined(CONFIG_ARCH_MPC8555) || defined(CONFIG_ARCH_MPC8560)
	unsigned long val, msr;

	/*
	 * Initiate hard reset in debug control register DBCR0
	 * Make sure MSR[DE] = 1. This only resets the core.
	 */
	msr = mfmsr();
	msr |= MSR_DE;
	mtmsr(msr);

	val = mfspr(DBCR0);
	val |= 0x70000000;
	mtspr(DBCR0, val);
#else
	volatile ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* Attempt board-specific reset */
	board_reset();

	/* Next try asserting HRESET_REQ */
	out_be32(&gur->rstcr, 0x2);
	udelay(100);
#endif

	return 1;
}


/*
 * Get timebase clock frequency
 */
#ifndef CONFIG_SYS_FSL_TBCLK_DIV
#define CONFIG_SYS_FSL_TBCLK_DIV	8
#endif
__weak unsigned long get_tbclk(void)
{
	unsigned long tbclk_div = CONFIG_SYS_FSL_TBCLK_DIV;

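	/*
	 * The timebase ticks at the bus clock divided by the platform
	 * divider; add half the divider first so the result is rounded
	 * to the nearest value.
	 */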
	return (gd->bus_clk + (tbclk_div >> 1)) / tbclk_div;
}


#if defined(CONFIG_WATCHDOG)
#define WATCHDOG_MASK (TCR_WP(63) | TCR_WRC(3) | TCR_WIE)
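/*
 * Program TCR with the watchdog period and reset control taken from the
 * board configuration, leaving the remaining TCR bits untouched.
 */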
void
init_85xx_watchdog(void)
{
	mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WATCHDOG_MASK) |
	      TCR_WP(CONFIG_WATCHDOG_PRESC) | TCR_WRC(CONFIG_WATCHDOG_RC));
}

void
reset_85xx_watchdog(void)
{
	/*
	 * Clear TSR(WIS) bit by writing 1
	 */
	mtspr(SPRN_TSR, TSR_WIS);
}

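/*
 * Service the watchdog: clear TSR[WIS] with interrupts disabled, then
 * restore the previous interrupt state.
 */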
void
watchdog_reset(void)
{
	int re_enable = disable_interrupts();

	reset_85xx_watchdog();
	if (re_enable)
		enable_interrupts();
}
#endif	/* CONFIG_WATCHDOG */

/*
 * Initialize the on-chip MMC controllers.
 * To override, implement board_mmc_init().
 */
int cpu_mmc_init(bd_t *bis)
{
#ifdef CONFIG_FSL_ESDHC
	return fsl_esdhc_mmc_init(bis);
#else
	return 0;
#endif
}

/*
 * Print out the state of various machine registers.
 * Currently prints out LAWs, BR0/OR0 for LBC, CSPR/CSOR/Timing
 * parameters for IFC, and TLBs.
 */
void print_reginfo(void)
{
	print_tlbcam();
	print_laws();
#if defined(CONFIG_FSL_LBC)
	print_lbc_regs();
#endif
#ifdef CONFIG_FSL_IFC
	print_ifc_regs();
#endif

}

/* Common DDR init for non-CoreNet FSL 85xx platforms */
#ifndef CONFIG_FSL_CORENET
#if (defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)) && \
	!defined(CONFIG_SYS_INIT_L2_ADDR)
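/*
 * When running from RAM (RAM boot or after SPL), the DDR controller has
 * already been set up; only the memory size needs to be determined here.
 */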
int dram_init(void)
{
#if defined(CONFIG_SPD_EEPROM) || defined(CONFIG_DDR_SPD) || \
	defined(CONFIG_ARCH_QEMU_E500)
	gd->ram_size = fsl_ddr_sdram_size();
#else
	gd->ram_size = (phys_size_t)CONFIG_SYS_SDRAM_SIZE * 1024 * 1024;
#endif

	return 0;
}
#else /* CONFIG_SYS_RAMBOOT */
int dram_init(void)
{
	phys_size_t dram_size = 0;

#if defined(CONFIG_SYS_FSL_ERRATUM_DDR_MSYNC_IN)
	{
		ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
		unsigned int x = 10;
		unsigned int i;

		/*
		 * Workaround to stabilize the DDR DLL
		 */
		out_be32(&gur->ddrdllcr, 0x81000000);
		asm("sync;isync;msync");
		udelay(200);
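		/*
		 * Pulse DEVDISR[0x00010000] with a progressively longer
		 * busy-wait until DDRDLLCR reads back the expected value.
		 */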
		while (in_be32(&gur->ddrdllcr) != 0x81000100) {
			setbits_be32(&gur->devdisr, 0x00010000);
			for (i = 0; i < x; i++)
				;
			clrbits_be32(&gur->devdisr, 0x00010000);
			x++;
		}
	}
#endif

#if defined(CONFIG_SPD_EEPROM) || \
	defined(CONFIG_DDR_SPD) || \
	defined(CONFIG_SYS_DDR_RAW_TIMING)
	dram_size = fsl_ddr_sdram();
#else
	dram_size = fixed_sdram();
#endif
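	/*
	 * Map the DDR with TLB entries; the value returned (in MiB) is what
	 * gets reported as the RAM size.
	 */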
	dram_size = setup_ddr_tlbs(dram_size / 0x100000);
	dram_size *= 0x100000;

#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
	/*
	 * Initialize and enable DDR ECC.
	 */
	ddr_enable_ecc(dram_size);
#endif

#if defined(CONFIG_FSL_LBC)
	/* Some boards also have SDRAM on the LBC */
	lbc_sdram_init();
#endif

	debug("DDR: ");
	gd->ram_size = dram_size;

	return 0;
}
#endif /* CONFIG_SYS_RAMBOOT */
#endif

#if CONFIG_POST & CONFIG_SYS_POST_MEMORY

/* Board-specific functions defined in each board's ddr.c */
void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd,
		     unsigned int ctrl_num, unsigned int dimm_slots_per_ctrl);
void read_tlbcam_entry(int idx, u32 *valid, u32 *tsize, unsigned long *epn,
		       phys_addr_t *rpn);
unsigned int
setup_ddr_tlbs_phys(phys_addr_t p_addr, unsigned int memsize_in_meg);

void clear_ddr_tlbs_phys(phys_addr_t p_addr, unsigned int memsize_in_meg);

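/*
 * Dump the SPD contents of every DIMM and the registers of every DDR
 * controller (omitting zero values) to aid analysis after a memory
 * test failure.
 */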
static void dump_spd_ddr_reg(void)
{
	int i, j, k, m;
	u8 *p_8;
	u32 *p_32;
	struct ccsr_ddr __iomem *ddr[CONFIG_SYS_NUM_DDR_CTLRS];
	generic_spd_eeprom_t
		spd[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR];

	for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++)
		fsl_ddr_get_spd(spd[i], i, CONFIG_DIMM_SLOTS_PER_CTLR);

	puts("SPD data of all dimms (zero value is omitted)...\n");
	puts("Byte (hex) ");
	k = 1;
	for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) {
		for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++)
			printf("Dimm%d ", k++);
	}
	puts("\n");
	for (k = 0; k < sizeof(generic_spd_eeprom_t); k++) {
		m = 0;
		printf("%3d (0x%02x) ", k, k);
		for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) {
			for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
				p_8 = (u8 *)&spd[i][j];
				if (p_8[k]) {
					printf("0x%02x ", p_8[k]);
					m++;
				} else
					puts(" ");
			}
		}
		if (m)
			puts("\n");
		else
			puts("\r");
	}

	for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) {
		switch (i) {
		case 0:
			ddr[i] = (void *)CONFIG_SYS_FSL_DDR_ADDR;
			break;
#if defined(CONFIG_SYS_FSL_DDR2_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 1)
		case 1:
			ddr[i] = (void *)CONFIG_SYS_FSL_DDR2_ADDR;
			break;
#endif
#if defined(CONFIG_SYS_FSL_DDR3_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 2)
		case 2:
			ddr[i] = (void *)CONFIG_SYS_FSL_DDR3_ADDR;
			break;
#endif
#if defined(CONFIG_SYS_FSL_DDR4_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 3)
		case 3:
			ddr[i] = (void *)CONFIG_SYS_FSL_DDR4_ADDR;
			break;
#endif
		default:
			printf("%s unexpected controller number = %u\n",
			       __func__, i);
			return;
		}
	}
	printf("DDR registers dump for all controllers "
	       "(zero value is omitted)...\n");
	puts("Offset (hex) ");
	for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++)
		printf(" Base + 0x%04x", (u32)ddr[i] & 0xFFFF);
	puts("\n");
	for (k = 0; k < sizeof(struct ccsr_ddr) / 4; k++) {
		m = 0;
		printf("%6d (0x%04x)", k * 4, k * 4);
		for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) {
			p_32 = (u32 *)ddr[i];
			if (p_32[k]) {
				printf(" 0x%08x", p_32[k]);
				m++;
			} else
				puts(" ");
		}
		if (m)
			puts("\n");
		else
			puts("\r");
	}
	puts("\n");
}

/* Invalidate the TLBs for DDR and set up new ones to cover p_addr */
static int reset_tlb(phys_addr_t p_addr, u32 size, phys_addr_t *phys_offset)
{
	u32 vstart = CONFIG_SYS_DDR_SDRAM_BASE;
	unsigned long epn;
	u32 tsize, valid, ptr;
	int ddr_esel;

	clear_ddr_tlbs_phys(p_addr, size >> 20);

	/* Set up a new TLB entry to cover the physical address */
	setup_ddr_tlbs_phys(p_addr, size >> 20);

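	/*
	 * Look up the TLB entry that now maps the DDR base and return its
	 * physical address to the caller through phys_offset.
	 */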
	ptr = vstart;
	ddr_esel = find_tlb_idx((void *)ptr, 1);
	if (ddr_esel != -1) {
		read_tlbcam_entry(ddr_esel, &valid, &tsize, &epn, phys_offset);
	} else {
		printf("TLB error in function %s\n", __func__);
		return -1;
	}

	return 0;
}

/*
 * Slide the testing window up to test another area.
 * On a 32-bit system the maximum testable memory is limited to
 * CONFIG_MAX_MEM_MAPPED.
 */
int arch_memory_test_advance(u32 *vstart, u32 *size, phys_addr_t *phys_offset)
{
	phys_addr_t test_cap, p_addr;
	phys_size_t p_size = min(gd->ram_size, CONFIG_MAX_MEM_MAPPED);

#if !defined(CONFIG_PHYS_64BIT) || \
	!defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS) || \
	(CONFIG_SYS_INIT_RAM_ADDR_PHYS < 0x100000000ull)
	test_cap = p_size;
#else
	test_cap = gd->ram_size;
#endif
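	/* Start the next window just past the region tested previously. */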
	p_addr = (*vstart) + (*size) + (*phys_offset);
	if (p_addr < test_cap - 1) {
		p_size = min(test_cap - p_addr, CONFIG_MAX_MEM_MAPPED);
		if (reset_tlb(p_addr, p_size, phys_offset) == -1)
			return -1;
		*vstart = CONFIG_SYS_DDR_SDRAM_BASE;
		*size = (u32)p_size;
		printf("Testing 0x%08llx - 0x%08llx\n",
		       (u64)(*vstart) + (*phys_offset),
		       (u64)(*vstart) + (*phys_offset) + (*size) - 1);
	} else
		return 1;

	return 0;
}

/* initialization for testing area */
int arch_memory_test_prepare(u32 *vstart, u32 *size, phys_addr_t *phys_offset)
{
	phys_size_t p_size = min(gd->ram_size, CONFIG_MAX_MEM_MAPPED);

	*vstart = CONFIG_SYS_DDR_SDRAM_BASE;
	*size = (u32)p_size;	/* CONFIG_MAX_MEM_MAPPED < 4G */
	*phys_offset = 0;

#if !defined(CONFIG_PHYS_64BIT) || \
	!defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS) || \
	(CONFIG_SYS_INIT_RAM_ADDR_PHYS < 0x100000000ull)
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		puts("Cannot test more than ");
		print_size(CONFIG_MAX_MEM_MAPPED,
			   " without proper 36BIT support.\n");
	}
#endif
	printf("Testing 0x%08llx - 0x%08llx\n",
	       (u64)(*vstart) + (*phys_offset),
	       (u64)(*vstart) + (*phys_offset) + (*size) - 1);

	return 0;
}

/* Invalidate the TLBs for DDR and remap as normal after testing */
int arch_memory_test_cleanup(u32 *vstart, u32 *size, phys_addr_t *phys_offset)
{
	unsigned long epn;
	u32 tsize, valid, ptr;
	phys_addr_t rpn = 0;
	int ddr_esel;

	/* disable the TLBs for this testing */
	ptr = *vstart;

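	/* Walk the tested region and disable every DDR TLB entry covering it. */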
	while (ptr < (*vstart) + (*size)) {
		ddr_esel = find_tlb_idx((void *)ptr, 1);
		if (ddr_esel != -1) {
			read_tlbcam_entry(ddr_esel, &valid, &tsize, &epn, &rpn);
			disable_tlb(ddr_esel);
		}
		ptr += TSIZE_TO_BYTES(tsize);
	}

	puts("Remap DDR ");
	setup_ddr_tlbs(gd->ram_size >> 20);
	puts("\n");

	return 0;
}

void arch_memory_failure_handle(void)
{
	dump_spd_ddr_reg();
}
#endif