/*
 * Copyright 2004,2007-2011 Freescale Semiconductor, Inc.
 * (C) Copyright 2002, 2003 Motorola Inc.
 * Xianghua Xiao (X.Xiao@motorola.com)
 *
 * (C) Copyright 2000
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <watchdog.h>
#include <command.h>
#include <fsl_esdhc.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <fsl_ifc.h>
#include <asm/fsl_law.h>
#include <asm/fsl_lbc.h>
#include <post.h>
#include <asm/processor.h>
#include <fsl_ddr_sdram.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Default board reset function
 */
static void
__board_reset(void)
{
	/* Do nothing */
}
void board_reset(void) __attribute__((weak, alias("__board_reset")));

int checkcpu (void)
{
	sys_info_t sysinfo;
	uint pvr, svr;
	uint ver;
	uint major, minor;
	struct cpu_type *cpu;
	char buf1[32], buf2[32];
#if defined(CONFIG_DDR_CLK_FREQ) || defined(CONFIG_FSL_CORENET)
	ccsr_gur_t __iomem *gur =
		(void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif

	/*
	 * CoreNet platforms use the DDR sync bit in the RCW to indicate
	 * synchronous vs. asynchronous mode.  Earlier platforms use the
	 * DDR ratio for the same purpose.  This information is only used
	 * for display here.
	 */
#ifdef CONFIG_FSL_CORENET
#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
	u32 ddr_sync = 0;	/* only async mode is supported */
#else
	u32 ddr_sync = ((gur->rcwsr[5]) & FSL_CORENET_RCWSR5_DDR_SYNC)
		>> FSL_CORENET_RCWSR5_DDR_SYNC_SHIFT;
#endif /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */
#else	/* CONFIG_FSL_CORENET */
#ifdef CONFIG_DDR_CLK_FREQ
	u32 ddr_ratio = ((gur->porpllsr) & MPC85xx_PORPLLSR_DDR_RATIO)
		>> MPC85xx_PORPLLSR_DDR_RATIO_SHIFT;
#else
	u32 ddr_ratio = 0;
#endif /* CONFIG_DDR_CLK_FREQ */
#endif /* CONFIG_FSL_CORENET */

	unsigned int i, core, nr_cores = cpu_numcores();
	u32 mask = cpu_mask();

#ifdef CONFIG_HETROGENOUS_CLUSTERS
	unsigned int j, dsp_core, dsp_numcores = cpu_num_dspcores();
	u32 dsp_mask = cpu_dsp_mask();
#endif

	svr = get_svr();
	major = SVR_MAJ(svr);
	minor = SVR_MIN(svr);

#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
	if (SVR_SOC_VER(svr) == SVR_T4080) {
		ccsr_rcpm_t *rcpm =
			(void __iomem *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);

		setbits_be32(&gur->devdisr2, FSL_CORENET_DEVDISR2_DTSEC1_6 |
			     FSL_CORENET_DEVDISR2_DTSEC1_9);
		setbits_be32(&gur->devdisr3, FSL_CORENET_DEVDISR3_PCIE3);
		setbits_be32(&gur->devdisr5, FSL_CORENET_DEVDISR5_DDR3);

		/* Software must disable cores 4~7 per the T4080 hardware design */
		for (i = 4; i < 8; i++)
			cpu_disable(i);

		/* Request cores 4~7 into the PH20 state; prior to entering
		 * the PCL10 state, all cores in the cluster must be placed
		 * in PH20.
		 */
		setbits_be32(&rcpm->pcph20setr, 0xf0);

		/* put the 2nd cluster into PCL10 state */
		setbits_be32(&rcpm->clpcl10setr, 1 << 1);
	}
#endif

	if (cpu_numcores() > 1) {
#ifndef CONFIG_MP
		puts("Unicore software on multiprocessor system!!\n"
		     "To enable multicore build define CONFIG_MP\n");
#endif
		volatile ccsr_pic_t *pic = (void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
		printf("CPU%d:  ", pic->whoami);
	} else {
		puts("CPU:   ");
	}

	cpu = gd->arch.cpu;

	puts(cpu->name);
	if (IS_E_PROCESSOR(svr))
		puts("E");

	printf(", Version: %d.%d, (0x%08x)\n", major, minor, svr);

	pvr = get_pvr();
	ver = PVR_VER(pvr);
	major = PVR_MAJ(pvr);
	minor = PVR_MIN(pvr);

	printf("Core:  ");
	switch(ver) {
	case PVR_VER_E500_V1:
	case PVR_VER_E500_V2:
		puts("e500");
		break;
	case PVR_VER_E500MC:
		puts("e500mc");
		break;
	case PVR_VER_E5500:
		puts("e5500");
		break;
	case PVR_VER_E6500:
		puts("e6500");
		break;
	default:
		puts("Unknown");
		break;
	}

	printf(", Version: %d.%d, (0x%08x)\n", major, minor, pvr);

	if (nr_cores > CONFIG_MAX_CPUS) {
		panic("\nUnexpected number of cores: %d, max is %d\n",
		      nr_cores, CONFIG_MAX_CPUS);
	}

	get_sys_info(&sysinfo);

#ifdef CONFIG_SYS_FSL_SINGLE_SOURCE_CLK
	if (sysinfo.diff_sysclk == 1)
		puts("Single Source Clock Configuration\n");
#endif

	puts("Clock Configuration:");
	for_each_cpu(i, core, nr_cores, mask) {
		if (!(i & 3))
			printf ("\n       ");
		printf("CPU%d:%-4s MHz, ", core,
		       strmhz(buf1, sysinfo.freq_processor[core]));
	}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
	for_each_cpu(j, dsp_core, dsp_numcores, dsp_mask) {
		if (!(j & 3))
			printf("\n       ");
		printf("DSP CPU%d:%-4s MHz, ", j,
		       strmhz(buf1, sysinfo.freq_processor_dsp[dsp_core]));
	}
#endif

	printf("\n       CCB:%-4s MHz,", strmhz(buf1, sysinfo.freq_systembus));
	printf("\n");

#ifdef CONFIG_FSL_CORENET
	if (ddr_sync == 1) {
		printf("       DDR:%-4s MHz (%s MT/s data rate) "
		       "(Synchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus/2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
	} else {
		printf("       DDR:%-4s MHz (%s MT/s data rate) "
		       "(Asynchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus/2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
	}
#else
	switch (ddr_ratio) {
	case 0x0:
		printf("       DDR:%-4s MHz (%s MT/s data rate), ",
		       strmhz(buf1, sysinfo.freq_ddrbus/2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
		break;
	case 0x7:
		printf("       DDR:%-4s MHz (%s MT/s data rate) "
		       "(Synchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus/2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
		break;
	default:
		printf("       DDR:%-4s MHz (%s MT/s data rate) "
		       "(Asynchronous), ",
		       strmhz(buf1, sysinfo.freq_ddrbus/2),
		       strmhz(buf2, sysinfo.freq_ddrbus));
		break;
	}
#endif

#if defined(CONFIG_FSL_LBC)
	if (sysinfo.freq_localbus > LCRR_CLKDIV) {
		printf("LBC:%-4s MHz\n", strmhz(buf1, sysinfo.freq_localbus));
	} else {
		printf("LBC: unknown (LCRR[CLKDIV] = 0x%02lx)\n",
		       sysinfo.freq_localbus);
	}
#endif

#if defined(CONFIG_FSL_IFC)
	printf("IFC:%-4s MHz\n", strmhz(buf1, sysinfo.freq_localbus));
#endif

#ifdef CONFIG_CPM2
	printf("CPM:   %s MHz\n", strmhz(buf1, sysinfo.freq_systembus));
#endif

#ifdef CONFIG_QE
	printf("       QE:%-4s MHz\n", strmhz(buf1, sysinfo.freq_qe));
#endif

#if defined(CONFIG_SYS_CPRI)
	printf("       ");
	printf("CPRI:%-4s MHz", strmhz(buf1, sysinfo.freq_cpri));
#endif

#if defined(CONFIG_SYS_MAPLE)
	printf("\n       ");
	printf("MAPLE:%-4s MHz, ", strmhz(buf1, sysinfo.freq_maple));
	printf("MAPLE-ULB:%-4s MHz, ", strmhz(buf1, sysinfo.freq_maple_ulb));
	printf("MAPLE-eTVPE:%-4s MHz\n",
	       strmhz(buf1, sysinfo.freq_maple_etvpe));
#endif

#ifdef CONFIG_SYS_DPAA_FMAN
	for (i = 0; i < CONFIG_SYS_NUM_FMAN; i++) {
		printf("       FMAN%d: %s MHz\n", i + 1,
		       strmhz(buf1, sysinfo.freq_fman[i]));
	}
#endif

#ifdef CONFIG_SYS_DPAA_QBMAN
	printf("       QMAN:  %s MHz\n", strmhz(buf1, sysinfo.freq_qman));
#endif

#ifdef CONFIG_SYS_DPAA_PME
	printf("       PME:   %s MHz\n", strmhz(buf1, sysinfo.freq_pme));
#endif

	puts("L1:    D-cache 32 KiB enabled\n       I-cache 32 KiB enabled\n");

#ifdef CONFIG_FSL_CORENET
	/* Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		u32 rcw = in_be32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");
#endif

	return 0;
}

/* ------------------------------------------------------------------------- */

int do_reset (cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
/* Everything after the first generation of PQ3 parts has RSTCR */
#if defined(CONFIG_ARCH_MPC8540) || defined(CONFIG_ARCH_MPC8541) || \
	defined(CONFIG_ARCH_MPC8555) || defined(CONFIG_ARCH_MPC8560)
	unsigned long val, msr;

	/*
	 * Initiate hard reset in debug control register DBCR0
	 * Make sure MSR[DE] = 1.  This only resets the core.
	 */
	msr = mfmsr ();
	msr |= MSR_DE;
	mtmsr (msr);

	val = mfspr(DBCR0);
	val |= 0x70000000;
	mtspr(DBCR0,val);
#else
	volatile ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* Attempt board-specific reset */
	board_reset();

	/* Next try asserting HRESET_REQ */
	out_be32(&gur->rstcr, 0x2);
	udelay(100);
#endif

	return 1;
}

/*
 * Get timebase clock frequency
 */
#ifndef CONFIG_SYS_FSL_TBCLK_DIV
#define CONFIG_SYS_FSL_TBCLK_DIV 8
#endif
__weak unsigned long get_tbclk (void)
{
	unsigned long tbclk_div = CONFIG_SYS_FSL_TBCLK_DIV;

	return (gd->bus_clk + (tbclk_div >> 1)) / tbclk_div;
}

#if defined(CONFIG_WATCHDOG)
#define WATCHDOG_MASK (TCR_WP(63) | TCR_WRC(3) | TCR_WIE)
void
init_85xx_watchdog(void)
{
	mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WATCHDOG_MASK) |
	      TCR_WP(CONFIG_WATCHDOG_PRESC) | TCR_WRC(CONFIG_WATCHDOG_RC));
}

void
reset_85xx_watchdog(void)
{
	/*
	 * Clear TSR(WIS) bit by writing 1
	 */
	mtspr(SPRN_TSR, TSR_WIS);
}

void
watchdog_reset(void)
{
	int re_enable = disable_interrupts();

	reset_85xx_watchdog();
	if (re_enable)
		enable_interrupts();
}
#endif	/* CONFIG_WATCHDOG */

/*
 * Initializes on-chip MMC controllers.
 * to override, implement board_mmc_init()
 */
int cpu_mmc_init(bd_t *bis)
{
#ifdef CONFIG_FSL_ESDHC
	return fsl_esdhc_mmc_init(bis);
#else
	return 0;
#endif
}

/*
 * Print out the state of various machine registers.
 * Currently prints out LAWs, BR0/OR0 for LBC, CSPR/CSOR/Timing
 * parameters for IFC and TLBs
 */
void mpc85xx_reginfo(void)
{
	print_tlbcam();
	print_laws();
#if defined(CONFIG_FSL_LBC)
	print_lbc_regs();
#endif
#ifdef CONFIG_FSL_IFC
	print_ifc_regs();
#endif
}

/* Common ddr init for non-corenet fsl 85xx platforms */
#ifndef CONFIG_FSL_CORENET
#if (defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)) && \
	!defined(CONFIG_SYS_INIT_L2_ADDR)
phys_size_t initdram(int board_type)
{
#if defined(CONFIG_SPD_EEPROM) || defined(CONFIG_DDR_SPD) || \
	defined(CONFIG_ARCH_QEMU_E500)
	return fsl_ddr_sdram_size();
#else
	return (phys_size_t)CONFIG_SYS_SDRAM_SIZE * 1024 * 1024;
#endif
}
#else /* CONFIG_SYS_RAMBOOT */
phys_size_t initdram(int board_type)
{
	phys_size_t dram_size = 0;

#if defined(CONFIG_SYS_FSL_ERRATUM_DDR_MSYNC_IN)
	{
		ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
		unsigned int x = 10;
		unsigned int i;

		/*
		 * Workaround to stabilize the DDR DLL
		 */
		out_be32(&gur->ddrdllcr, 0x81000000);
		asm("sync;isync;msync");
		udelay(200);
		while (in_be32(&gur->ddrdllcr) != 0x81000100) {
			setbits_be32(&gur->devdisr, 0x00010000);
			for (i = 0; i < x; i++)
				;
			clrbits_be32(&gur->devdisr, 0x00010000);
			x++;
		}
	}
#endif

#if defined(CONFIG_SPD_EEPROM) || \
	defined(CONFIG_DDR_SPD) || \
	defined(CONFIG_SYS_DDR_RAW_TIMING)
	dram_size = fsl_ddr_sdram();
#else
	dram_size = fixed_sdram();
#endif
	dram_size = setup_ddr_tlbs(dram_size / 0x100000);
	dram_size *= 0x100000;

#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
	/*
	 * Initialize and enable DDR ECC.
453 */ 454 ddr_enable_ecc(dram_size); 455 #endif 456 457 #if defined(CONFIG_FSL_LBC) 458 /* Some boards also have sdram on the lbc */ 459 lbc_sdram_init(); 460 #endif 461 462 debug("DDR: "); 463 return dram_size; 464 } 465 #endif /* CONFIG_SYS_RAMBOOT */ 466 #endif 467 468 #if CONFIG_POST & CONFIG_SYS_POST_MEMORY 469 470 /* Board-specific functions defined in each board's ddr.c */ 471 void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd, 472 unsigned int ctrl_num, unsigned int dimm_slots_per_ctrl); 473 void read_tlbcam_entry(int idx, u32 *valid, u32 *tsize, unsigned long *epn, 474 phys_addr_t *rpn); 475 unsigned int 476 setup_ddr_tlbs_phys(phys_addr_t p_addr, unsigned int memsize_in_meg); 477 478 void clear_ddr_tlbs_phys(phys_addr_t p_addr, unsigned int memsize_in_meg); 479 480 static void dump_spd_ddr_reg(void) 481 { 482 int i, j, k, m; 483 u8 *p_8; 484 u32 *p_32; 485 struct ccsr_ddr __iomem *ddr[CONFIG_NUM_DDR_CONTROLLERS]; 486 generic_spd_eeprom_t 487 spd[CONFIG_NUM_DDR_CONTROLLERS][CONFIG_DIMM_SLOTS_PER_CTLR]; 488 489 for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) 490 fsl_ddr_get_spd(spd[i], i, CONFIG_DIMM_SLOTS_PER_CTLR); 491 492 puts("SPD data of all dimms (zero value is omitted)...\n"); 493 puts("Byte (hex) "); 494 k = 1; 495 for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) { 496 for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) 497 printf("Dimm%d ", k++); 498 } 499 puts("\n"); 500 for (k = 0; k < sizeof(generic_spd_eeprom_t); k++) { 501 m = 0; 502 printf("%3d (0x%02x) ", k, k); 503 for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) { 504 for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { 505 p_8 = (u8 *) &spd[i][j]; 506 if (p_8[k]) { 507 printf("0x%02x ", p_8[k]); 508 m++; 509 } else 510 puts(" "); 511 } 512 } 513 if (m) 514 puts("\n"); 515 else 516 puts("\r"); 517 } 518 519 for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) { 520 switch (i) { 521 case 0: 522 ddr[i] = (void *)CONFIG_SYS_FSL_DDR_ADDR; 523 break; 524 #if defined(CONFIG_SYS_FSL_DDR2_ADDR) && (CONFIG_NUM_DDR_CONTROLLERS > 1) 525 case 1: 526 ddr[i] = (void *)CONFIG_SYS_FSL_DDR2_ADDR; 527 break; 528 #endif 529 #if defined(CONFIG_SYS_FSL_DDR3_ADDR) && (CONFIG_NUM_DDR_CONTROLLERS > 2) 530 case 2: 531 ddr[i] = (void *)CONFIG_SYS_FSL_DDR3_ADDR; 532 break; 533 #endif 534 #if defined(CONFIG_SYS_FSL_DDR4_ADDR) && (CONFIG_NUM_DDR_CONTROLLERS > 3) 535 case 3: 536 ddr[i] = (void *)CONFIG_SYS_FSL_DDR4_ADDR; 537 break; 538 #endif 539 default: 540 printf("%s unexpected controller number = %u\n", 541 __func__, i); 542 return; 543 } 544 } 545 printf("DDR registers dump for all controllers " 546 "(zero value is omitted)...\n"); 547 puts("Offset (hex) "); 548 for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) 549 printf(" Base + 0x%04x", (u32)ddr[i] & 0xFFFF); 550 puts("\n"); 551 for (k = 0; k < sizeof(struct ccsr_ddr)/4; k++) { 552 m = 0; 553 printf("%6d (0x%04x)", k * 4, k * 4); 554 for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) { 555 p_32 = (u32 *) ddr[i]; 556 if (p_32[k]) { 557 printf(" 0x%08x", p_32[k]); 558 m++; 559 } else 560 puts(" "); 561 } 562 if (m) 563 puts("\n"); 564 else 565 puts("\r"); 566 } 567 puts("\n"); 568 } 569 570 /* invalid the TLBs for DDR and setup new ones to cover p_addr */ 571 static int reset_tlb(phys_addr_t p_addr, u32 size, phys_addr_t *phys_offset) 572 { 573 u32 vstart = CONFIG_SYS_DDR_SDRAM_BASE; 574 unsigned long epn; 575 u32 tsize, valid, ptr; 576 int ddr_esel; 577 578 clear_ddr_tlbs_phys(p_addr, size>>20); 579 580 /* Setup new tlb to cover the physical address */ 581 setup_ddr_tlbs_phys(p_addr, 

	ptr = vstart;
	ddr_esel = find_tlb_idx((void *)ptr, 1);
	if (ddr_esel != -1) {
		read_tlbcam_entry(ddr_esel, &valid, &tsize, &epn, phys_offset);
	} else {
		printf("TLB error in function %s\n", __func__);
		return -1;
	}

	return 0;
}

/*
 * Slide the testing window up to test another area.
 * For a 32-bit system, the maximum testable memory is limited to
 * CONFIG_MAX_MEM_MAPPED.
 */
int arch_memory_test_advance(u32 *vstart, u32 *size, phys_addr_t *phys_offset)
{
	phys_addr_t test_cap, p_addr;
	phys_size_t p_size = min(gd->ram_size, CONFIG_MAX_MEM_MAPPED);

#if !defined(CONFIG_PHYS_64BIT) || \
	!defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS) || \
	(CONFIG_SYS_INIT_RAM_ADDR_PHYS < 0x100000000ull)
	test_cap = p_size;
#else
	test_cap = gd->ram_size;
#endif
	p_addr = (*vstart) + (*size) + (*phys_offset);
	if (p_addr < test_cap - 1) {
		p_size = min(test_cap - p_addr, CONFIG_MAX_MEM_MAPPED);
		if (reset_tlb(p_addr, p_size, phys_offset) == -1)
			return -1;
		*vstart = CONFIG_SYS_DDR_SDRAM_BASE;
		*size = (u32) p_size;
		printf("Testing 0x%08llx - 0x%08llx\n",
		       (u64)(*vstart) + (*phys_offset),
		       (u64)(*vstart) + (*phys_offset) + (*size) - 1);
	} else
		return 1;

	return 0;
}

/* Initialization of the testing area */
int arch_memory_test_prepare(u32 *vstart, u32 *size, phys_addr_t *phys_offset)
{
	phys_size_t p_size = min(gd->ram_size, CONFIG_MAX_MEM_MAPPED);

	*vstart = CONFIG_SYS_DDR_SDRAM_BASE;
	*size = (u32) p_size;	/* CONFIG_MAX_MEM_MAPPED < 4G */
	*phys_offset = 0;

#if !defined(CONFIG_PHYS_64BIT) || \
	!defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS) || \
	(CONFIG_SYS_INIT_RAM_ADDR_PHYS < 0x100000000ull)
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		puts("Cannot test more than ");
		print_size(CONFIG_MAX_MEM_MAPPED,
			   " without proper 36BIT support.\n");
	}
#endif
	printf("Testing 0x%08llx - 0x%08llx\n",
	       (u64)(*vstart) + (*phys_offset),
	       (u64)(*vstart) + (*phys_offset) + (*size) - 1);

	return 0;
}

/* Invalidate TLBs for DDR and remap as normal after testing */
int arch_memory_test_cleanup(u32 *vstart, u32 *size, phys_addr_t *phys_offset)
{
	unsigned long epn;
	u32 tsize, valid, ptr;
	phys_addr_t rpn = 0;
	int ddr_esel;

	/* disable the TLBs for this testing */
	ptr = *vstart;

	while (ptr < (*vstart) + (*size)) {
		ddr_esel = find_tlb_idx((void *)ptr, 1);
		if (ddr_esel != -1) {
			read_tlbcam_entry(ddr_esel, &valid, &tsize, &epn, &rpn);
			disable_tlb(ddr_esel);
		}
		ptr += TSIZE_TO_BYTES(tsize);
	}

	puts("Remap DDR ");
	setup_ddr_tlbs(gd->ram_size>>20);
	puts("\n");

	return 0;
}

void arch_memory_failure_handle(void)
{
	dump_spd_ddr_reg();
}
#endif