1 /* 2 * Copyright 2014-2015 Freescale Semiconductor, Inc. 3 * 4 * SPDX-License-Identifier: GPL-2.0+ 5 */ 6 7 #include <common.h> 8 #include <asm/io.h> 9 #include <asm/errno.h> 10 #include <asm/system.h> 11 #include <asm/armv8/mmu.h> 12 #include <asm/io.h> 13 #include <asm/arch/fsl_serdes.h> 14 #include <asm/arch/soc.h> 15 #include <asm/arch/cpu.h> 16 #include <asm/arch/speed.h> 17 #ifdef CONFIG_MP 18 #include <asm/arch/mp.h> 19 #endif 20 #include <fm_eth.h> 21 #include <fsl_debug_server.h> 22 #include <fsl-mc/fsl_mc.h> 23 #ifdef CONFIG_FSL_ESDHC 24 #include <fsl_esdhc.h> 25 #endif 26 27 DECLARE_GLOBAL_DATA_PTR; 28 29 static struct mm_region layerscape_mem_map[] = { 30 { 31 /* List terminator */ 32 0, 33 } 34 }; 35 struct mm_region *mem_map = layerscape_mem_map; 36 37 void cpu_name(char *name) 38 { 39 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 40 unsigned int i, svr, ver; 41 42 svr = gur_in32(&gur->svr); 43 ver = SVR_SOC_VER(svr); 44 45 for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) 46 if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) { 47 strcpy(name, cpu_type_list[i].name); 48 49 if (IS_E_PROCESSOR(svr)) 50 strcat(name, "E"); 51 break; 52 } 53 54 if (i == ARRAY_SIZE(cpu_type_list)) 55 strcpy(name, "unknown"); 56 } 57 58 #ifndef CONFIG_SYS_DCACHE_OFF 59 static void set_pgtable_section(u64 *page_table, u64 index, u64 section, 60 u64 memory_type, u64 attribute) 61 { 62 u64 value; 63 64 value = section | PTE_TYPE_BLOCK | PTE_BLOCK_AF; 65 value |= PMD_ATTRINDX(memory_type); 66 value |= attribute; 67 page_table[index] = value; 68 } 69 70 static void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr) 71 { 72 u64 value; 73 74 value = (u64)table_addr | PTE_TYPE_TABLE; 75 page_table[index] = value; 76 } 77 78 /* 79 * Set the block entries according to the information of the table. 80 */ 81 static int set_block_entry(const struct sys_mmu_table *list, 82 struct table_info *table) 83 { 84 u64 block_size = 0, block_shift = 0; 85 u64 block_addr, index; 86 int j; 87 88 if (table->entry_size == BLOCK_SIZE_L1) { 89 block_size = BLOCK_SIZE_L1; 90 block_shift = SECTION_SHIFT_L1; 91 } else if (table->entry_size == BLOCK_SIZE_L2) { 92 block_size = BLOCK_SIZE_L2; 93 block_shift = SECTION_SHIFT_L2; 94 } else { 95 return -EINVAL; 96 } 97 98 block_addr = list->phys_addr; 99 index = (list->virt_addr - table->table_base) >> block_shift; 100 101 for (j = 0; j < (list->size >> block_shift); j++) { 102 set_pgtable_section(table->ptr, 103 index, 104 block_addr, 105 list->memory_type, 106 list->attribute); 107 block_addr += block_size; 108 index++; 109 } 110 111 return 0; 112 } 113 114 /* 115 * Find the corresponding table entry for the list. 116 */ 117 static int find_table(const struct sys_mmu_table *list, 118 struct table_info *table, u64 *level0_table) 119 { 120 u64 index = 0, level = 0; 121 u64 *level_table = level0_table; 122 u64 temp_base = 0, block_size = 0, block_shift = 0; 123 124 while (level < 3) { 125 if (level == 0) { 126 block_size = BLOCK_SIZE_L0; 127 block_shift = SECTION_SHIFT_L0; 128 } else if (level == 1) { 129 block_size = BLOCK_SIZE_L1; 130 block_shift = SECTION_SHIFT_L1; 131 } else if (level == 2) { 132 block_size = BLOCK_SIZE_L2; 133 block_shift = SECTION_SHIFT_L2; 134 } 135 136 index = 0; 137 while (list->virt_addr >= temp_base) { 138 index++; 139 temp_base += block_size; 140 } 141 142 temp_base -= block_size; 143 144 if ((level_table[index - 1] & PTE_TYPE_MASK) == 145 PTE_TYPE_TABLE) { 146 level_table = (u64 *)(level_table[index - 1] & 147 ~PTE_TYPE_MASK); 148 level++; 149 continue; 150 } else { 151 if (level == 0) 152 return -EINVAL; 153 154 if ((list->phys_addr + list->size) > 155 (temp_base + block_size * NUM_OF_ENTRY)) 156 return -EINVAL; 157 158 /* 159 * Check the address and size of the list member is 160 * aligned with the block size. 161 */ 162 if (((list->phys_addr & (block_size - 1)) != 0) || 163 ((list->size & (block_size - 1)) != 0)) 164 return -EINVAL; 165 166 table->ptr = level_table; 167 table->table_base = temp_base - 168 ((index - 1) << block_shift); 169 table->entry_size = block_size; 170 171 return 0; 172 } 173 } 174 return -EINVAL; 175 } 176 177 /* 178 * To start MMU before DDR is available, we create MMU table in SRAM. 179 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three 180 * levels of translation tables here to cover 40-bit address space. 181 * We use 4KB granule size, with 40 bits physical address, T0SZ=24 182 * Level 0 IA[39], table address @0 183 * Level 1 IA[38:30], table address @0x1000, 0x2000 184 * Level 2 IA[29:21], table address @0x3000, 0x4000 185 * Address above 0x5000 is free for other purpose. 186 */ 187 static inline void early_mmu_setup(void) 188 { 189 unsigned int el, i; 190 u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE; 191 u64 *level1_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000); 192 u64 *level1_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000); 193 u64 *level2_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000); 194 u64 *level2_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000); 195 196 struct table_info table = {level0_table, 0, BLOCK_SIZE_L0}; 197 198 /* Invalidate all table entries */ 199 memset(level0_table, 0, 0x5000); 200 201 /* Fill in the table entries */ 202 set_pgtable_table(level0_table, 0, level1_table0); 203 set_pgtable_table(level0_table, 1, level1_table1); 204 set_pgtable_table(level1_table0, 0, level2_table0); 205 206 #ifdef CONFIG_FSL_LSCH3 207 set_pgtable_table(level1_table0, 208 CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1, 209 level2_table1); 210 #elif defined(CONFIG_FSL_LSCH2) 211 set_pgtable_table(level1_table0, 1, level2_table1); 212 #endif 213 /* Find the table and fill in the block entries */ 214 for (i = 0; i < ARRAY_SIZE(early_mmu_table); i++) { 215 if (find_table(&early_mmu_table[i], 216 &table, level0_table) == 0) { 217 /* 218 * If find_table() returns error, it cannot be dealt 219 * with here. Breakpoint can be added for debugging. 220 */ 221 set_block_entry(&early_mmu_table[i], &table); 222 /* 223 * If set_block_entry() returns error, it cannot be 224 * dealt with here too. 225 */ 226 } 227 } 228 229 el = current_el(); 230 231 set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR, 232 MEMORY_ATTRIBUTES); 233 set_sctlr(get_sctlr() | CR_M); 234 } 235 236 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 237 /* 238 * Called from final mmu setup. The phys_addr is new, non-existing 239 * address. A new sub table is created @level2_table_secure to cover 240 * size of CONFIG_SYS_MEM_RESERVE_SECURE memory. 241 */ 242 static inline int final_secure_ddr(u64 *level0_table, 243 u64 *level2_table_secure, 244 phys_addr_t phys_addr) 245 { 246 int ret = -EINVAL; 247 struct table_info table = {}; 248 struct sys_mmu_table ddr_entry = { 249 0, 0, BLOCK_SIZE_L1, MT_NORMAL, 250 PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS 251 }; 252 u64 index; 253 254 /* Need to create a new table */ 255 ddr_entry.virt_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1); 256 ddr_entry.phys_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1); 257 ret = find_table(&ddr_entry, &table, level0_table); 258 if (ret) 259 return ret; 260 index = (ddr_entry.virt_addr - table.table_base) >> SECTION_SHIFT_L1; 261 set_pgtable_table(table.ptr, index, level2_table_secure); 262 table.ptr = level2_table_secure; 263 table.table_base = ddr_entry.virt_addr; 264 table.entry_size = BLOCK_SIZE_L2; 265 ret = set_block_entry(&ddr_entry, &table); 266 if (ret) { 267 printf("MMU error: could not fill non-secure ddr block entries\n"); 268 return ret; 269 } 270 ddr_entry.virt_addr = phys_addr; 271 ddr_entry.phys_addr = phys_addr; 272 ddr_entry.size = CONFIG_SYS_MEM_RESERVE_SECURE; 273 ddr_entry.attribute = PTE_BLOCK_OUTER_SHARE; 274 ret = find_table(&ddr_entry, &table, level0_table); 275 if (ret) { 276 printf("MMU error: could not find secure ddr table\n"); 277 return ret; 278 } 279 ret = set_block_entry(&ddr_entry, &table); 280 if (ret) 281 printf("MMU error: could not set secure ddr block entry\n"); 282 283 return ret; 284 } 285 #endif 286 287 /* 288 * The final tables look similar to early tables, but different in detail. 289 * These tables are in DRAM. Sub tables are added to enable cache for 290 * QBMan and OCRAM. 291 * 292 * Put the MMU table in secure memory if gd->secure_ram is valid. 293 * OCRAM will be not used for this purpose so gd->secure_ram can't be 0. 294 * 295 * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB. 296 * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB. 297 * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB. 298 * 299 * For LSCH3: 300 * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB. 301 * For LSCH2: 302 * Level 2 table 1 contains 512 entries for each 2MB from 1GB to 2GB. 303 * Level 2 table 2 contains 512 entries for each 2MB from 20GB to 21GB. 304 */ 305 static inline void final_mmu_setup(void) 306 { 307 unsigned int el = current_el(); 308 unsigned int i; 309 u64 *level0_table = (u64 *)gd->arch.tlb_addr; 310 u64 *level1_table0; 311 u64 *level1_table1; 312 u64 *level2_table0; 313 u64 *level2_table1; 314 #ifdef CONFIG_FSL_LSCH2 315 u64 *level2_table2; 316 #endif 317 struct table_info table = {NULL, 0, BLOCK_SIZE_L0}; 318 319 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 320 u64 *level2_table_secure; 321 322 if (el == 3) { 323 /* 324 * Only use gd->secure_ram if the address is recalculated 325 * Align to 4KB for MMU table 326 */ 327 if (gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED) 328 level0_table = (u64 *)(gd->secure_ram & ~0xfff); 329 else 330 printf("MMU warning: gd->secure_ram is not maintained, disabled.\n"); 331 } 332 #endif 333 level1_table0 = level0_table + 512; 334 level1_table1 = level1_table0 + 512; 335 level2_table0 = level1_table1 + 512; 336 level2_table1 = level2_table0 + 512; 337 #ifdef CONFIG_FSL_LSCH2 338 level2_table2 = level2_table1 + 512; 339 #endif 340 table.ptr = level0_table; 341 342 /* Invalidate all table entries */ 343 memset(level0_table, 0, PGTABLE_SIZE); 344 345 /* Fill in the table entries */ 346 set_pgtable_table(level0_table, 0, level1_table0); 347 set_pgtable_table(level0_table, 1, level1_table1); 348 set_pgtable_table(level1_table0, 0, level2_table0); 349 #ifdef CONFIG_FSL_LSCH3 350 set_pgtable_table(level1_table0, 351 CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1, 352 level2_table1); 353 #elif defined(CONFIG_FSL_LSCH2) 354 set_pgtable_table(level1_table0, 1, level2_table1); 355 set_pgtable_table(level1_table0, 356 CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1, 357 level2_table2); 358 #endif 359 360 /* Find the table and fill in the block entries */ 361 for (i = 0; i < ARRAY_SIZE(final_mmu_table); i++) { 362 if (find_table(&final_mmu_table[i], 363 &table, level0_table) == 0) { 364 if (set_block_entry(&final_mmu_table[i], 365 &table) != 0) { 366 printf("MMU error: could not set block entry for %p\n", 367 &final_mmu_table[i]); 368 } 369 370 } else { 371 printf("MMU error: could not find the table for %p\n", 372 &final_mmu_table[i]); 373 } 374 } 375 /* Set the secure memory to secure in MMU */ 376 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 377 if (el == 3 && gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED) { 378 #ifdef CONFIG_FSL_LSCH3 379 level2_table_secure = level2_table1 + 512; 380 #elif defined(CONFIG_FSL_LSCH2) 381 level2_table_secure = level2_table2 + 512; 382 #endif 383 if (!final_secure_ddr(level0_table, 384 level2_table_secure, 385 gd->secure_ram & ~0x3)) { 386 gd->secure_ram |= MEM_RESERVE_SECURE_SECURED; 387 debug("Now MMU table is in secured memory at 0x%llx\n", 388 gd->secure_ram & ~0x3); 389 } else { 390 printf("MMU warning: Failed to secure DDR\n"); 391 } 392 } 393 #endif 394 395 /* flush new MMU table */ 396 flush_dcache_range((ulong)level0_table, 397 (ulong)level0_table + gd->arch.tlb_size); 398 399 /* point TTBR to the new table */ 400 set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR_FINAL, 401 MEMORY_ATTRIBUTES); 402 /* 403 * MMU is already enabled, just need to invalidate TLB to load the 404 * new table. The new table is compatible with the current table, if 405 * MMU somehow walks through the new table before invalidation TLB, 406 * it still works. So we don't need to turn off MMU here. 407 */ 408 } 409 410 u64 get_page_table_size(void) 411 { 412 return 0x10000; 413 } 414 415 int arch_cpu_init(void) 416 { 417 icache_enable(); 418 __asm_invalidate_dcache_all(); 419 __asm_invalidate_tlb_all(); 420 early_mmu_setup(); 421 set_sctlr(get_sctlr() | CR_C); 422 return 0; 423 } 424 425 /* 426 * This function is called from lib/board.c. 427 * It recreates MMU table in main memory. MMU and d-cache are enabled earlier. 428 * There is no need to disable d-cache for this operation. 429 */ 430 void enable_caches(void) 431 { 432 final_mmu_setup(); 433 __asm_invalidate_tlb_all(); 434 } 435 #endif 436 437 static inline u32 initiator_type(u32 cluster, int init_id) 438 { 439 struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 440 u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK; 441 u32 type = 0; 442 443 type = gur_in32(&gur->tp_ityp[idx]); 444 if (type & TP_ITYP_AV) 445 return type; 446 447 return 0; 448 } 449 450 u32 cpu_mask(void) 451 { 452 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 453 int i = 0, count = 0; 454 u32 cluster, type, mask = 0; 455 456 do { 457 int j; 458 459 cluster = gur_in32(&gur->tp_cluster[i].lower); 460 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 461 type = initiator_type(cluster, j); 462 if (type) { 463 if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM) 464 mask |= 1 << count; 465 count++; 466 } 467 } 468 i++; 469 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 470 471 return mask; 472 } 473 474 /* 475 * Return the number of cores on this SOC. 476 */ 477 int cpu_numcores(void) 478 { 479 return hweight32(cpu_mask()); 480 } 481 482 int fsl_qoriq_core_to_cluster(unsigned int core) 483 { 484 struct ccsr_gur __iomem *gur = 485 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR); 486 int i = 0, count = 0; 487 u32 cluster; 488 489 do { 490 int j; 491 492 cluster = gur_in32(&gur->tp_cluster[i].lower); 493 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 494 if (initiator_type(cluster, j)) { 495 if (count == core) 496 return i; 497 count++; 498 } 499 } 500 i++; 501 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 502 503 return -1; /* cannot identify the cluster */ 504 } 505 506 u32 fsl_qoriq_core_to_type(unsigned int core) 507 { 508 struct ccsr_gur __iomem *gur = 509 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR); 510 int i = 0, count = 0; 511 u32 cluster, type; 512 513 do { 514 int j; 515 516 cluster = gur_in32(&gur->tp_cluster[i].lower); 517 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 518 type = initiator_type(cluster, j); 519 if (type) { 520 if (count == core) 521 return type; 522 count++; 523 } 524 } 525 i++; 526 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 527 528 return -1; /* cannot identify the cluster */ 529 } 530 531 #ifdef CONFIG_DISPLAY_CPUINFO 532 int print_cpuinfo(void) 533 { 534 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 535 struct sys_info sysinfo; 536 char buf[32]; 537 unsigned int i, core; 538 u32 type, rcw, svr = gur_in32(&gur->svr); 539 540 puts("SoC: "); 541 542 cpu_name(buf); 543 printf(" %s (0x%x)\n", buf, svr); 544 memset((u8 *)buf, 0x00, ARRAY_SIZE(buf)); 545 get_sys_info(&sysinfo); 546 puts("Clock Configuration:"); 547 for_each_cpu(i, core, cpu_numcores(), cpu_mask()) { 548 if (!(i % 3)) 549 puts("\n "); 550 type = TP_ITYP_VER(fsl_qoriq_core_to_type(core)); 551 printf("CPU%d(%s):%-4s MHz ", core, 552 type == TY_ITYP_VER_A7 ? "A7 " : 553 (type == TY_ITYP_VER_A53 ? "A53" : 554 (type == TY_ITYP_VER_A57 ? "A57" : " ")), 555 strmhz(buf, sysinfo.freq_processor[core])); 556 } 557 printf("\n Bus: %-4s MHz ", 558 strmhz(buf, sysinfo.freq_systembus)); 559 printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus)); 560 #ifdef CONFIG_SYS_DPAA_FMAN 561 printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0])); 562 #endif 563 #ifdef CONFIG_SYS_FSL_HAS_DP_DDR 564 if (soc_has_dp_ddr()) { 565 printf(" DP-DDR: %-4s MT/s", 566 strmhz(buf, sysinfo.freq_ddrbus2)); 567 } 568 #endif 569 puts("\n"); 570 571 /* 572 * Display the RCW, so that no one gets confused as to what RCW 573 * we're actually using for this boot. 574 */ 575 puts("Reset Configuration Word (RCW):"); 576 for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) { 577 rcw = gur_in32(&gur->rcwsr[i]); 578 if ((i % 4) == 0) 579 printf("\n %08x:", i * 4); 580 printf(" %08x", rcw); 581 } 582 puts("\n"); 583 584 return 0; 585 } 586 #endif 587 588 #ifdef CONFIG_FSL_ESDHC 589 int cpu_mmc_init(bd_t *bis) 590 { 591 return fsl_esdhc_mmc_init(bis); 592 } 593 #endif 594 595 int cpu_eth_init(bd_t *bis) 596 { 597 int error = 0; 598 599 #ifdef CONFIG_FSL_MC_ENET 600 error = fsl_mc_ldpaa_init(bis); 601 #endif 602 #ifdef CONFIG_FMAN_ENET 603 fm_standard_init(bis); 604 #endif 605 return error; 606 } 607 608 int arch_early_init_r(void) 609 { 610 #ifdef CONFIG_MP 611 int rv = 1; 612 #endif 613 614 #ifdef CONFIG_SYS_FSL_ERRATUM_A009635 615 erratum_a009635(); 616 #endif 617 618 #ifdef CONFIG_MP 619 rv = fsl_layerscape_wake_seconday_cores(); 620 if (rv) 621 printf("Did not wake secondary cores\n"); 622 #endif 623 624 #ifdef CONFIG_SYS_HAS_SERDES 625 fsl_serdes_init(); 626 #endif 627 #ifdef CONFIG_FMAN_ENET 628 fman_enet_init(); 629 #endif 630 return 0; 631 } 632 633 int timer_init(void) 634 { 635 u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR; 636 #ifdef CONFIG_FSL_LSCH3 637 u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR; 638 #endif 639 #ifdef CONFIG_LS2080A 640 u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET; 641 #endif 642 #ifdef COUNTER_FREQUENCY_REAL 643 unsigned long cntfrq = COUNTER_FREQUENCY_REAL; 644 645 /* Update with accurate clock frequency */ 646 asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory"); 647 #endif 648 649 #ifdef CONFIG_FSL_LSCH3 650 /* Enable timebase for all clusters. 651 * It is safe to do so even some clusters are not enabled. 652 */ 653 out_le32(cltbenr, 0xf); 654 #endif 655 656 #ifdef CONFIG_LS2080A 657 /* 658 * In certain Layerscape SoCs, the clock for each core's 659 * has an enable bit in the PMU Physical Core Time Base Enable 660 * Register (PCTBENR), which allows the watchdog to operate. 661 */ 662 setbits_le32(pctbenr, 0xff); 663 #endif 664 665 /* Enable clock for timer 666 * This is a global setting. 667 */ 668 out_le32(cntcr, 0x1); 669 670 return 0; 671 } 672 673 void reset_cpu(ulong addr) 674 { 675 u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR; 676 u32 val; 677 678 /* Raise RESET_REQ_B */ 679 val = scfg_in32(rstcr); 680 val |= 0x02; 681 scfg_out32(rstcr, val); 682 } 683 684 phys_size_t board_reserve_ram_top(phys_size_t ram_size) 685 { 686 phys_size_t ram_top = ram_size; 687 688 #ifdef CONFIG_SYS_MEM_TOP_HIDE 689 #error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function 690 #endif 691 /* Carve the Debug Server private DRAM block from the end of DRAM */ 692 #ifdef CONFIG_FSL_DEBUG_SERVER 693 ram_top -= debug_server_get_dram_block_size(); 694 #endif 695 696 /* Carve the MC private DRAM block from the end of DRAM */ 697 #ifdef CONFIG_FSL_MC_ENET 698 ram_top -= mc_get_dram_block_size(); 699 ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1); 700 #endif 701 702 return ram_top; 703 } 704