/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#include <fm_eth.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

static struct mm_region layerscape_mem_map[] = {
	{
		/* List terminator */
		0,
	}
};
struct mm_region *mem_map = layerscape_mem_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) {
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");
			break;
		}
	}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Build a block (section) descriptor: mark it as a valid block entry,
 * set the Access Flag, then merge in the memory type (MAIR index) and
 * any extra attributes such as shareability or the NS bit.
 */
static void set_pgtable_section(u64 *page_table, u64 index, u64 section,
				u64 memory_type, u64 attribute)
{
	u64 value;

	value = section | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	value |= PMD_ATTRINDX(memory_type);
	value |= attribute;
	page_table[index] = value;
}

/* Build a table descriptor pointing at the next-level table. */
static void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
{
	u64 value;

	value = (u64)table_addr | PTE_TYPE_TABLE;
	page_table[index] = value;
}

/*
 * Set the block entries according to the information in the table.
 */
static int set_block_entry(const struct sys_mmu_table *list,
			   struct table_info *table)
{
	u64 block_size = 0, block_shift = 0;
	u64 block_addr, index;
	int j;

	if (table->entry_size == BLOCK_SIZE_L1) {
		block_size = BLOCK_SIZE_L1;
		block_shift = SECTION_SHIFT_L1;
	} else if (table->entry_size == BLOCK_SIZE_L2) {
		block_size = BLOCK_SIZE_L2;
		block_shift = SECTION_SHIFT_L2;
	} else {
		return -EINVAL;
	}

	block_addr = list->phys_addr;
	index = (list->virt_addr - table->table_base) >> block_shift;

	for (j = 0; j < (list->size >> block_shift); j++) {
		set_pgtable_section(table->ptr,
				    index,
				    block_addr,
				    list->memory_type,
				    list->attribute);
		block_addr += block_size;
		index++;
	}

	return 0;
}
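/*
 * Illustrative sketch (kept out of the build with #if 0): how a caller
 * drives set_block_entry(). All addresses and sizes below are
 * hypothetical, not taken from any board configuration. With 2MB L2
 * blocks, a 1GB region needs 512 entries, and the starting slot is
 * (virt_addr - table_base) >> SECTION_SHIFT_L2.
 */
#if 0
static void example_map_region(u64 *l2_table)
{
	/* Hypothetical identity-mapped 1GB DDR window at 2GB */
	struct sys_mmu_table entry = {
		.virt_addr = 0x80000000ULL,
		.phys_addr = 0x80000000ULL,
		.size = 0x40000000ULL,		/* 1GB */
		.memory_type = MT_NORMAL,
		.attribute = PTE_BLOCK_OUTER_SHARE,
	};
	struct table_info table = {
		.ptr = l2_table,
		.table_base = 0x80000000ULL,	/* VA covered by slot 0 */
		.entry_size = BLOCK_SIZE_L2,	/* 2MB blocks */
	};

	/* Fills slots 0..511 of l2_table with 2MB block descriptors */
	set_block_entry(&entry, &table);
}
#endif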
/*
 * Find the table that covers the given list entry, descending from the
 * level 0 table. On success, @table is filled in with the table pointer,
 * the base address it covers and its entry (block) size.
 */
static int find_table(const struct sys_mmu_table *list,
		      struct table_info *table, u64 *level0_table)
{
	u64 index = 0, level = 0;
	u64 *level_table = level0_table;
	u64 temp_base = 0, block_size = 0, block_shift = 0;

	while (level < 3) {
		if (level == 0) {
			block_size = BLOCK_SIZE_L0;
			block_shift = SECTION_SHIFT_L0;
		} else if (level == 1) {
			block_size = BLOCK_SIZE_L1;
			block_shift = SECTION_SHIFT_L1;
		} else if (level == 2) {
			block_size = BLOCK_SIZE_L2;
			block_shift = SECTION_SHIFT_L2;
		}

		index = 0;
		while (list->virt_addr >= temp_base) {
			index++;
			temp_base += block_size;
		}

		temp_base -= block_size;

		if ((level_table[index - 1] & PTE_TYPE_MASK) ==
		    PTE_TYPE_TABLE) {
			level_table = (u64 *)(level_table[index - 1] &
					      ~PTE_TYPE_MASK);
			level++;
			continue;
		} else {
			if (level == 0)
				return -EINVAL;

			if ((list->phys_addr + list->size) >
			    (temp_base + block_size * NUM_OF_ENTRY))
				return -EINVAL;

			/*
			 * Check that the address and size of the list
			 * member are aligned with the block size.
			 */
			if (((list->phys_addr & (block_size - 1)) != 0) ||
			    ((list->size & (block_size - 1)) != 0))
				return -EINVAL;

			table->ptr = level_table;
			table->table_base = temp_base -
					    ((index - 1) << block_shift);
			table->entry_size = block_size;

			return 0;
		}
	}
	return -EINVAL;
}
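/*
 * Worked example for find_table() (hypothetical address, 4KB granule,
 * T0SZ=24): looking up virt_addr = 32GB. At level 0 each entry spans
 * 512GB, so the walk selects entry 0; it is a table descriptor, so the
 * walk descends to the level 1 table. At level 1 each entry spans 1GB,
 * so index - 1 = 32GB / 1GB = 32. If entry 32 is not a table descriptor,
 * find_table() returns that level 1 table with
 * table_base = 32GB - (32 << SECTION_SHIFT_L1) = 0 and
 * entry_size = BLOCK_SIZE_L1, so the caller can install 1GB block
 * entries starting at slot 32.
 */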
/*
 * To start the MMU before DDR is available, we create the MMU table in
 * SRAM. The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use
 * three levels of translation tables here to cover the 40-bit address
 * space. We use a 4KB granule size, with 40 bits physical address and
 * T0SZ=24:
 * Level 0 IA[39],    table address @0
 * Level 1 IA[38:30], table address @0x1000, 0x2000
 * Level 2 IA[29:21], table address @0x3000, 0x4000
 * Addresses above 0x5000 are free for other purposes.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el, i;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
	u64 *level2_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);

	struct table_info table = {level0_table, 0, BLOCK_SIZE_L0};

	/* Invalidate all table entries */
	memset(level0_table, 0, 0x5000);

	/* Fill in the table entries */
	set_pgtable_table(level0_table, 0, level1_table0);
	set_pgtable_table(level0_table, 1, level1_table1);
	set_pgtable_table(level1_table0, 0, level2_table0);

#ifdef CONFIG_FSL_LSCH3
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1,
			  level2_table1);
#elif defined(CONFIG_FSL_LSCH2)
	set_pgtable_table(level1_table0, 1, level2_table1);
#endif
	/* Find the table and fill in the block entries */
	for (i = 0; i < ARRAY_SIZE(early_mmu_table); i++) {
		/*
		 * If find_table() or set_block_entry() fails, the error
		 * cannot be dealt with here; a breakpoint can be added
		 * for debugging.
		 */
		if (find_table(&early_mmu_table[i],
			       &table, level0_table) == 0)
			set_block_entry(&early_mmu_table[i], &table);
	}

	el = current_el();

	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR,
			  MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);
}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
/*
 * Called from the final MMU setup. The phys_addr is a new address, not
 * covered by the existing tables. A new sub-table is created at
 * @level2_table_secure to cover CONFIG_SYS_MEM_RESERVE_SECURE bytes of
 * memory.
 */
static inline int final_secure_ddr(u64 *level0_table,
				   u64 *level2_table_secure,
				   phys_addr_t phys_addr)
{
	int ret = -EINVAL;
	struct table_info table = {};
	struct sys_mmu_table ddr_entry = {
		0, 0, BLOCK_SIZE_L1, MT_NORMAL,
		PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	};
	u64 index;

	/* Need to create a new table */
	ddr_entry.virt_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1);
	ddr_entry.phys_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1);
	ret = find_table(&ddr_entry, &table, level0_table);
	if (ret)
		return ret;
	index = (ddr_entry.virt_addr - table.table_base) >> SECTION_SHIFT_L1;
	set_pgtable_table(table.ptr, index, level2_table_secure);
	table.ptr = level2_table_secure;
	table.table_base = ddr_entry.virt_addr;
	table.entry_size = BLOCK_SIZE_L2;
	ret = set_block_entry(&ddr_entry, &table);
	if (ret) {
		printf("MMU error: could not fill non-secure ddr block entries\n");
		return ret;
	}
	ddr_entry.virt_addr = phys_addr;
	ddr_entry.phys_addr = phys_addr;
	ddr_entry.size = CONFIG_SYS_MEM_RESERVE_SECURE;
	ddr_entry.attribute = PTE_BLOCK_OUTER_SHARE;
	ret = find_table(&ddr_entry, &table, level0_table);
	if (ret) {
		printf("MMU error: could not find secure ddr table\n");
		return ret;
	}
	ret = set_block_entry(&ddr_entry, &table);
	if (ret)
		printf("MMU error: could not set secure ddr block entry\n");

	return ret;
}
#endif
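/*
 * Worked example for final_secure_ddr() (hypothetical values): with
 * phys_addr = 0xfbe00000 and CONFIG_SYS_MEM_RESERVE_SECURE = 2MB, the
 * containing 1GB block starts at 0xfbe00000 & ~(BLOCK_SIZE_L1 - 1) =
 * 0xc0000000. The first pass re-maps that whole 1GB through
 * @level2_table_secure as non-secure 2MB blocks; the second pass then
 * rewrites the single 2MB block at 0xfbe00000 without PTE_BLOCK_NS,
 * marking only the reserved region secure.
 */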
/*
 * The final tables look similar to the early tables, but differ in
 * detail. These tables are in DRAM. Sub-tables are added to enable
 * caching for QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->secure_ram can't be 0.
 *
 * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB.
 * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB.
 * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB.
 *
 * For LSCH3:
 * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB.
 * For LSCH2:
 * Level 2 table 1 contains 512 entries for each 2MB from 1GB to 2GB.
 * Level 2 table 2 contains 512 entries for each 2MB from 20GB to 21GB.
 */
static inline void final_mmu_setup(void)
{
	unsigned int el = current_el();
	unsigned int i;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table0;
	u64 *level1_table1;
	u64 *level2_table0;
	u64 *level2_table1;
#ifdef CONFIG_FSL_LSCH2
	u64 *level2_table2;
#endif
	struct table_info table = {NULL, 0, BLOCK_SIZE_L0};

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	u64 *level2_table_secure;

	if (el == 3) {
		/*
		 * Only use gd->secure_ram if the address has been
		 * recalculated; align it to 4KB for the MMU table.
		 */
		if (gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED)
			level0_table = (u64 *)(gd->secure_ram & ~0xfff);
		else
			printf("MMU warning: gd->secure_ram is not maintained, disabled.\n");
	}
#endif
	level1_table0 = level0_table + 512;
	level1_table1 = level1_table0 + 512;
	level2_table0 = level1_table1 + 512;
	level2_table1 = level2_table0 + 512;
#ifdef CONFIG_FSL_LSCH2
	level2_table2 = level2_table1 + 512;
#endif
	table.ptr = level0_table;

	/* Invalidate all table entries */
	memset(level0_table, 0, PGTABLE_SIZE);

	/* Fill in the table entries */
	set_pgtable_table(level0_table, 0, level1_table0);
	set_pgtable_table(level0_table, 1, level1_table1);
	set_pgtable_table(level1_table0, 0, level2_table0);
#ifdef CONFIG_FSL_LSCH3
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
			  level2_table1);
#elif defined(CONFIG_FSL_LSCH2)
	set_pgtable_table(level1_table0, 1, level2_table1);
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
			  level2_table2);
#endif

	/* Find the table and fill in the block entries */
	for (i = 0; i < ARRAY_SIZE(final_mmu_table); i++) {
		if (find_table(&final_mmu_table[i],
			       &table, level0_table) == 0) {
			if (set_block_entry(&final_mmu_table[i],
					    &table) != 0) {
				printf("MMU error: could not set block entry for %p\n",
				       &final_mmu_table[i]);
			}
		} else {
			printf("MMU error: could not find the table for %p\n",
			       &final_mmu_table[i]);
		}
	}
	/* Mark the secure memory as secure in the MMU */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (el == 3 && gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
#ifdef CONFIG_FSL_LSCH3
		level2_table_secure = level2_table1 + 512;
#elif defined(CONFIG_FSL_LSCH2)
		level2_table_secure = level2_table2 + 512;
#endif
		if (!final_secure_ddr(level0_table,
				      level2_table_secure,
				      gd->secure_ram & ~0x3)) {
			gd->secure_ram |= MEM_RESERVE_SECURE_SECURED;
			debug("Now MMU table is in secured memory at 0x%llx\n",
			      gd->secure_ram & ~0x3);
		} else {
			printf("MMU warning: Failed to secure DDR\n");
		}
	}
#endif

	/* Flush the new MMU table */
	flush_dcache_range((ulong)level0_table,
			   (ulong)level0_table + gd->arch.tlb_size);

#ifdef CONFIG_SYS_DPAA_FMAN
	flush_dcache_all();
#endif
	/* Point TTBR to the new table */
	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR_FINAL,
			  MEMORY_ATTRIBUTES);
	/*
	 * The MMU is already enabled; we only need to invalidate the TLB
	 * to load the new table. The new table is compatible with the
	 * current one, so even if the MMU walks the new table before the
	 * TLB is invalidated, it still works. Hence there is no need to
	 * turn the MMU off here.
	 */
}
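/*
 * Note on the gd->secure_ram encoding assumed above: the two low bits
 * carry the MEM_RESERVE_SECURE_MAINTAINED/MEM_RESERVE_SECURE_SECURED
 * state flags and the remainder is the address, which is why
 * final_mmu_setup() masks with ~0x3 to recover the address and with
 * ~0xfff to obtain a 4KB-aligned table base.
 */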
u64 get_page_table_size(void)
{
	/* 64KB is reserved for the final MMU tables */
	return 0x10000;
}

int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

/*
 * This function is called from lib/board.c. It recreates the MMU table
 * in main memory. The MMU and d-cache are enabled earlier, so there is
 * no need to disable the d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif

/*
 * Each cluster register packs one 8-bit initiator field per member.
 * The field indexes gur->tp_ityp[], which holds the initiator type.
 * Return the type if the initiator is available, otherwise 0.
 */
static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

/* Return a mask with one bit set for each available ARM core */
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1; /* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1; /* cannot identify the core type */
}

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" : "   ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:  %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

int arch_early_init_r(void)
{
#ifdef CONFIG_MP
	int rv = 1;
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	erratum_a009635();
#endif

#ifdef CONFIG_MP
	rv = fsl_layerscape_wake_seconday_cores();
	if (rv)
		printf("Did not wake secondary cores\n");
#endif

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the timebase for all clusters. It is safe to do so
	 * even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

	/*
	 * Enable the clock for the timer. This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}

/*
 * Carve the private DRAM blocks (Debug Server, MC) from the top of DRAM
 * and return the reduced RAM size; e.g. 2GB minus a 512MB MC block
 * aligned to CONFIG_SYS_MC_RSV_MEM_ALIGN leaves 1.5GB of general-purpose
 * DRAM.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#ifdef CONFIG_SYS_MEM_TOP_HIDE
#error CONFIG_SYS_MEM_TOP_HIDE must not be used together with this function
#endif
	/* Carve the Debug Server private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_DEBUG_SERVER
	ram_top -= debug_server_get_dram_block_size();
#endif

	/* Carve the MC private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_MC_ENET
	ram_top -= mc_get_dram_block_size();
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_top;
}