1 /* 2 * Based on arch/arm/mm/mmu.c 3 * 4 * Copyright (C) 1995-2005 Russell King 5 * Copyright (C) 2012 ARM Ltd. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include <linux/export.h> 21 #include <linux/kernel.h> 22 #include <linux/errno.h> 23 #include <linux/init.h> 24 #include <linux/libfdt.h> 25 #include <linux/mman.h> 26 #include <linux/nodemask.h> 27 #include <linux/memblock.h> 28 #include <linux/fs.h> 29 #include <linux/io.h> 30 #include <linux/slab.h> 31 #include <linux/stop_machine.h> 32 33 #include <asm/cputype.h> 34 #include <asm/fixmap.h> 35 #include <asm/kernel-pgtable.h> 36 #include <asm/sections.h> 37 #include <asm/setup.h> 38 #include <asm/sizes.h> 39 #include <asm/tlb.h> 40 #include <asm/memblock.h> 41 #include <asm/mmu_context.h> 42 43 #include "mm.h" 44 45 u64 idmap_t0sz = TCR_T0SZ(VA_BITS); 46 47 /* 48 * Empty_zero_page is a special page that is used for zero-initialized data 49 * and COW. 50 */ 51 struct page *empty_zero_page; 52 EXPORT_SYMBOL(empty_zero_page); 53 54 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 55 unsigned long size, pgprot_t vma_prot) 56 { 57 if (!pfn_valid(pfn)) 58 return pgprot_noncached(vma_prot); 59 else if (file->f_flags & O_SYNC) 60 return pgprot_writecombine(vma_prot); 61 return vma_prot; 62 } 63 EXPORT_SYMBOL(phys_mem_access_prot); 64 65 static void __init *early_alloc(unsigned long sz) 66 { 67 phys_addr_t phys; 68 void *ptr; 69 70 phys = memblock_alloc(sz, sz); 71 BUG_ON(!phys); 72 ptr = __va(phys); 73 memset(ptr, 0, sz); 74 return ptr; 75 } 76 77 /* 78 * remap a PMD into pages 79 */ 80 static void split_pmd(pmd_t *pmd, pte_t *pte) 81 { 82 unsigned long pfn = pmd_pfn(*pmd); 83 int i = 0; 84 85 do { 86 /* 87 * Need to have the least restrictive permissions available 88 * permissions will be fixed up later 89 */ 90 set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); 91 pfn++; 92 } while (pte++, i++, i < PTRS_PER_PTE); 93 } 94 95 static void alloc_init_pte(pmd_t *pmd, unsigned long addr, 96 unsigned long end, unsigned long pfn, 97 pgprot_t prot, 98 void *(*alloc)(unsigned long size)) 99 { 100 pte_t *pte; 101 102 if (pmd_none(*pmd) || pmd_sect(*pmd)) { 103 pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); 104 if (pmd_sect(*pmd)) 105 split_pmd(pmd, pte); 106 __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE); 107 flush_tlb_all(); 108 } 109 BUG_ON(pmd_bad(*pmd)); 110 111 pte = pte_offset_kernel(pmd, addr); 112 do { 113 set_pte(pte, pfn_pte(pfn, prot)); 114 pfn++; 115 } while (pte++, addr += PAGE_SIZE, addr != end); 116 } 117 118 static void split_pud(pud_t *old_pud, pmd_t *pmd) 119 { 120 unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT; 121 pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr); 122 int i = 0; 123 124 do { 125 set_pmd(pmd, __pmd(addr | pgprot_val(prot))); 126 addr += PMD_SIZE; 127 } while (pmd++, i++, i < PTRS_PER_PMD); 128 } 129 130 static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud, 131 unsigned long addr, unsigned long end, 132 phys_addr_t phys, pgprot_t prot, 133 void *(*alloc)(unsigned long size)) 134 { 135 pmd_t *pmd; 136 unsigned long next; 137 138 /* 139 * Check for initial section mappings in the pgd/pud and remove them. 140 */ 141 if (pud_none(*pud) || pud_sect(*pud)) { 142 pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t)); 143 if (pud_sect(*pud)) { 144 /* 145 * need to have the 1G of mappings continue to be 146 * present 147 */ 148 split_pud(pud, pmd); 149 } 150 pud_populate(mm, pud, pmd); 151 flush_tlb_all(); 152 } 153 BUG_ON(pud_bad(*pud)); 154 155 pmd = pmd_offset(pud, addr); 156 do { 157 next = pmd_addr_end(addr, end); 158 /* try section mapping first */ 159 if (((addr | next | phys) & ~SECTION_MASK) == 0) { 160 pmd_t old_pmd =*pmd; 161 set_pmd(pmd, __pmd(phys | 162 pgprot_val(mk_sect_prot(prot)))); 163 /* 164 * Check for previous table entries created during 165 * boot (__create_page_tables) and flush them. 166 */ 167 if (!pmd_none(old_pmd)) { 168 flush_tlb_all(); 169 if (pmd_table(old_pmd)) { 170 phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0)); 171 if (!WARN_ON_ONCE(slab_is_available())) 172 memblock_free(table, PAGE_SIZE); 173 } 174 } 175 } else { 176 alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), 177 prot, alloc); 178 } 179 phys += next - addr; 180 } while (pmd++, addr = next, addr != end); 181 } 182 183 static inline bool use_1G_block(unsigned long addr, unsigned long next, 184 unsigned long phys) 185 { 186 if (PAGE_SHIFT != 12) 187 return false; 188 189 if (((addr | next | phys) & ~PUD_MASK) != 0) 190 return false; 191 192 return true; 193 } 194 195 static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd, 196 unsigned long addr, unsigned long end, 197 phys_addr_t phys, pgprot_t prot, 198 void *(*alloc)(unsigned long size)) 199 { 200 pud_t *pud; 201 unsigned long next; 202 203 if (pgd_none(*pgd)) { 204 pud = alloc(PTRS_PER_PUD * sizeof(pud_t)); 205 pgd_populate(mm, pgd, pud); 206 } 207 BUG_ON(pgd_bad(*pgd)); 208 209 pud = pud_offset(pgd, addr); 210 do { 211 next = pud_addr_end(addr, end); 212 213 /* 214 * For 4K granule only, attempt to put down a 1GB block 215 */ 216 if (use_1G_block(addr, next, phys)) { 217 pud_t old_pud = *pud; 218 set_pud(pud, __pud(phys | 219 pgprot_val(mk_sect_prot(prot)))); 220 221 /* 222 * If we have an old value for a pud, it will 223 * be pointing to a pmd table that we no longer 224 * need (from swapper_pg_dir). 225 * 226 * Look up the old pmd table and free it. 227 */ 228 if (!pud_none(old_pud)) { 229 flush_tlb_all(); 230 if (pud_table(old_pud)) { 231 phys_addr_t table = __pa(pmd_offset(&old_pud, 0)); 232 if (!WARN_ON_ONCE(slab_is_available())) 233 memblock_free(table, PAGE_SIZE); 234 } 235 } 236 } else { 237 alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc); 238 } 239 phys += next - addr; 240 } while (pud++, addr = next, addr != end); 241 } 242 243 /* 244 * Create the page directory entries and any necessary page tables for the 245 * mapping specified by 'md'. 246 */ 247 static void __create_mapping(struct mm_struct *mm, pgd_t *pgd, 248 phys_addr_t phys, unsigned long virt, 249 phys_addr_t size, pgprot_t prot, 250 void *(*alloc)(unsigned long size)) 251 { 252 unsigned long addr, length, end, next; 253 254 /* 255 * If the virtual and physical address don't have the same offset 256 * within a page, we cannot map the region as the caller expects. 257 */ 258 if (WARN_ON((phys ^ virt) & ~PAGE_MASK)) 259 return; 260 261 phys &= PAGE_MASK; 262 addr = virt & PAGE_MASK; 263 length = PAGE_ALIGN(size + (virt & ~PAGE_MASK)); 264 265 end = addr + length; 266 do { 267 next = pgd_addr_end(addr, end); 268 alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc); 269 phys += next - addr; 270 } while (pgd++, addr = next, addr != end); 271 } 272 273 static void *late_alloc(unsigned long size) 274 { 275 void *ptr; 276 277 BUG_ON(size > PAGE_SIZE); 278 ptr = (void *)__get_free_page(PGALLOC_GFP); 279 BUG_ON(!ptr); 280 return ptr; 281 } 282 283 static void __init create_mapping(phys_addr_t phys, unsigned long virt, 284 phys_addr_t size, pgprot_t prot) 285 { 286 if (virt < VMALLOC_START) { 287 pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", 288 &phys, virt); 289 return; 290 } 291 __create_mapping(&init_mm, pgd_offset_k(virt), phys, virt, 292 size, prot, early_alloc); 293 } 294 295 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, 296 unsigned long virt, phys_addr_t size, 297 pgprot_t prot) 298 { 299 __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot, 300 late_alloc); 301 } 302 303 static void create_mapping_late(phys_addr_t phys, unsigned long virt, 304 phys_addr_t size, pgprot_t prot) 305 { 306 if (virt < VMALLOC_START) { 307 pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", 308 &phys, virt); 309 return; 310 } 311 312 return __create_mapping(&init_mm, pgd_offset_k(virt), 313 phys, virt, size, prot, late_alloc); 314 } 315 316 #ifdef CONFIG_DEBUG_RODATA 317 static void __init __map_memblock(phys_addr_t start, phys_addr_t end) 318 { 319 /* 320 * Set up the executable regions using the existing section mappings 321 * for now. This will get more fine grained later once all memory 322 * is mapped 323 */ 324 unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE); 325 unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE); 326 327 if (end < kernel_x_start) { 328 create_mapping(start, __phys_to_virt(start), 329 end - start, PAGE_KERNEL); 330 } else if (start >= kernel_x_end) { 331 create_mapping(start, __phys_to_virt(start), 332 end - start, PAGE_KERNEL); 333 } else { 334 if (start < kernel_x_start) 335 create_mapping(start, __phys_to_virt(start), 336 kernel_x_start - start, 337 PAGE_KERNEL); 338 create_mapping(kernel_x_start, 339 __phys_to_virt(kernel_x_start), 340 kernel_x_end - kernel_x_start, 341 PAGE_KERNEL_EXEC); 342 if (kernel_x_end < end) 343 create_mapping(kernel_x_end, 344 __phys_to_virt(kernel_x_end), 345 end - kernel_x_end, 346 PAGE_KERNEL); 347 } 348 349 } 350 #else 351 static void __init __map_memblock(phys_addr_t start, phys_addr_t end) 352 { 353 create_mapping(start, __phys_to_virt(start), end - start, 354 PAGE_KERNEL_EXEC); 355 } 356 #endif 357 358 static void __init map_mem(void) 359 { 360 struct memblock_region *reg; 361 phys_addr_t limit; 362 363 /* 364 * Temporarily limit the memblock range. We need to do this as 365 * create_mapping requires puds, pmds and ptes to be allocated from 366 * memory addressable from the initial direct kernel mapping. 367 * 368 * The initial direct kernel mapping, located at swapper_pg_dir, gives 369 * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps, 370 * memory starting from PHYS_OFFSET (which must be aligned to 2MB as 371 * per Documentation/arm64/booting.txt). 372 */ 373 limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE; 374 memblock_set_current_limit(limit); 375 376 /* map all the memory banks */ 377 for_each_memblock(memory, reg) { 378 phys_addr_t start = reg->base; 379 phys_addr_t end = start + reg->size; 380 381 if (start >= end) 382 break; 383 if (memblock_is_nomap(reg)) 384 continue; 385 386 if (ARM64_SWAPPER_USES_SECTION_MAPS) { 387 /* 388 * For the first memory bank align the start address and 389 * current memblock limit to prevent create_mapping() from 390 * allocating pte page tables from unmapped memory. With 391 * the section maps, if the first block doesn't end on section 392 * size boundary, create_mapping() will try to allocate a pte 393 * page, which may be returned from an unmapped area. 394 * When section maps are not used, the pte page table for the 395 * current limit is already present in swapper_pg_dir. 396 */ 397 if (start < limit) 398 start = ALIGN(start, SECTION_SIZE); 399 if (end < limit) { 400 limit = end & SECTION_MASK; 401 memblock_set_current_limit(limit); 402 } 403 } 404 __map_memblock(start, end); 405 } 406 407 /* Limit no longer required. */ 408 memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); 409 } 410 411 static void __init fixup_executable(void) 412 { 413 #ifdef CONFIG_DEBUG_RODATA 414 /* now that we are actually fully mapped, make the start/end more fine grained */ 415 if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) { 416 unsigned long aligned_start = round_down(__pa(_stext), 417 SWAPPER_BLOCK_SIZE); 418 419 create_mapping(aligned_start, __phys_to_virt(aligned_start), 420 __pa(_stext) - aligned_start, 421 PAGE_KERNEL); 422 } 423 424 if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) { 425 unsigned long aligned_end = round_up(__pa(__init_end), 426 SWAPPER_BLOCK_SIZE); 427 create_mapping(__pa(__init_end), (unsigned long)__init_end, 428 aligned_end - __pa(__init_end), 429 PAGE_KERNEL); 430 } 431 #endif 432 } 433 434 #ifdef CONFIG_DEBUG_RODATA 435 void mark_rodata_ro(void) 436 { 437 create_mapping_late(__pa(_stext), (unsigned long)_stext, 438 (unsigned long)_etext - (unsigned long)_stext, 439 PAGE_KERNEL_ROX); 440 441 } 442 #endif 443 444 void fixup_init(void) 445 { 446 create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin, 447 (unsigned long)__init_end - (unsigned long)__init_begin, 448 PAGE_KERNEL); 449 } 450 451 /* 452 * paging_init() sets up the page tables, initialises the zone memory 453 * maps and sets up the zero page. 454 */ 455 void __init paging_init(void) 456 { 457 void *zero_page; 458 459 map_mem(); 460 fixup_executable(); 461 462 /* allocate the zero page. */ 463 zero_page = early_alloc(PAGE_SIZE); 464 465 bootmem_init(); 466 467 empty_zero_page = virt_to_page(zero_page); 468 469 /* Ensure the zero page is visible to the page table walker */ 470 dsb(ishst); 471 472 /* 473 * TTBR0 is only used for the identity mapping at this stage. Make it 474 * point to zero page to avoid speculatively fetching new entries. 475 */ 476 cpu_set_reserved_ttbr0(); 477 local_flush_tlb_all(); 478 cpu_set_default_tcr_t0sz(); 479 } 480 481 /* 482 * Check whether a kernel address is valid (derived from arch/x86/). 483 */ 484 int kern_addr_valid(unsigned long addr) 485 { 486 pgd_t *pgd; 487 pud_t *pud; 488 pmd_t *pmd; 489 pte_t *pte; 490 491 if ((((long)addr) >> VA_BITS) != -1UL) 492 return 0; 493 494 pgd = pgd_offset_k(addr); 495 if (pgd_none(*pgd)) 496 return 0; 497 498 pud = pud_offset(pgd, addr); 499 if (pud_none(*pud)) 500 return 0; 501 502 if (pud_sect(*pud)) 503 return pfn_valid(pud_pfn(*pud)); 504 505 pmd = pmd_offset(pud, addr); 506 if (pmd_none(*pmd)) 507 return 0; 508 509 if (pmd_sect(*pmd)) 510 return pfn_valid(pmd_pfn(*pmd)); 511 512 pte = pte_offset_kernel(pmd, addr); 513 if (pte_none(*pte)) 514 return 0; 515 516 return pfn_valid(pte_pfn(*pte)); 517 } 518 #ifdef CONFIG_SPARSEMEM_VMEMMAP 519 #if !ARM64_SWAPPER_USES_SECTION_MAPS 520 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) 521 { 522 return vmemmap_populate_basepages(start, end, node); 523 } 524 #else /* !ARM64_SWAPPER_USES_SECTION_MAPS */ 525 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) 526 { 527 unsigned long addr = start; 528 unsigned long next; 529 pgd_t *pgd; 530 pud_t *pud; 531 pmd_t *pmd; 532 533 do { 534 next = pmd_addr_end(addr, end); 535 536 pgd = vmemmap_pgd_populate(addr, node); 537 if (!pgd) 538 return -ENOMEM; 539 540 pud = vmemmap_pud_populate(pgd, addr, node); 541 if (!pud) 542 return -ENOMEM; 543 544 pmd = pmd_offset(pud, addr); 545 if (pmd_none(*pmd)) { 546 void *p = NULL; 547 548 p = vmemmap_alloc_block_buf(PMD_SIZE, node); 549 if (!p) 550 return -ENOMEM; 551 552 set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL)); 553 } else 554 vmemmap_verify((pte_t *)pmd, node, addr, next); 555 } while (addr = next, addr != end); 556 557 return 0; 558 } 559 #endif /* CONFIG_ARM64_64K_PAGES */ 560 void vmemmap_free(unsigned long start, unsigned long end) 561 { 562 } 563 #endif /* CONFIG_SPARSEMEM_VMEMMAP */ 564 565 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; 566 #if CONFIG_PGTABLE_LEVELS > 2 567 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; 568 #endif 569 #if CONFIG_PGTABLE_LEVELS > 3 570 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss; 571 #endif 572 573 static inline pud_t * fixmap_pud(unsigned long addr) 574 { 575 pgd_t *pgd = pgd_offset_k(addr); 576 577 BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); 578 579 return pud_offset(pgd, addr); 580 } 581 582 static inline pmd_t * fixmap_pmd(unsigned long addr) 583 { 584 pud_t *pud = fixmap_pud(addr); 585 586 BUG_ON(pud_none(*pud) || pud_bad(*pud)); 587 588 return pmd_offset(pud, addr); 589 } 590 591 static inline pte_t * fixmap_pte(unsigned long addr) 592 { 593 pmd_t *pmd = fixmap_pmd(addr); 594 595 BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd)); 596 597 return pte_offset_kernel(pmd, addr); 598 } 599 600 void __init early_fixmap_init(void) 601 { 602 pgd_t *pgd; 603 pud_t *pud; 604 pmd_t *pmd; 605 unsigned long addr = FIXADDR_START; 606 607 pgd = pgd_offset_k(addr); 608 pgd_populate(&init_mm, pgd, bm_pud); 609 pud = pud_offset(pgd, addr); 610 pud_populate(&init_mm, pud, bm_pmd); 611 pmd = pmd_offset(pud, addr); 612 pmd_populate_kernel(&init_mm, pmd, bm_pte); 613 614 /* 615 * The boot-ioremap range spans multiple pmds, for which 616 * we are not preparted: 617 */ 618 BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) 619 != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); 620 621 if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) 622 || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { 623 WARN_ON(1); 624 pr_warn("pmd %p != %p, %p\n", 625 pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), 626 fixmap_pmd(fix_to_virt(FIX_BTMAP_END))); 627 pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", 628 fix_to_virt(FIX_BTMAP_BEGIN)); 629 pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", 630 fix_to_virt(FIX_BTMAP_END)); 631 632 pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); 633 pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); 634 } 635 } 636 637 void __set_fixmap(enum fixed_addresses idx, 638 phys_addr_t phys, pgprot_t flags) 639 { 640 unsigned long addr = __fix_to_virt(idx); 641 pte_t *pte; 642 643 BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); 644 645 pte = fixmap_pte(addr); 646 647 if (pgprot_val(flags)) { 648 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); 649 } else { 650 pte_clear(&init_mm, addr, pte); 651 flush_tlb_kernel_range(addr, addr+PAGE_SIZE); 652 } 653 } 654 655 void *__init fixmap_remap_fdt(phys_addr_t dt_phys) 656 { 657 const u64 dt_virt_base = __fix_to_virt(FIX_FDT); 658 pgprot_t prot = PAGE_KERNEL_RO; 659 int size, offset; 660 void *dt_virt; 661 662 /* 663 * Check whether the physical FDT address is set and meets the minimum 664 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be 665 * at least 8 bytes so that we can always access the size field of the 666 * FDT header after mapping the first chunk, double check here if that 667 * is indeed the case. 668 */ 669 BUILD_BUG_ON(MIN_FDT_ALIGN < 8); 670 if (!dt_phys || dt_phys % MIN_FDT_ALIGN) 671 return NULL; 672 673 /* 674 * Make sure that the FDT region can be mapped without the need to 675 * allocate additional translation table pages, so that it is safe 676 * to call create_mapping() this early. 677 * 678 * On 64k pages, the FDT will be mapped using PTEs, so we need to 679 * be in the same PMD as the rest of the fixmap. 680 * On 4k pages, we'll use section mappings for the FDT so we only 681 * have to be in the same PUD. 682 */ 683 BUILD_BUG_ON(dt_virt_base % SZ_2M); 684 685 BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT != 686 __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT); 687 688 offset = dt_phys % SWAPPER_BLOCK_SIZE; 689 dt_virt = (void *)dt_virt_base + offset; 690 691 /* map the first chunk so we can read the size from the header */ 692 create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, 693 SWAPPER_BLOCK_SIZE, prot); 694 695 if (fdt_check_header(dt_virt) != 0) 696 return NULL; 697 698 size = fdt_totalsize(dt_virt); 699 if (size > MAX_FDT_SIZE) 700 return NULL; 701 702 if (offset + size > SWAPPER_BLOCK_SIZE) 703 create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, 704 round_up(offset + size, SWAPPER_BLOCK_SIZE), prot); 705 706 memblock_reserve(dt_phys, size); 707 708 return dt_virt; 709 } 710