init.c: 598ee698d9db7beb06e12f4ea9d9a5fbc03a3a77 (the "-" side) vs. 95f72d1ed41a66f1c1c29c24d479de81a0bea36f (the "+" side)
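Almost everything below is the lmb → memblock rename: each allocator symbol swaps its prefix but keeps its arguments and semantics. The only substantive code changes sit in `__get_pte_phys()` and in the `page_table_range_init()` area (the helper functions removed further down belong to that change). As a quick orientation aid, here is the full rename mapping this file exercises, written as a hypothetical compatibility shim (illustration only; no such header exists in the kernel tree):

```c
/*
 * Hypothetical rename shim, for orientation only -- not a real kernel
 * header.  Every lmb symbol used in this file maps 1:1 onto a memblock
 * symbol with an identical signature.
 */
#define lmb_init()                              memblock_init()
#define lmb_add(base, size)                     memblock_add(base, size)
#define lmb_reserve(base, size)                 memblock_reserve(base, size)
#define lmb_alloc(size, align)                  memblock_alloc(size, align)
#define __lmb_alloc_base(size, align, max_addr) __memblock_alloc_base(size, align, max_addr)
#define lmb_analyze()                           memblock_analyze()
#define lmb_enforce_memory_limit(limit)         memblock_enforce_memory_limit(limit)
#define lmb_end_of_DRAM()                       memblock_end_of_DRAM()
#define lmb_phys_mem_size()                     memblock_phys_mem_size()
#define lmb_size_bytes(type, region_nr)         memblock_size_bytes(type, region_nr)
#define lmb_size_pages(type, region_nr)         memblock_size_pages(type, region_nr)
#define lmb_dump_all()                          memblock_dump_all()

/* The global state renames the same way: the lmb.memory and
 * lmb.reserved region arrays become memblock.memory and
 * memblock.reserved. */
```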
```diff
 /*
  * linux/arch/sh/mm/init.c
  *
  * Copyright (C) 1999 Niibe Yutaka
  * Copyright (C) 2002 - 2010 Paul Mundt
  *
  * Based on linux/arch/i386/mm/init.c:
  * Copyright (C) 1995 Linus Torvalds
  */
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/init.h>
 #include <linux/gfp.h>
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
 #include <linux/pagemap.h>
 #include <linux/percpu.h>
 #include <linux/io.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
 #include <asm/mmzone.h>
 #include <asm/kexec.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/cache.h>
 #include <asm/sizes.h>

 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];

 void __init generic_mem_init(void)
 {
-        lmb_add(__MEMORY_START, __MEMORY_SIZE);
+        memblock_add(__MEMORY_START, __MEMORY_SIZE);
 }

 void __init __weak plat_mem_setup(void)
 {
         /* Nothing to see here, move along. */
 }

 #ifdef CONFIG_MMU
 static pte_t *__get_pte_phys(unsigned long addr)
 {
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
+        pte_t *pte;

         pgd = pgd_offset_k(addr);
         if (pgd_none(*pgd)) {
                 pgd_ERROR(*pgd);
                 return NULL;
         }

         pud = pud_alloc(NULL, pgd, addr);
         if (unlikely(!pud)) {
                 pud_ERROR(*pud);
                 return NULL;
         }

         pmd = pmd_alloc(NULL, pud, addr);
         if (unlikely(!pmd)) {
                 pmd_ERROR(*pmd);
                 return NULL;
         }

-        return pte_offset_kernel(pmd, addr);
+        pte = pte_offset_kernel(pmd, addr);
+        return pte;
 }

 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
         pte_t *pte;

         pte = __get_pte_phys(addr);
         if (!pte_none(*pte)) {

--- 40 unchanged lines hidden ---

         if (idx >= __end_of_fixed_addresses) {
                 BUG();
                 return;
         }

         clear_pte_phys(address, prot);
 }
```
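The next hunk is the one place the two revisions genuinely diverge beyond the rename: the "-" side factors page-table allocation into `one_md_table_init()`/`one_page_table_init()` helpers (plus a no-op `page_table_kmap_check()`), while the "+" side open-codes the same steps inside `page_table_range_init()`. Both rely on the same populate-if-empty pattern, sketched below with a hypothetical helper name; the calls are the ones visible in the diff, and the bootmem allocators return zeroed, page-aligned memory, so a freshly installed table is already a valid all-not-present PTE page:

```c
/* Sketch of the populate-if-empty step shared by both revisions; the
 * function name is hypothetical. */
static pte_t * __init pte_table_for(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {           /* no PTE page installed yet */
                pte_t *pte = alloc_bootmem_pages(PAGE_SIZE);    /* zeroed page */

                pmd_populate_kernel(&init_mm, pmd, pte);        /* hook into the pmd */
                /* pte_offset_kernel() must now resolve to the page we just
                 * installed; anything else means the walk or the populate is
                 * broken, so fail loudly at boot. */
                BUG_ON(pte != pte_offset_kernel(pmd, 0));
        }
        return pte_offset_kernel(pmd, 0);
}
```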
```diff
-static pmd_t * __init one_md_table_init(pud_t *pud)
-{
-        if (pud_none(*pud)) {
-                pmd_t *pmd;
-
-                pmd = alloc_bootmem_pages(PAGE_SIZE);
-                pud_populate(&init_mm, pud, pmd);
-                BUG_ON(pmd != pmd_offset(pud, 0));
-        }
-
-        return pmd_offset(pud, 0);
-}
-
-static pte_t * __init one_page_table_init(pmd_t *pmd)
-{
-        if (pmd_none(*pmd)) {
-                pte_t *pte;
-
-                pte = alloc_bootmem_pages(PAGE_SIZE);
-                pmd_populate_kernel(&init_mm, pmd, pte);
-                BUG_ON(pte != pte_offset_kernel(pmd, 0));
-        }
-
-        return pte_offset_kernel(pmd, 0);
-}
-
-static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
-                                            unsigned long vaddr, pte_t *lastpte)
-{
-        return pte;
-}
-
 void __init page_table_range_init(unsigned long start, unsigned long end,
                                   pgd_t *pgd_base)
 {
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
-        pte_t *pte = NULL;
+        pte_t *pte;
         int i, j, k;
         unsigned long vaddr;

         vaddr = start;
         i = __pgd_offset(vaddr);
         j = __pud_offset(vaddr);
         k = __pmd_offset(vaddr);
         pgd = pgd_base + i;

         for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                 pud = (pud_t *)pgd;
                 for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
-                        pmd = one_md_table_init(pud);
-#ifndef __PAGETABLE_PMD_FOLDED
+#ifdef __PAGETABLE_PMD_FOLDED
+                        pmd = (pmd_t *)pud;
+#else
+                        pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+                        pud_populate(&init_mm, pud, pmd);
                         pmd += k;
 #endif
                         for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
-                                pte = page_table_kmap_check(one_page_table_init(pmd),
-                                                            pmd, vaddr, pte);
+                                if (pmd_none(*pmd)) {
+                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+                                        pmd_populate_kernel(&init_mm, pmd, pte);
+                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
+                                }
                                 vaddr += PMD_SIZE;
                         }
                         k = 0;
                 }
                 j = 0;
         }
 }
 #endif  /* CONFIG_MMU */

 void __init allocate_pgdat(unsigned int nid)
 {
         unsigned long start_pfn, end_pfn;
 #ifdef CONFIG_NEED_MULTIPLE_NODES
         unsigned long phys;
 #endif

         get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

 #ifdef CONFIG_NEED_MULTIPLE_NODES
-        phys = __lmb_alloc_base(sizeof(struct pglist_data),
+        phys = __memblock_alloc_base(sizeof(struct pglist_data),
                                 SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
         /* Retry with all of system memory */
         if (!phys)
-                phys = __lmb_alloc_base(sizeof(struct pglist_data),
-                                        SMP_CACHE_BYTES, lmb_end_of_DRAM());
+                phys = __memblock_alloc_base(sizeof(struct pglist_data),
+                                        SMP_CACHE_BYTES, memblock_end_of_DRAM());
         if (!phys)
                 panic("Can't allocate pgdat for node %d\n", nid);

         NODE_DATA(nid) = __va(phys);
         memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

         NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
 #endif

--- 14 unchanged lines hidden ---

         /* Nothing to do.. */
         if (!p->node_spanned_pages)
                 return;

         end_pfn = p->node_start_pfn + p->node_spanned_pages;

         total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

-        paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+        paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
         if (!paddr)
                 panic("Can't allocate bootmap for nid[%d]\n", nid);

         init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

         free_bootmem_with_active_regions(nid, end_pfn);

         /*
          * XXX Handle initial reservations for the system memory node
          * only for the moment, we'll refactor this later for handling
          * reservations in other nodes.
          */
         if (nid == 0) {
                 /* Reserve the sections we're already using. */
-                for (i = 0; i < lmb.reserved.cnt; i++)
-                        reserve_bootmem(lmb.reserved.region[i].base,
-                                        lmb_size_bytes(&lmb.reserved, i),
+                for (i = 0; i < memblock.reserved.cnt; i++)
+                        reserve_bootmem(memblock.reserved.region[i].base,
+                                        memblock_size_bytes(&memblock.reserved, i),
                                         BOOTMEM_DEFAULT);
         }

         sparse_memory_present_with_active_regions(nid);
 }

 static void __init do_init_bootmem(void)
 {
         int i;

         /* Add active regions with valid PFNs. */
-        for (i = 0; i < lmb.memory.cnt; i++) {
+        for (i = 0; i < memblock.memory.cnt; i++) {
                 unsigned long start_pfn, end_pfn;
-                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+                start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+                end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
                 __add_active_range(0, start_pfn, end_pfn);
         }

         /* All of system RAM sits in node 0 for the non-NUMA case */
         allocate_pgdat(0);
         node_set_online(0);

         plat_mem_setup();

--- 15 unchanged lines hidden ---

         start_pfn = PFN_UP(__pa(_end));

         /*
          * Reserve the kernel text and Reserve the bootmem bitmap. We do
          * this in two steps (first step was init_bootmem()), because
          * this catches the (definitely buggy) case of us accidentally
          * initializing the bootmem allocator with an invalid RAM area.
          */
-        lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+        memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
                     (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
                     (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

         /*
          * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
          */
         if (CONFIG_ZERO_PAGE_OFFSET != 0)
-                lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+                memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

         /*
          * Handle additional early reservations
          */
         check_for_initrd();
         reserve_crashkernel();
 }

 void __init paging_init(void)
 {
         unsigned long max_zone_pfns[MAX_NR_ZONES];
         unsigned long vaddr, end;
         int nid;

-        lmb_init();
+        memblock_init();

         sh_mv.mv_mem_init();

         early_reserve_mem();

-        lmb_enforce_memory_limit(memory_limit);
-        lmb_analyze();
+        memblock_enforce_memory_limit(memory_limit);
+        memblock_analyze();

-        lmb_dump_all();
+        memblock_dump_all();

         /*
          * Determine low and high memory ranges:
          */
-        max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
         min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

         nodes_clear(node_online_map);

         memory_start = (unsigned long)__va(__MEMORY_START);
-        memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+        memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

         uncached_init();
         pmb_init();
         do_init_bootmem();
         ioremap_fixed_init();

         /* We don't need to map the kernel through the TLB, as
          * it is permanatly mapped using P1. So clear the

--- 201 unchanged lines hidden ---
```
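Condensing the `paging_init()` hunks above into one place shows the boot ordering the port depends on, which the rename leaves untouched. This is a reading aid rather than new code; the names come from the "+" column and the wrapper is hypothetical. Note that this 2010-era memblock API still separates registration from analysis: totals such as `memblock_phys_mem_size()` and `memblock_end_of_DRAM()` are only meaningful after `memblock_analyze()` has run, which is why the calls appear in this order in the file.

```c
/* Condensed from paging_init() above; hypothetical wrapper name. */
static void __init memblock_boot_sequence(void)
{
        memblock_init();                /* start with empty region tables */
        sh_mv.mv_mem_init();            /* machvec registers RAM, cf.
                                         * memblock_add() in generic_mem_init() */
        early_reserve_mem();            /* kernel text, zero page, initrd */
        memblock_enforce_memory_limit(memory_limit);
        memblock_analyze();             /* compute totals over the regions */
        memblock_dump_all();            /* debug dump of memory/reserved */

        /* Only now are the DRAM bounds trustworthy. */
        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
}
```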