/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
unsigned long cached_to_uncached = 0;

#ifdef CONFIG_MMU
/*
 * Walk the kernel page tables for 'addr', allocating intermediate
 * levels as necessary, and install a PTE mapping the physical page
 * 'phys' with protection 'prot'.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/* pud is NULL on allocation failure and must not be
		 * dereferenced, so report the failure directly. */
		printk(KERN_ERR "%s: pud_alloc failed\n", __func__);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* likewise, pmd is NULL here on failure */
		printk(KERN_ERR "%s: pmd_alloc failed\n", __func__);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	if (cached_to_uncached)
		flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap
 * mapping across a context switch. We don't presently do this, but it
 * could be done in a similar fashion to the wired TLB interface that
 * sh64 uses (by way of the memory mapped UTLB configuration).
 * Unfortunately this forces us to give up a TLB entry for each mapping
 * we want to preserve. While this may be viable for a small number of
 * fixmaps, it's not particularly useful for everything and needs to be
 * carefully evaluated (i.e., we may want this for the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can
 * pass in at __set_fixmap() time to determine the appropriate behavior
 * to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
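/*
 * Usage sketch (illustrative only): a platform maps a page at a
 * compile-time-fixed virtual address by reserving a slot in enum
 * fixed_addresses and handing __set_fixmap() the physical address and
 * protection. FIX_EXAMPLE, phys_addr and regs below are hypothetical
 * names, not ones this port defines:
 *
 *	__set_fixmap(FIX_EXAMPLE, phys_addr, PAGE_KERNEL_NOCACHE);
 *	regs = (void __iomem *)fix_to_virt(FIX_EXAMPLE);
 */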
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start & PMD_MASK;
	end = (end + PMD_SIZE - 1) & PMD_MASK;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, 0);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, 0);

		/* Allocate and zero a PTE page for any PMD-sized hole */
		if (!pmd_present(*pmd)) {
			pte_t *pte_table;
			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			memset(pte_table, 0, PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte_table);
		}

		vaddr += PMD_SIZE;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/* Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap(). */
	page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk(KERN_INFO "Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

#ifdef CONFIG_SUPERH32
	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));

#ifdef CONFIG_29BIT
	/*
	 * Handle trivial transitions between cached and uncached
	 * segments, making use of the 1:1 mapping relationship in
	 * 512MB lowmem.
	 */
	cached_to_uncached = P2SEG - P1SEG;
#endif
#endif
}
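/*
 * A minimal sketch of what cached_to_uncached enables (illustrative,
 * not an interface defined in this file): under the 29-bit 1:1 lowmem
 * mapping, a P1 (cached) kernel address has a P2 (uncached) alias at a
 * fixed offset, so the conversion is plain pointer arithmetic:
 *
 *	void *uncached = (void *)((unsigned long)cached + cached_to_uncached);
 */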
static struct kcore_list kcore_mem, kcore_vmalloc;
int after_bootmem = 0;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       datasize >> 10,
	       initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
	       (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn,
			  nr_pages);
	if (unlikely(ret))
		printk(KERN_ERR "%s: Failed, __add_pages() == %d\n",
		       __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif
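/*
 * Illustrative call path for the hotplug hook above (a simplified
 * sketch; the exact route through mm/memory_hotplug.c varies by
 * kernel version):
 *
 *	add_memory(nid, start, size)
 *	    -> arch_add_memory(nid, start, size)
 *	        -> __add_pages(zone, start_pfn, nr_pages)
 */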