/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * The default value is only valid in 29-bit mode; in 32-bit mode it
 * is overridden in pmb_init().
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
#endif

#ifdef CONFIG_MMU
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);
}
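/*
 * Usage sketch for set_pte_phys() (illustrative only, not compiled):
 * wiring one page of a device living at physical 0x1f000000 into a
 * hypothetical fixmap slot FIX_FOO would boil down to
 *
 *	set_pte_phys(__fix_to_virt(FIX_FOO), 0x1f000000, PAGE_KERNEL_NOCACHE);
 *
 * which is what __set_fixmap() below reduces to after range-checking
 * the index. FIX_FOO and the physical address are made up for the
 * example.
 */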
/*
 * As a performance optimization, other platforms preserve the fixmap
 * mapping across a context switch. We don't presently do this, but it
 * could be done in a similar fashion to the wired TLB interface that
 * sh64 uses (by way of the memory mapped UTLB configuration) -- this
 * unfortunately forces us to give up a TLB entry for each mapping we
 * want to preserve. While this may be viable for a small number of
 * fixmaps, it's not particularly useful for everything and needs to
 * be carefully evaluated. (i.e., we may want this for the vsyscall
 * page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we
 * can pass in at __set_fixmap() time to determine the appropriate
 * behavior to follow.
 *
 *	-- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */
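/*
 * A worked example of the index arithmetic in page_table_range_init()
 * above (sketch only; it assumes 4 KiB pages, 4-byte PTEs and the
 * folded two-level layout, where PGDIR_SHIFT == 22 and PMD_SIZE ==
 * 4 MiB):
 *
 *	vaddr = 0xfe000000;
 *	i = __pgd_offset(vaddr);	== 0xfe000000 >> 22 == 1016
 *	j = __pud_offset(vaddr);	== 0 (the pud is folded into the pgd)
 *	k = __pmd_offset(vaddr);	== 0 (likewise for the pmd)
 *
 * Each pass of the innermost loop advances vaddr by PMD_SIZE and
 * allocates at most one PTE page, so pre-populating a 12 MiB range
 * costs no more than three bootmem pages.
 */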
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       datasize >> 10,
	       initsize >> 10);

	/* Initialize the vDSO */
	vsyscall_init();
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
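/*
 * free_initmem() and free_initrd_mem() above share the same idiom for
 * handing a reserved region back to the page allocator. A hypothetical
 * common helper (sketch only, not part of this file) would look like:
 *
 *	static unsigned long free_reserved_range(unsigned long start,
 *						 unsigned long end)
 *	{
 *		unsigned long addr;
 *
 *		for (addr = start; addr < end; addr += PAGE_SIZE) {
 *			ClearPageReserved(virt_to_page(addr));
 *			init_page_count(virt_to_page(addr));
 *			free_page(addr);
 *			totalram_pages++;
 *		}
 *
 *		return (end - start) >> 10;	(kilobytes freed)
 *	}
 */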
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif	/* THREAD_SHIFT < PAGE_SHIFT */

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif	/* CONFIG_MEMORY_HOTPLUG */
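/*
 * Hotplug sketch with made-up numbers: onlining 64 MiB of memory at
 * physical address 0x10000000 reaches arch_add_memory() from the
 * generic hotplug core roughly as
 *
 *	nid = memory_add_physaddr_to_nid(0x10000000);	(0, see above)
 *	arch_add_memory(nid, 0x10000000, 64 << 20);
 *
 * which, assuming 4 KiB pages, calls __add_pages() with start_pfn ==
 * 0x10000000 >> 12 == 0x10000 and nr_pages == (64 << 20) >> 12 ==
 * 0x4000. Both the address and the size are invented for the example.
 */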