/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(unsigned int filter)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
				if (max_gap < LARGE_GAP)
					continue;
#endif
				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
					i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}


/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: pointer to the number of bytes needed for the bootmap
 *
 * Find a place to put the bootmap and return its starting address in
 * bootmap_start.  This address must be page-aligned.
 */
static int __init
find_bootmap_location (u64 start, u64 end, void *arg)
{
	u64 needed = *(unsigned long *)arg;
	u64 range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

	for (i = 0; i < num_rsvd_regions; i++) {
		range_start = max(start, free_start);
		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);

		free_start = PAGE_ALIGN(rsvd_region[i].end);

		if (range_end <= range_start)
			continue;	/* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return -1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;
	}
	return 0;
}
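/*
 * Note (not in the original source): the loop above assumes that
 * rsvd_region[] is sorted by ascending start address, which the
 * arch setup code is expected to guarantee before this callback runs.
 * Each iteration looks at the free gap between the end of the previous
 * reserved region (free_start) and the start of the current one,
 * clipped to the [start, end) range handed in by the EFI memmap walk;
 * the first gap of at least "needed" bytes wins, and the negative
 * return value is expected to terminate the walk early (hence the
 * "done" comment).
 */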
#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void * __cpuinit
per_cpu_init (void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * so that the APs don't have to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used until this point.
		 * Update ar.k3.  This move ensures that the percpu area
		 * for cpu0 is on the correct node and that its virtual
		 * address isn't insanely far from the other percpu areas,
		 * which is important for the congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
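/*
 * Layout sketch (not in the original source): alloc_per_cpu_data()
 * grabs one contiguous bootmem block and per_cpu_init() carves it into
 * one PERCPU_PAGE_SIZE unit per possible CPU:
 *
 *	cpu_data + 0 * PERCPU_PAGE_SIZE		cpu 0 (copied from the
 *						__cpu0_per_cpu init area)
 *	cpu_data + 1 * PERCPU_PAGE_SIZE		cpu 1
 *	...
 *
 * __per_cpu_offset[cpu] records the delta between a unit and
 * __per_cpu_start, so a per-cpu variable's address plus
 * __per_cpu_offset[cpu] lands inside that CPU's unit.
 */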
/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	unsigned long bootmap_size;

	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
	/* how many bytes to cover all the pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);

	find_initrd();

	alloc_per_cpu_data();
}

static int count_pages(u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}
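/*
 * Background (not in the original source): with CONFIG_VIRTUAL_MEM_MAP,
 * paging_init() below checks the largest hole in the physical address
 * space.  If it is smaller than LARGE_GAP, a flat mem_map[] is cheap
 * enough and is used as-is; otherwise the mem_map is carved out of the
 * top of the vmalloc area (vmem_map) and page tables are populated only
 * for ranges that actually contain memory, so large holes cost no
 * backing pages for their struct page entries.
 */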
/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
		free_area_init_nodes(max_zone_pfns);
	} else {
		unsigned long map_size;

		/* allocate virtual mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map() makes an adjustment for mem_map
		 * which isn't compatible with vmem_map, so set
		 * node_mem_map explicitly here.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();
		free_area_init_nodes(max_zone_pfns);

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	add_active_range(0, 0, max_low_pfn);
	free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
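/*
 * Usage sketch (assumption, not from this file): during early boot the
 * ia64 arch setup code is expected to call find_memory() first, which
 * brings up the bootmem allocator and the percpu backing store (via
 * alloc_per_cpu_data()), and paging_init() later, once the EFI memory
 * map has been parsed and reserved, to initialize the zones and
 * mem_map.  The exact call sites live in arch/ia64/kernel/setup.c.
 */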