/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc.  All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - set up per-cpu variables
 *
 * Allocate and set up per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * so that the APs don't need to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used until this point.
		 * Update ar.k3.  This move ensures that the percpu area
		 * for cpu0 is on the correct node and that its virtual
		 * address isn't insanely far from the other percpu areas,
		 * which is important for the congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
	cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(),
				       PERCPU_PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
}
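
/*
 * Each unit carved out by alloc_per_cpu_data() is PERCPU_PAGE_SIZE bytes,
 * one per possible CPU, placed back to back and laid out as
 *
 *	[ static_size | reserved_size (module percpu) | dyn_size ]
 *
 * setup_per_cpu_areas() below describes exactly this layout to the
 * generic percpu allocator.
 */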

/**
 * setup_per_cpu_areas - set up percpu areas
 *
 * Arch code has already allocated and initialized the percpu areas.  All
 * this function has to do is teach the determined layout to the dynamic
 * percpu allocator, which happens to be more complex than creating whole
 * new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - set up memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
#else
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
#endif

	find_initrd();

	alloc_per_cpu_data();
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA32
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
	} else {
		unsigned long map_size;

		/*
		 * Allocate the virtual mem_map at the top of the vmalloc
		 * area.  Round max_low_pfn up to a MAX_ORDER_NR_PAGES
		 * boundary so the map covers every max-order block the
		 * buddy allocator may touch.
		 */
		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
	free_area_init_nodes(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}