/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access: with a virtually contiguous
 * mem_map at a fixed base, pfn_to_page(pfn) reduces to vmemmap + pfn.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map comes essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid(size, align, goal,
					   BOOTMEM_ALLOC_ACCESSIBLE, node);
}

static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; otherwise fall back to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(
				node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		else
			page = alloc_pages(
				GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/*
 * Callers must request the same size on every call during the early
 * stage, since the ALIGN() below assumes a uniform allocation size.
 */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* Take the allocation from the preallocated buffer. */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}
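/*
 * The helpers below each populate one page-table level.  A caller such
 * as vmemmap_populate_basepages() chains them to walk the kernel page
 * table top-down (pgd -> pud -> pmd -> pte), allocating a fresh table
 * page whenever the entry covering @addr is still empty.
 */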
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
						      PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* Free whatever is left of the preallocated buffer. */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}
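/*
 * Illustrative sketch (not part of this file): an architecture with no
 * special large-page vmemmap support can satisfy the vmemmap_populate()
 * hook mentioned in the header comment by simply delegating to the
 * generic base-page walker above, roughly:
 *
 *	int __meminit vmemmap_populate(unsigned long start,
 *				       unsigned long end, int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */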