/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
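/*
 * Illustrative sketch, not part of this file: with the virtual memory
 * map based at a fixed, architecture-defined address (VMEMMAP_START on
 * some arches), the conversions named above reduce to pointer
 * arithmetic along the lines of:
 *
 *	#define vmemmap			((struct page *)VMEMMAP_START)
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	((page) - vmemmap)
 *
 * The conversion itself needs no page table walk or memory access;
 * only the backing pages instantiated below must exist.
 */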
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
}


void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use it; otherwise fall back to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		else
			page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* Warn if the backing page is further than LOCAL_DISTANCE from @node. */
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}

/*
 * The helpers below walk, and where necessary instantiate, one level of
 * the kernel page table for a vmemmap address, allocating the backing
 * memory on the requested node.
 */
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

/*
 * Map the virtual range covering the struct pages
 * [start_page, start_page + size) one PAGE_SIZE mapping at a time,
 * verifying node locality of each entry.
 */
int __meminit vmemmap_populate_basepages(struct page *start_page,
					unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
	if (error)
		return NULL;

	return map;
}
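/*
 * Illustrative sketch, not part of this file: an architecture that is
 * content with PAGE_SIZE mappings can provide the expected
 * vmemmap_populate() hook as a thin wrapper around
 * vmemmap_populate_basepages(), roughly:
 *
 *	int __meminit vmemmap_populate(struct page *start_page,
 *				       unsigned long size, int node)
 *	{
 *		return vmemmap_populate_basepages(start_page, size, node);
 *	}
 *
 * Architectures with large TLB entries (x86_64, for example) instead
 * populate at the pmd level with huge pages, falling back to base
 * pages only when a large allocation is unavailable.
 */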