/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is a normal
 * hugetlb entry or a non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_compat_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

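/*
 * Arch implementation of get_unmapped_area() for hugetlbfs mappings: the
 * length must be a multiple of the huge page size, the MPX check may
 * reject the request, and MAP_FIXED addresses are only validated with
 * prepare_hugepage_range().  A non-fixed hint address is aligned and
 * tried first; otherwise the search is delegated to the bottom-up or
 * top-down helper above, matching the mm's normal mmap layout.
 */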
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif
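
/*
 * Example kernel command lines (illustrative; the "hugepagesz=" values shown
 * are the only ones setup_hugepagesz() above accepts on x86-64):
 *
 *	hugepagesz=2M		PMD_SIZE, always available
 *	hugepagesz=1G		PUD_SIZE, requires X86_FEATURE_GBPAGES
 *
 * Any other value, e.g. "hugepagesz=4M", is rejected with
 * "hugepagesz: Unsupported page size 4 M".
 */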