/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Index into the compound page by the virtual page frame number. */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a normal
 * hugetlb entry or a non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

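/*
 * Illustrative sketch, not built: one way a caller might consult
 * pmd_huge()/pud_huge() while walking the page tables of an address that
 * is known to be mapped.  The pgd/p4d/pud/pmd accessors and the required
 * mmap_sem/page-table locking are assumed from the generic page-table
 * API; error handling is omitted for brevity.
 */
#if 0
static bool addr_is_huge_mapped(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;
	if (pud_huge(*pud))		/* 1GB page: _PAGE_PSE set at PUD level */
		return true;
	pmd = pmd_offset(pud, addr);
	return pmd_huge(*pmd);		/* 2MB hugetlb entry, present or not */
}
#endif
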
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);
	info.high_limit = in_compat_syscall() ?
		tasksize_32bit() : tasksize_64bit();
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif /* CONFIG_X86_64 */
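
/*
 * Usage sketch (illustrative only): with huge pages reserved, for example
 * by booting with "hugepagesz=2M hugepages=16" (parsed by
 * setup_hugepagesz() above) or by writing to /proc/sys/vm/nr_hugepages,
 * an anonymous MAP_HUGETLB mapping from userspace is expected to reach
 * hugetlb_get_unmapped_area() through the hugetlbfs ->get_unmapped_area
 * hook.  A minimal userspace caller, assuming a 2MB default huge page
 * size, might look like this; touching the mapping faults in one huge page:
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	#define LEN	(2UL * 1024 * 1024)
 *
 *	int main(void)
 *	{
 *		void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
 *			       -1, 0);
 *
 *		if (p == MAP_FAILED) {
 *			perror("mmap");
 *			return 1;
 *		}
 *		((char *)p)[0] = 1;
 *		munmap(p, LEN);
 *		return 0;
 *	}
 */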