// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
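
/*
 * Worked example of the align_mask computation used by both helpers
 * above (illustrative comment only, added for clarity): with 4 KiB base
 * pages and a 2 MiB hstate, huge_page_mask(h) == ~(2 MiB - 1), so
 *
 *	PAGE_MASK & ~huge_page_mask(h) == 0x1ff000
 *
 * i.e. exactly the bits between PAGE_SHIFT and HPAGE_SHIFT. Asking
 * vm_unmapped_area() to keep these bits clear (align_offset == 0)
 * yields a huge page aligned address.
 */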

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif
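
/*
 * Illustrative userspace sketch of the path that reaches
 * hugetlb_get_unmapped_area() above. It is guarded out of the build and
 * is not part of the kernel; it assumes a kernel with MAP_HUGETLB
 * support and a non-empty default huge page pool (e.g. via
 * /proc/sys/vm/nr_hugepages), and would be built as a standalone
 * program.
 */
#if 0
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;		/* one PMD-sized huge page */

	/*
	 * With addr == NULL the kernel picks the placement itself; for
	 * a MAP_HUGETLB mapping that choice is made by
	 * hugetlb_get_unmapped_area().
	 */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	*(char *)p = 1;		/* touch to fault in the huge page */
	munmap(p, len);
	return 0;
}
#endif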