xref: /openbmc/linux/arch/x86/mm/hugetlbpage.c (revision 82df5b73)
// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address >> PAGE_SHIFT;	/* virtual pfn of @address */
	pte_t *pte;
	struct page *page;
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	if (WARN_ON(!pte || pte_none(*pte)))
		return ERR_PTR(-EINVAL);

	/* return the subpage of the huge page that maps @address */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHuge(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry. Otherwise it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif
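
/*
 * Illustrative sketch (not built, and not an existing kernel helper):
 * one way a walker might use pmd_huge() before treating a PMD entry as
 * a pointer to a page-table page. example_pmd_to_page() is a made-up
 * name for demonstration only.
 */
#if 0
static struct page *example_pmd_to_page(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return NULL;		/* nothing mapped here */
	if (pmd_huge(*pmd) && pmd_present(*pmd))
		return pmd_page(*pmd);	/* head page of a present 2MB mapping */
	/*
	 * pmd_huge() is also true for non-present hugetlb entries
	 * (migration/hwpoison); those carry no page to return here.
	 * A regular present entry points to a PTE page instead.
	 */
	return NULL;
}
#endif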

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
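
/*
 * Worked example for the align_mask computation above, assuming the 2MB
 * hstate on x86-64: huge_page_mask(h) == ~(2MB - 1), so
 * PAGE_MASK & ~huge_page_mask(h) == 0x1ff000. With align_offset == 0,
 * vm_unmapped_area() only returns addresses with those bits clear,
 * i.e. 2MB-aligned addresses.
 */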

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
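
/*
 * Note on the DEFAULT_MAP_WINDOW handling above: with 5-level paging
 * (LA57) the kernel keeps mappings below the 47-bit boundary unless
 * userspace passes a hint address above it, in which case the search
 * window is widened to the full 56-bit address space. See
 * Documentation/x86/x86_64/5level-paging.rst.
 */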

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */
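
/*
 * Userspace view (illustrative only, not part of this file): a request
 * like the one below reaches hugetlb_get_unmapped_area() with addr == 0
 * and takes the bottom-up or top-down path depending on the mm layout.
 * Assumes hugetlb pages were reserved beforehand, e.g. via
 * /proc/sys/vm/nr_hugepages.
 */
#if 0
	#include <sys/mman.h>

	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	/* on success p is 2MB-aligned; on failure p == MAP_FAILED */
#endif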

#ifdef CONFIG_X86_64
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}
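
/*
 * Illustrative note: the core hugetlb code consults this hook when
 * parsing the "hugepagesz=" boot parameter, so on x86-64 "hugepagesz=2M"
 * is always valid while "hugepagesz=1G" requires CPU support for GB
 * pages (the pdpe1gb CPUID flag behind X86_FEATURE_GBPAGES).
 */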

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CONTIG_ALLOC */
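
/*
 * Example (illustrative): with the 1GB hstate registered above, an
 * administrator can ask for gigantic pages at runtime, e.g.
 *
 *   echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
 *
 * which relies on CONFIG_CONTIG_ALLOC to find physically contiguous
 * 1GB ranges.
 */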
#endif /* CONFIG_X86_64 */