xref: /openbmc/linux/arch/x86/mm/hugetlbpage.c (revision 174cd4b1)
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	unsigned long vpfn;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* index of the base page within the huge page mapping @address */
	vpfn = address >> PAGE_SHIFT;
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, either a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

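/*
 * pud_huge() returns 1 if @pud maps a 1GB hugetlb page, i.e. the _PAGE_PSE
 * bit is set in the PUD entry. Otherwise, returns 0.
 */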
int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
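/*
 * Bottom-up search: scan upward from mmap_legacy_base for a free range.
 * The align_mask of PAGE_MASK & ~huge_page_mask(h) makes vm_unmapped_area()
 * hand back an address aligned to this hstate's huge page size.
 */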
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

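/*
 * Top-down search: scan downward from mmap_base, with the same huge page
 * alignment constraint as the bottom-up variant above.
 */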
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

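/*
 * Arch hook for hugetlb mmap(): validate the length, honour MAP_FIXED and any
 * address hint, then pick the bottom-up or top-down search to match the
 * layout the process already uses for regular mmap().
 */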
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

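	/*
	 * If the caller supplied a hint, round it up to a huge page boundary
	 * and use it as long as it fits below TASK_SIZE and does not overlap
	 * an existing mapping.
	 */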
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
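/*
 * Parse the hugepagesz= boot parameter, e.g. "hugepagesz=1G hugepages=4".
 * 2MB (PMD-level) pages are always supported; 1GB (PUD-level) pages are
 * registered only when the CPU advertises the GBPAGES feature.
 */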
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
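/*
 * Register the 1GB hstate even without hugepagesz=1G on the command line:
 * with compaction or CMA available, gigantic pages can still be allocated
 * at runtime.
 */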
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif