/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

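	/* The search above was capped at VA_EXCLUDE_START, the bottom
	 * of the sparc64 virtual address hole.  If it failed and this
	 * is a 64-bit task, retry in the range above the hole.
	 */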
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

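/* Common entry point for hugepage mmap(): validate the length, honor
 * MAP_FIXED and any address hint, then fall through to a bottom-up or
 * top-down search matching this mm's normal mmap layout.
 */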
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

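/* Allocate the page table levels down to the PTE for a hugepage
 * mapping at @addr, returning the first sub-PTE of the range.
 */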
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

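	/* sparc64 hugepages are backed by HPAGE_SIZE / PAGE_SIZE
	 * consecutive base-page PTEs, not by a single huge PMD or PUD
	 * entry, so allocate all the way down to the PTE level.
	 */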
	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

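/* Look up the first sub-PTE of the hugepage mapping @addr without
 * allocating; returns NULL if any page table level is empty.
 */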
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

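/* sparc64 never shares hugepage page tables, so there is nothing to
 * unshare here; always report that no unshare took place.
 */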
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

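/* Install a hugepage mapping by writing every sub-PTE in the range,
 * bumping the per-mm hugepage count when a new mapping becomes
 * present.
 */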
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
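	/* Step both the virtual address and the PTE's physical address
	 * forward one base page per iteration, so the sub-PTEs map
	 * consecutive pages of the huge page.
	 */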
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

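/* Tear down a hugepage mapping: clear every sub-PTE and return the
 * original value of the first sub-PTE, dropping the per-mm hugepage
 * count if the mapping was present.
 */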
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

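	/* Clear each base-page PTE covering the hugepage. */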
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

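/* sparc64 hugepages are just runs of normal PTEs, which the generic
 * follow_page() walk already handles, so report failure here to make
 * the caller take that path.
 */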
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

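/* No huge entries exist at the PMD or PUD level on sparc64, so the
 * stubs below all report no support.
 */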
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

int pmd_huge_support(void)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}