/* xref: /openbmc/linux/arch/sparc/mm/hugetlbpage.c (revision 297ce026) */
// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

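	/*
	 * The first search is confined to the range below the sparc64
	 * VA hole.  If it failed and this is a 64-bit task, retry in
	 * the window above the hole before giving up.
	 */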
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

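/*
 * Entry point: reject lengths that are not huge-page aligned or do not
 * fit the task's address space, honor MAP_FIXED, and try the caller's
 * hint.  Otherwise pick the bottom-up or top-down helper to match the
 * layout mm->get_unmapped_area already uses for normal pages.
 */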
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

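/*
 * sun4u only supports the default huge page size, whose TTE size bits
 * pte_mkhuge() should already have set, so there is nothing to adjust
 * here.
 */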
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

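/*
 * sun4v TTEs encode the page size in the _PAGE_SZALL_4V field: clear
 * the old size bits, then set the bits for the requested shift along
 * with the PUD/PMD huge markers that the page table walkers test.
 */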
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

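/*
 * Build the final huge TTE: mark the pte huge, encode the page size
 * for the given shift, and propagate the vma's ADI state into the
 * TTE.mcd bit.
 */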
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	entry = pte_mkhuge(entry);
	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd. */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}

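/*
 * The inverse mapping: recover the page shift from a TTE's size bits.
 * A 4MB TTE yields REAL_HPAGE_SHIFT, since one HPAGE_SIZE mapping is
 * built from two REAL_HPAGE_SIZE TTEs.
 */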
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "huge_tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

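/*
 * Convert a huge TTE to a mapping size, folding a REAL_HPAGE_SIZE
 * half of a default huge page back to the full HPAGE_SIZE.
 */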
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

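/* Leaf mapping sizes reported to generic page table walkers (e.g. perf). */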
unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }

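/*
 * Find or allocate the entry that will hold a huge mapping: the PUD
 * itself for sizes of at least PUD_SIZE, the PMD below that, and a
 * normal pte page for the smallest sizes.
 */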
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_map(mm, pmd, addr);
}

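/*
 * Lookup-only counterpart of huge_pte_alloc(): walk down, returning
 * the PUD or PMD itself when it is a huge leaf, NULL when a level is
 * empty, and the normal pte otherwise.
 */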
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_map(pmd, addr);
}

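/*
 * A huge mapping larger than its page table level is written as nptes
 * consecutive TTEs, each stepping the physical address by
 * 1UL << shift.  For example (assuming the usual 8KB base page and
 * thus an 8MB PMD_SIZE), a 2GB sun4v page would be written as 256
 * PMD-level entries.  An HPAGE_SIZE mapping additionally queues a TLB
 * batch entry for each of its two REAL_HPAGE_SIZE halves.
 */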
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

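/*
 * Tear-down counterpart of set_huge_pte_at(): clear all nptes entries
 * covering the mapping and queue the matching TLB flushes.
 */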
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

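/*
 * Treat any non-none entry whose (_PAGE_VALID | _PAGE_*_HUGE) bits
 * differ from plain _PAGE_VALID as huge; this covers present huge
 * entries as well as huge swap entries that have lost _PAGE_VALID.
 */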
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

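/*
 * Free the page tables that backed a hugetlb region, in the spirit of
 * free_pgd_range(): huge PUD/PMD leaves are simply cleared, and a
 * lower directory is freed only when the span being torn down,
 * clipped to [floor, ceiling), covers it entirely.
 */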
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}
534