xref: /openbmc/linux/arch/sparc/mm/hugetlbpage.c (revision 8dda2eac)
// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */

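/*
 * Bottom-up search for a free, hugepage-aligned range.  For 64-bit
 * tasks this first tries the region below the sparc64 VA hole
 * (high_limit capped at VA_EXCLUDE_START) and, if that fails with
 * -ENOMEM, retries in the region above the hole starting at
 * VA_EXCLUDE_END.
 */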
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

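/*
 * The arch hook behind hugetlbfs mmap(): validate the length, honour
 * MAP_FIXED and any address hint, then dispatch to whichever helper
 * matches this mm's layout.  In both helpers,
 *
 *	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 *
 * asks vm_unmapped_area() for a hugepage-aligned result: e.g. for a
 * 256MB hstate with 8K base pages the mask covers bits [27:13], so
 * any address returned is a 256MB multiple.
 */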
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

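/*
 * sun4u appears to support only the default hugepage size here; its
 * TTE size bits are established elsewhere (e.g. by pte_mkhuge()), so
 * the entry is returned unchanged.  Only sun4v needs the per-size
 * rewriting done below.
 */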
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

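/*
 * Translate a hugepage shift into sun4v TTE size bits.  Note that
 * HPAGE_SHIFT (the 8MB HPAGE_SIZE) keeps the default 4MB size bits
 * on purpose: an HPAGE_SIZE page is backed by two REAL_HPAGE_SIZE
 * (4MB) TTEs, as set_huge_pte_at() below spells out.
 */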
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) &= ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) |= hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd. */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}

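/*
 * The inverse mapping: recover the hugepage shift from a TTE's size
 * bits.  Unrecognized size bits fall through to PAGE_SHIFT, which
 * huge_tte_to_shift() treats as an error and warns about.
 */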
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "huge_tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

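/*
 * Size of the mapping described by a huge TTE.  A lone 4MB
 * (REAL_HPAGE_SIZE) TTE is reported as the 8MB HPAGE_SIZE, since that
 * is the unit hugetlb manages; its two constituent TTEs are handled
 * together in set_huge_pte_at() and huge_ptep_get_and_clear().
 */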
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }

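/*
 * Allocate the page-table slot backing a hugepage of size sz.  With
 * sparc64's 8K base pages, PMD_SIZE is 8MB and PUD_SIZE is 8GB:
 * 16GB pages land on a PUD entry, 8MB/256MB/2GB pages on a PMD entry,
 * and 64K pages on a PTE in a last-level table.
 */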
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_map(mm, pmd, addr);
}

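/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the existing
 * tables and return a pointer to the (possibly huge) leaf entry, or
 * NULL if nothing is mapped at any level.
 */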
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_map(pmd, addr);
}

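/*
 * Install a huge mapping.  The TTE is replicated into every
 * page-table slot the hugepage spans (nptes of them), each advanced
 * by (i << shift) so it maps the next chunk of the page.  An 8MB
 * HPAGE_SIZE page also queues two TLB batch entries, because the
 * hardware only ever sees its two 4MB REAL_HPAGE_SIZE halves.
 */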
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

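/*
 * Tear down a huge mapping: clear all nptes slots, drop the hugetlb
 * PTE count for present entries, and queue TLB flushes mirroring
 * set_huge_pte_at() above (again two for an HPAGE_SIZE page).
 */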
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

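/*
 * A pmd/pud is "huge" unless it is a plain valid pointer to a
 * lower-level table.  This deliberately also matches non-present
 * hugetlb entries (swap or migration), which have _PAGE_VALID clear.
 */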
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

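/*
 * Free empty page tables after a hugetlb unmap.  This mirrors the
 * generic free_pgd_range() walk, except that hugepage leaves at the
 * PUD/PMD level are simply cleared, while 64K hugepages may leave a
 * last-level PTE table to free.  floor/ceiling bound how far possibly
 * shared tables may be truncated, exactly as in the generic code.
 */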
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}
533