xref: /openbmc/linux/arch/sparc/mm/hugetlbpage.c (revision e2c75e76)
// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
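	/*
	 * align_mask sets the bits between the page size and the huge
	 * page size, so vm_unmapped_area() returns a huge-page-aligned
	 * address.
	 */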
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

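	/*
	 * The 64-bit VA space has a hole in the middle; if the search
	 * below VA_EXCLUDE_START failed for a 64-bit task, retry in the
	 * region above the hole.
	 */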
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

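/*
 * Illustrative sketch, not part of this file: a userspace mapping that
 * ends up in hugetlb_get_unmapped_area().  The hugetlbfs path is an
 * assumption about the mount point; MAP_HUGETLB on anonymous memory
 * reaches the same code.
 *
 *	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 8UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * len must be a multiple of the huge page size (8MB by default on
 * sparc64), otherwise the length check above returns -EINVAL.
 */
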
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) &= ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) |= hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writeable)
{
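	/*
	 * The size is taken from the VMA's hstate; the page and
	 * writeable arguments are unused on sparc64.
	 */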
	unsigned int shift = huge_page_shift(hstate_vma(vma));

	return hugepage_shift_to_tte(entry, shift);
}

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned int shift;

	if (tlb_type == hypervisor)
		shift = sun4v_huge_tte_to_shift(entry);
	else
		shift = sun4u_huge_tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "huge_tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

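/*
 * An 8MB HPAGE_SIZE page is backed by two 4MB REAL_HPAGE_SIZE TTEs, so a
 * 4MB TTE reports the full 8MB huge page size here.
 */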
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

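/*
 * 16GB pages live at the PUD level, 8MB/256MB/2GB pages at the PMD
 * level, and 64K pages in an ordinary PTE (matching the huge-bit
 * assignments in hugepage_shift_to_tte() above).
 */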
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_map(mm, pmd, addr);
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_map(pmd, addr);
}

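/*
 * A huge page larger than the entry it sits in spans several consecutive
 * page table entries (e.g. a 2GB page covers 256 PMD entries); each TTE
 * is offset by 1UL << shift so its physical address stays correct.
 */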
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

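/*
 * A non-none entry is huge if its huge bit is set, or if it is not
 * valid at all (for instance a swap or migration entry).
 */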
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

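/*
 * Mirrors free_pgd_range() in mm/memory.c: floor and ceiling bound the
 * range whose page tables may actually be freed, while hugetlb leaf
 * entries at the PMD/PUD level are simply cleared in place by the
 * helpers above.
 */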
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
509