/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

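/* Bottom-up search for an unmapped, hugepage-aligned region.  The
 * first pass stays below the sparc64 VA hole (capped at
 * VA_EXCLUDE_START); if that fails and the task's address space
 * extends past the hole, retry in [VA_EXCLUDE_END, task_size).
 */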
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

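/* Top-down search, walking downward from mm->mmap_base.  Only 32-bit
 * tasks use this layout here; on failure it falls back to a bottom-up
 * scan over the whole 32-bit range.
 */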
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

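/* Top-level hugetlb mmap hook: validate length and alignment, honor
 * MAP_FIXED, try the caller's hint address, and otherwise dispatch to
 * the bottom-up or top-down helper based on the mm's layout.
 */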
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

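/* sun4u: the TTE is returned unchanged.  Non-default hugepage sizes
 * are assumed to be rejected at setup time on pre-sun4v hardware, so
 * there are no extra size bits to encode here.
 */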
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

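/* sun4v: encode the hugepage size in the TTE size field, and tag the
 * entry as a huge PUD or PMD when the mapping lives above the PTE
 * level (16GB at the PUD level; 2GB, 256MB and the default HPAGE size
 * at the PMD level; 64K stays at the PTE level).
 */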
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) &= ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) |= hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writeable)
{
	unsigned int shift = huge_page_shift(hstate_vma(vma));

	return hugepage_shift_to_tte(entry, shift);
}

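/* Decode the size bits of a TTE back into a page shift.  The default
 * size reports REAL_HPAGE_SHIFT, since that is what a single hardware
 * TTE maps; huge_tte_to_size() rounds it back up to HPAGE_SIZE.
 */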
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned int shift;

	if (tlb_type == hypervisor)
		shift = sun4v_huge_tte_to_shift(entry);
	else
		shift = sun4u_huge_tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "huge_tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

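/* An 8MB HPAGE is backed by two 4MB REAL_HPAGE hardware TTEs, so a
 * default-sized TTE is reported as HPAGE_SIZE rather than
 * REAL_HPAGE_SIZE.
 */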
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

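/* Allocate page-table levels down to the one that will hold the huge
 * mapping: a PUD slot for PUD_SIZE and larger pages, a PMD slot for
 * PMD_SIZE and larger, and an ordinary PTE otherwise.
 */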
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_map(mm, pmd, addr);
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_map(pmd, addr);
}

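/* Install a huge mapping.  The TTE is replicated across every
 * page-table slot the huge page spans at its level, each copy advanced
 * by that level's page size, and the per-mm hugetlb PTE count is
 * adjusted when the entry's presence changes.
 */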
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

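/* Tear down a huge mapping: clear every replicated slot, drop the
 * hugetlb PTE count for present entries, queue TLB flushes (both 4MB
 * halves for a default 8MB page), and return the original TTE.
 */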
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

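/* A PMD (or PUD) is huge unless it is a plain valid pointer to a
 * lower-level table, i.e. _PAGE_VALID set without the huge bit.  This
 * deliberately counts non-present entries (e.g. huge migration or
 * hwpoison swap entries) as huge.
 */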
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}

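/* The helpers below mirror the generic free_pgd_range() machinery, but
 * treat hugetlb leaf entries at the PMD and PUD levels specially:
 * those are simply cleared rather than descended into, and page-table
 * pages are only freed once the floor/ceiling checks show that no
 * neighbouring mapping still needs them.
 */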
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	atomic_long_dec(&tlb->mm->nr_ptes);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

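/* Free the page-table pages backing an unmapped hugetlb range.  addr
 * and end are trimmed against floor and ceiling so that tables shared
 * with adjacent mappings are left in place.
 */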
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}