xref: /openbmc/linux/arch/powerpc/mm/hugetlbpage.c (revision 8bd1369b)
/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>


#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_512K	19
#define PAGE_SHIFT_8M	23
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

bool hugetlb_disabled = false;

unsigned int HPAGE_SHIFT;
EXPORT_SYMBOL(HPAGE_SHIFT);

#define hugepd_none(hpd)	(hpd_val(hpd) == 0)

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}
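
/*
 * Usage sketch (illustrative, not from this file): callers look up an
 * existing huge mapping using the size from the hstate, e.g.
 *
 *	pte_t *ptep = huge_pte_offset(mm, addr & huge_page_mask(h),
 *				      huge_page_size(h));
 *
 * The sz argument is unused here because __find_linux_pte() works the
 * size out from the page tables themselves.
 */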

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = hugepte_cache;
		num_hugepd = 1 << (pshift - pdshift);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	new = kmem_cache_zalloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else {
#ifdef CONFIG_PPC_BOOK3S_64
			*hpdp = __hugepd(__pa(new) |
					 (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
			*hpdp = __hugepd(__pa(new) | _PMD_USER |
					 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
					  _PMD_PAGE_512K) | _PMD_PRESENT);
#else
			/* We use the old format for PPC_FSL_BOOK3E */
			*hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
		}
	}
	/*
	 * If we bailed from the for loop early, somebody else populated an
	 * entry first; back out the entries we did fill in and free the
	 * now-unneeded hugepte table.
	 */
	if (i < num_hugepd) {
		for (i = i - 1 ; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	}
	spin_unlock(ptl);
	return 0;
}
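
/*
 * Worked example (illustrative; the exact shifts depend on the config): if
 * a hugepage has pshift = 22 (4M) and the directory level covered by the
 * hugepd has pdshift = 21, then num_hugepd = 1 << (22 - 21) = 2, so two
 * consecutive directory entries are filled, all pointing at one hugepte
 * table from hugepte_cache.  When pshift < pdshift, a single entry points
 * at a table from PGT_CACHE(pdshift - pshift) instead.
 */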

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#endif

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use a hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (!pu)
			return NULL;
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else {
				ptl = pmd_lockptr(mm, pm);
				hpdp = (hugepd_t *)pm;
			}
		}
	}
#else
	if (pshift >= HUGEPD_PGD_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (!pu)
			return NULL;
		if (pshift >= HUGEPD_PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
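
/*
 * Level-selection example (illustrative): on Book3S 64, a size whose shift
 * equals PGDIR_SHIFT (the 16GB case above) returns the pgd entry itself as
 * the pte, a PUD_SHIFT-sized page returns the pud entry, a PMD_SHIFT-sized
 * page (the 16MB case) returns the pmd entry, and any other supported size
 * is parked in a hugepd table hanging off the enclosing level, guarded by
 * that level's page-table lock.
 */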

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES] __initdata;
static unsigned nr_gpages __initdata;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		/* Guard against overflowing the fixed-size array */
		if (WARN_ON_ONCE(nr_gpages >= MAX_NUMBER_GPAGES))
			return;
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif


int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
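
/*
 * Rough sizing (illustrative, assuming 4K pages and an 8-byte pte_t): about
 * (4096 - sizeof(struct hugepd_freelist)) / 8, i.e. roughly 500 hugepte
 * tables are batched into one page before being handed to RCU below.
 */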

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(hugepte_cache, hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}
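
/*
 * Note on the floor/ceiling checks above (illustrative, assuming
 * pdshift = 24): pdmask rounds start down to a 16M boundary, and the range
 * is only torn down when that whole aligned span lies inside
 * [floor, ceiling), mirroring the convention documented above
 * free_pgd_range() in mm/memory.c.  The "end - 1 > ceiling - 1" form also
 * works when ceiling is 0, meaning "no ceiling", because the subtraction
 * wraps to ULONG_MAX.
 */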

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * If it is not a hugepd pointer, we should already
			 * find it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't use here the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * hugepage directory entries are protected by mm->page_table_lock;
	 * use this instead of huge_pte_lockptr().
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
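
/*
 * Worked example (illustrative): with sz = 16M (0x1000000),
 * addr = 0x10234000 and end = 0x30000000, __boundary =
 * (addr + sz) & ~(sz - 1) = 0x11000000, which is below end, so the walk
 * advances one hugepte at a time.  The "- 1" comparisons keep the math
 * correct if __boundary wraps to 0 at the top of the address space.
 */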

int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
						       pgoff, flags);
#endif
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	/* With radix we don't use slices, so derive the page size from the vma */
	if (!radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
#endif
	return vma_kernel_pagesize(vma);
}

static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) == 0;
	return false;
}
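
/*
 * For example: 4, 16, 64, ... (even log2) are powers of 4, while
 * 2, 8, 32, ... (odd log2) are not.  FSL Book3E TLB entries only support
 * power-of-4 sizes, which is why add_huge_page_size() checks this below.
 */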

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/*
	 * Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits.
	 */
	if (size <= PAGE_SIZE)
		return -EINVAL;
#if defined(CONFIG_PPC_FSL_BOOK3E)
	if (!is_power_of_4(size))
		return -EINVAL;
#elif !defined(CONFIG_PPC_8xx)
	if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
		return -EINVAL;
#endif

	mmu_psize = shift_to_mmu_psize(shift);
	if (mmu_psize < 0)
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We need to make sure that for different page sizes reported by
	 * firmware we only add hugetlb support for page sizes that can be
	 * supported by the Linux page table layout.
	 * For now we have:
	 * Radix: 2M (and 1G, except on POWER9 DD1)
	 * Hash: 16M and 16G
	 */
	if (radix_enabled()) {
		if (mmu_psize != MMU_PAGE_2M) {
			if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
			    (mmu_psize != MMU_PAGE_1G))
				return -EINVAL;
		}
	} else {
		if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
			return -EINVAL;
	}
#endif

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}
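
/*
 * Usage sketch (illustrative): the sizes accepted here come either from
 * hugepage_setup_sz() below, for a command line such as
 *	hugepagesz=16M hugepages=8
 * or from hugetlbpage_init() probing mmu_psize_defs at boot.
 */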

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0) {
		hugetlb_bad_size();
		pr_err("Invalid huge page size specified (%llu)\n", size);
	}

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
	if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;
#endif
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < HUGEPD_PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < HUGEPD_PGD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * If pdshift and shift are the same, we don't use the
		 * pgtable cache for the hugepd.
		 */
		if (pdshift > shift)
			pgtable_cache_add(pdshift - shift, NULL);
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
		else if (!hugepte_cache) {
			/*
			 * Create a kmem cache for hugeptes.  The bottom bits
			 * in the pte have size information encoded in them,
			 * so align them to allow this.
			 */
			hugepte_cache = kmem_cache_create("hugepte-cache",
							  sizeof(pte_t),
							  HUGEPD_SHIFT_MASK + 1,
							  0, NULL);
			if (hugepte_cache == NULL)
				panic("%s: Unable to create kmem cache for hugeptes\n",
				      __func__);
		}
#endif
	}

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
	/* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else if (mmu_psize_defs[MMU_PAGE_512K].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
#else
	/*
	 * Set the default large page size. Currently, we pick 16M, 1M or 2M
	 * depending on what is available.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
	return 0;
}

arch_initcall(hugetlbpage_init);

void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}
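
/*
 * For scale (illustrative): a 16M huge page built from 64K base pages has
 * compound_order(page) == 8, so the loop above flushes 256 subpages.
 */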

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it.  This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but the paca->irq_soft_mask =
 * IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	pgdp = pgdir + pgd_index(ea);
	pgd  = READ_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap. The returned pte_t * is still not
	 * stable, so the caller must check it for the above conditions.
	 */
	if (pgd_none(pgd))
		return NULL;
	else if (pgd_huge(pgd)) {
		ret_pte = (pte_t *) pgdp;
		goto out;
	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
		hpdp = (hugepd_t *)&pgd;
	else {
		/*
		 * Even if we end up with an unmap, the pgtable will not
		 * be freed, because we do an rcu free and here we are
		 * irq disabled.
		 */
		pdshift = PUD_SHIFT;
		pudp = pud_offset(&pgd, ea);
		pud  = READ_ONCE(*pudp);

		if (pud_none(pud))
			return NULL;
		else if (pud_huge(pud)) {
			ret_pte = (pte_t *) pudp;
			goto out;
		} else if (is_hugepd(__hugepd(pud_val(pud))))
			hpdp = (hugepd_t *)&pud;
		else {
			pdshift = PMD_SHIFT;
			pmdp = pmd_offset(&pud, ea);
			pmd  = READ_ONCE(*pmdp);
			/*
			 * A hugepage collapse is captured by pmd_none,
			 * because it marks the pmd none and does an hpte
			 * invalidate.
			 */
			if (pmd_none(pmd))
				return NULL;

			if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
				if (is_thp)
					*is_thp = true;
				ret_pte = (pte_t *) pmdp;
				goto out;
			}

			if (pmd_huge(pmd)) {
				ret_pte = (pte_t *) pmdp;
				goto out;
			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
				hpdp = (hugepd_t *)&pmd;
			else
				return pte_offset_kernel(&pmd, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);
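
/*
 * Caller sketch (illustrative, not from this file): a typical lockless
 * lookup disables interrupts around the walk and re-checks the pte it got
 * back, e.g.:
 *
 *	local_irq_save(flags);
 *	ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &shift);
 *	if (ptep)
 *		pte = READ_ONCE(*ptep);
 *	local_irq_restore(flags);
 *
 * Disabling interrupts holds off the RCU page-table free, as noted in the
 * comments above.
 */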

int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);

	if (!pte_access_permitted(pte, write))
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
921