/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
 * a 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD;
 *
 * Defined in such a way that the code blocks can be optimized away at
 * build time if CONFIG_HUGETLB_PAGE=n.
 */
static inline int pmd_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page
	 */
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}

static inline int pud_huge(pud_t pud)
{
	/*
	 * leaf pte for huge page
	 */
	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}

static inline int pgd_huge(pgd_t pgd)
{
	/*
	 * leaf pte for huge page
	 */
	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
}
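/*
 * Advertise that we provide our own pgd_huge() so the generic fallback,
 * which always returns 0, is not used.
 */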
#define pgd_huge pgd_huge

/*
 * With 64k page size, hugepage ptes are stored directly in the pgd and pmd
 * entries. We don't need to set up a hugepage directory for them; our pte
 * and page directory format allows this.
 */
static inline int hugepd_ok(hugepd_t hpd)
{
	return 0;
}

#define is_hugepd(pdep)			0

/*
 * This should never get called
 */
static inline int get_hugepd_cache_index(int index)
{
	BUG();
}

#else /* !CONFIG_HUGETLB_PAGE */
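/*
 * With CONFIG_HUGETLB_PAGE=n, report no huge entries so callers can be
 * optimized away at build time.
 */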
static inline int pmd_huge(pmd_t pmd) { return 0; }
static inline int pud_huge(pud_t pud) { return 0; }
#endif /* CONFIG_HUGETLB_PAGE */

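/*
 * 4K sub-page mappings are a hash MMU feature; there is no radix
 * equivalent, so this must not be reached with radix enabled.
 */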
static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
			       unsigned long pfn, pgprot_t prot)
{
	if (radix_enabled())
		BUG();
	return hash__remap_4k_pfn(vma, addr, pfn, prot);
}
#endif	/* __ASSEMBLY__ */
#endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H */