#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#include <asm-generic/pgtable-nopud.h>

#define PTE_INDEX_SIZE  8
#define PMD_INDEX_SIZE  10
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE  12

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
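
/*
 * Worked out (editorial sketch): with 64K base pages PAGE_SHIFT is 16,
 * so the levels above cover 16 + 8 + 10 + 12 = 46 bits of virtual
 * address space, i.e. 64TB.
 */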

/* With 64k base page size, hugepage PTEs go at the PTE level */
#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
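
/*
 * Worked values (sketch): PMD_SHIFT = 16 + 8 = 24, so one PMD entry
 * maps 16MB; PGDIR_SHIFT = 24 + 10 = 34, so one PGD entry maps 16GB.
 * These match the 16MB/16GB hugepage sizes handled further down.
 */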

/* Bits to mask out from a PMD to get to the PTE page */
/* PMDs point to PTE table fragments which are 4K aligned.  */
#define PMD_MASKED_BITS		0xfff
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS		0x1ff
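
/*
 * The arithmetic behind PMD_MASKED_BITS (sketch): a PTE table is
 * PTRS_PER_PTE * sizeof(real_pte_t) = 256 * 16 = 4K bytes, so the
 * fragments are naturally 4K aligned and the low 12 bits (0xfff) of
 * a PMD entry carry no address information.
 */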

#define _PAGE_COMBO	0x00020000 /* this is a combo 4k page */
#define _PAGE_4K_PFN	0x00040000 /* PFN is for a single 4k page */
/*
 * Used to track the validity of the subpage group when _PAGE_COMBO is
 * set. This overloads _PAGE_F_GIX and _PAGE_F_SECOND.
 */
#define _PAGE_COMBO_VALID	(_PAGE_F_GIX | _PAGE_F_SECOND)

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_F_SECOND | \
			 _PAGE_F_GIX | _PAGE_HASHPTE | _PAGE_COMBO)
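
/*
 * Roughly speaking, these software bits record where the translation
 * sits in the hash table; PTE update paths preserve them so a stale
 * HPTE can still be located and invalidated later.
 */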

/*
 * Shift to put page number into pte.
 *
 * That gives us a max RPN of 34 bits, which means a max of 50 bits
 * of addressable physical space, or 46 bits for the special 4k PFNs.
 */
#define PTE_RPN_SHIFT	(30)
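
/*
 * The arithmetic (sketch): a PTE is 64 bits, so 64 - 30 = 34 RPN bits;
 * 34 + PAGE_SHIFT (16) = 50 bits of physical address, or 34 + 12 = 46
 * bits when the RPN is a 4K PFN (_PAGE_4K_PFN).
 */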

#ifndef __ASSEMBLY__

/*
 * With 64K pages on a hash MMU, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page
 * information, in order to deal with a 64K page made of 4K HW pages.
 * Thus we override the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;
	rpte.hidx = 0;
	if (pte_val(pte) & _PAGE_COMBO) {
		/*
		 * Make sure we order the hidx load against the _PAGE_COMBO
		 * check. The store side ordering is done in __hash_page_4K
		 */
		smp_rmb();
		hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
		rpte.hidx = *hidxp;
	}
	return rpte;
}
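
/*
 * Layout sketch: a 64K page is 16 * 4K subpages, and each subpage gets
 * a 4-bit hash slot index, so the 16 nibbles fit exactly in the one
 * unsigned long stored PTRS_PER_PTE entries past the pte itself.
 */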

static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	/* 4 bits of hidx per 4K subpage; index << 2 selects the nibble */
	if ((pte_val(rpte.pte) & _PAGE_COMBO))
		return (rpte.hidx >> (index << 2)) & 0xf;
	/* not a combo page: the slot lives in the pte itself */
	return (pte_val(rpte.pte) >> 12) & 0xf;
}
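
/*
 * Example (sketch): for subpage index 5 the hidx nibble sits at bits
 * 20..23, i.e. (rpte.hidx >> 20) & 0xf.
 */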

#define __rpte_to_pte(r)	((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
/*
 * Trick: we set __end to va + 64k, which happens to work for a 16M
 * page as well, since we want only one iteration in that case.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)
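
/*
 * Typical use (sketch, modeled on the hash flush paths such as
 * flush_hash_page()):
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hash = hpt_hash(vpn, shift, ssize);
 *		hidx = __rpte_to_hidx(rpte, index);
 *		... invalidate the HPTE found via (hash, hidx) ...
 *	} pte_iterate_hashed_end();
 */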

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & _PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

#define remap_4k_pfn(vma, addr, pfn, prot)				\
	(WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL :	\
		remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,	\
			__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))
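
/*
 * Usage sketch (hypothetical driver code): map a single 4K page into
 * userspace; the WARN_ON checks the PFN fits the 46-bit 4K-PFN space:
 *
 *	err = remap_4k_pfn(vma, vma->vm_start, pfn, vma->vm_page_prot);
 */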

#define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
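
/*
 * Note: PTE_TABLE_SIZE is sized on real_pte_t, not pte_t, so the table
 * is twice the size of the pte array alone; the second half holds the
 * per-subpage hidx words that __real_pte() reads.
 */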

#define pgd_pte(pgd)	(pud_pte(((pud_t){ pgd })))
#define pte_pgd(pte)	((pgd_t)pte_pud(pte))

#ifdef CONFIG_HUGETLB_PAGE
/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can
 * have a 16GB hugepage pte in the PGD and a 16MB hugepage pte at the
 * PMD.
 *
 * Defined in such a way that we can optimize away the code blocks at
 * build time if CONFIG_HUGETLB_PAGE=n.
 */
static inline int pmd_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pmd_val(pmd) & 0x3) != 0x0);
}

static inline int pud_huge(pud_t pud)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pud_val(pud) & 0x3) != 0x0);
}

static inline int pgd_huge(pgd_t pgd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pgd_val(pgd) & 0x3) != 0x0);
}
#define pgd_huge pgd_huge
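
/*
 * Why this works (sketch): a pointer to a lower-level table is at
 * least 4-byte aligned, so its bottom two bits are 00, while a leaf
 * hugepage pte carries pte flag bits (e.g. _PAGE_PRESENT) there.
 */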

#ifdef CONFIG_DEBUG_VM
extern int hugepd_ok(hugepd_t hpd);
#define is_hugepd(hpd)		(hugepd_ok(hpd))
#else
/*
 * With 64k page size, we have hugepage ptes in the pgd and pmd
 * entries. We don't need to set up a hugepage directory for them.
 * Our pte and page directory formats enable this.
 */
static inline int hugepd_ok(hugepd_t hpd)
{
	return 0;
}
#define is_hugepd(pdep)			0
#endif /* CONFIG_DEBUG_VM */

#endif /* CONFIG_HUGETLB_PAGE */

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */