/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

#include <asm/asm-const.h>

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/* Bits to set in a PMD/PUD/PGD entry valid bit */
#define HASH_PMD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PUD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PGD_VAL_BITS		(0x8000000000000000UL)
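
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the high bit above marks an entry as a valid pointer to the next
 * table level, so a raw-value test like this one distinguishes a
 * populated entry from an empty one.
 */
#ifndef __ASSEMBLY__
static inline int example_hash_pmd_entry_valid(unsigned long pmdval)
{
	return (pmdval & HASH_PMD_VAL_BITS) != 0;
}
#endif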

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
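
/*
 * Illustrative sketch (hypothetical helper): an effective address can
 * only be covered by the page tables if it lies within the
 * 2^H_PGTABLE_EADDR_SIZE byte range computed above.
 */
#ifndef __ASSEMBLY__
static inline int example_ea_in_pgtable_range(unsigned long ea)
{
	return ea < H_PGTABLE_RANGE;
}
#endif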

/*
 * We store the slot details in the second half of the page table.
 * Increase the pud level table so that hugetlb ptes can be stored
 * at pud level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
#endif
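
/*
 * Illustrative sketch (assumption, hypothetical macro): the cache
 * index is taken to size the table allocation, so bumping it by one
 * doubles the table, leaving the second half for the slot details.
 */
#ifndef __ASSEMBLY__
#define EXAMPLE_PUD_TABLE_BYTES	(sizeof(unsigned long) << H_PUD_CACHE_INDEX)
#endif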

/*
 * Define the address range of the kernel non-linear virtual area. In contrast
 * to the linear mapping, this is managed using the kernel page tables and then
 * inserted into the hash page table to actually take effect, similarly to user
 * mappings.
 */
#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)

/*
 * Allow virtual mapping of one context size.
 * 512TB for 64K page size
 * 64TB for 4K page size
 */
#define H_KERN_VIRT_SIZE (1UL << MAX_EA_BITS_PER_CONTEXT)
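
/*
 * Worked example (hypothetical macro): one context is
 * 1UL << MAX_EA_BITS_PER_CONTEXT bytes, i.e. 2^49 = 512TB with 64K
 * pages and 2^46 = 64TB with 4K pages, so the region ends at:
 */
#define EXAMPLE_H_KERN_VIRT_END	(H_KERN_VIRT_START + H_KERN_VIRT_SIZE)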

/*
 * 8TB IO mapping size
 */
#define H_KERN_IO_SIZE ASM_CONST(0x80000000000) /* 8T */

/*
 * The vmalloc space starts at the beginning of the kernel non-linear virtual
 * region, and occupies 504T (64K) or 56T (4K)
 */
#define H_VMALLOC_START H_KERN_VIRT_START
#define H_VMALLOC_SIZE (H_KERN_VIRT_SIZE - H_KERN_IO_SIZE)
#define H_VMALLOC_END  (H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START H_VMALLOC_END
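
/*
 * Illustrative sketch (hypothetical helper): with the layout above the
 * non-linear region splits into [H_VMALLOC_START, H_VMALLOC_END) for
 * vmalloc and [H_KERN_IO_START, H_KERN_IO_START + H_KERN_IO_SIZE) for
 * IO mappings.
 */
#ifndef __ASSEMBLY__
static inline int example_addr_is_vmalloc(unsigned long addr)
{
	return addr >= H_VMALLOC_START && addr < H_VMALLOC_END;
}
#endif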

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
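
/*
 * Illustrative sketch (hypothetical helper): REGION_ID() extracts the
 * top nibble of an effective address, so classifying an address is a
 * simple comparison against the constants above.
 */
#ifndef __ASSEMBLY__
static inline int example_ea_is_user(unsigned long ea)
{
	return REGION_ID(ea) == USER_REGION_ID;
}
#endif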

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
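
/*
 * Worked example (hypothetical helper): with VMEMMAP_REGION_ID = 0xf
 * and REGION_SHIFT = 60, H_VMEMMAP_BASE evaluates to
 * 0xf000000000000000.
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_vmemmap_base(void)
{
	return 0xfUL << 60;	/* == H_VMEMMAP_BASE */
}
#endif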

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
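
/*
 * Illustrative sketch (hypothetical helpers): the hash slot of a PTE
 * is recorded as a nibble; bit 3 selects the secondary hash bucket and
 * bits 0-2 index the slot within its 8-entry PTE group.
 */
#ifndef __ASSEMBLY__
static inline int example_slot_is_secondary(unsigned long pteidx)
{
	return (pteidx & _PTEIDX_SECONDARY) != 0;
}

static inline unsigned long example_slot_group_index(unsigned long pteidx)
{
	return pteidx & _PTEIDX_GROUP_IX;
}
#endif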

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define	hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);

/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					 unsigned long addr,
					 pte_t *ptep, unsigned long clr,
					 unsigned long set,
					 int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	/*
	 * Spin while H_PAGE_BUSY is set, then atomically clear 'clr'
	 * and set 'set' in the (big-endian) PTE via ldarx/stdcx.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
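
/*
 * Usage sketch (hypothetical wrapper, not part of this header): write
 * protecting a PTE goes through hash__pte_update() so that any stale
 * hash page table entry gets flushed.
 */
static inline void example_hash_wrprotect(struct mm_struct *mm,
					  unsigned long addr, pte_t *ptep)
{
	hash__pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}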

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	/*
	 * Spin while H_PAGE_BUSY is set, then OR the new permission and
	 * status bits into the PTE.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/*
 * This low-level function performs the actual PTE insertion. Setting
 * the PTE depends on the MMU type and other factors; it's a horrible
 * mess that I'm not going to try to clean up now, but I'm keeping it
 * in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * On hash, we just store the PTE normally; the hash page table
	 * entry is only created later, at hash fault time.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
					      unsigned long page_size,
					      unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
				     unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
int hash__remove_section_mapping(unsigned long start, unsigned long end);
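
/*
 * Usage sketch (hypothetical wrapper): mapping a single kernel page at
 * effective address 'ea' to physical address 'pa' with ordinary kernel
 * permissions; PAGE_KERNEL is assumed to be visible to callers of this
 * header.
 */
static inline int example_map_one_kernel_page(unsigned long ea,
					      unsigned long pa)
{
	return hash__map_kernel_page(ea, pa, PAGE_KERNEL);
}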

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */