/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

#include <asm/asm-const.h>

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/* Bits to set in a PMD/PUD/PGD entry to mark it valid */
#define HASH_PMD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PUD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PGD_VAL_BITS		(0x8000000000000000UL)

/*
 * Size of the EA range mapped by our page tables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
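
/*
 * Worked example (illustrative only; the real index sizes live in
 * hash-4k.h / hash-64k.h and vary with the config): assuming
 * hypothetical values H_PTE_INDEX_SIZE = 8, H_PMD_INDEX_SIZE = 10,
 * H_PUD_INDEX_SIZE = 7, H_PGD_INDEX_SIZE = 8 and PAGE_SHIFT = 16,
 *
 *	H_PGTABLE_EADDR_SIZE = 8 + 10 + 7 + 8 + 16 = 49
 *	H_PGTABLE_RANGE      = 1UL << 49          = 512T of EA space
 */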

/*
 * We store the slot details in the second half of the page table.
 * Increase the PUD-level table size so that hugetlb PTEs can be
 * stored at the PUD level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
#endif
/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE  ASM_CONST(0x0000400000000000) /* 64T */

/*
 * The vmalloc space starts at the beginning of that region and
 * occupies the first 56T of it; the remaining 8T are left for the
 * kernel I/O (ioremap) space, which starts at H_KERN_IO_START below.
 */
#define H_VMALLOC_START	H_KERN_VIRT_START
#define H_VMALLOC_SIZE	ASM_CONST(0x380000000000) /* 56T */
#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START	H_VMALLOC_END
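
/*
 * Resulting layout of the kernel non-linear region (derived from the
 * constants above; illustrative sketch):
 *
 *	0xD000000000000000	H_VMALLOC_START
 *	   ... 56T of vmalloc space ...
 *	0xD000380000000000	H_VMALLOC_END == H_KERN_IO_START
 *	   ... 8T of I/O (ioremap) space ...
 *	0xD000400000000000	end of the 64T kernel virtual area
 */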

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
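
/*
 * Example: REGION_ID() is just the top nibble of an effective address,
 * so with the traditional hash layout (PAGE_OFFSET at
 * 0xC000000000000000):
 *
 *	REGION_ID(PAGE_OFFSET)       == 0xc == KERNEL_REGION_ID
 *	REGION_ID(H_VMALLOC_START)   == 0xd == VMALLOC_REGION_ID
 *	REGION_ID(H_VMEMMAP_BASE)    == 0xf == VMEMMAP_REGION_ID
 *	REGION_ID(any user address)  == 0x0 == USER_REGION_ID
 */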

/*
 * Defines the base address of the vmemmap area, which lives in its
 * own region on hash MMU CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
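
/*
 * A stored hash slot value is a 4-bit nibble that decodes as follows
 * (illustrative sketch):
 *
 *	group_ix  = slot & _PTEIDX_GROUP_IX;	// index within the PTE group
 *	secondary = slot & _PTEIDX_SECONDARY;	// set if in the secondary hash
 */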

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					 unsigned long addr,
					 pte_t *ptep, unsigned long clr,
					 unsigned long set,
					 int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	/*
	 * Load-reserve the PTE, spin while H_PAGE_BUSY is set, clear
	 * the 'clr' bits, set the 'set' bits, then store-conditional;
	 * retry the whole sequence if the reservation is lost.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
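
/*
 * Typical use (sketch): a ptep_get_and_clear()-style operation clears
 * every bit and sets none, e.g.
 *
 *	old = hash__pte_update(mm, addr, ptep, ~0UL, 0, 0);
 *
 * which atomically zeroes the PTE and returns the old contents;
 * hash__pte_update() itself queues any stale HPTE for flushing.
 */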

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE.
 * This function doesn't need to flush the hash entry, since bits
 * are only ever being added.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	/* Same busy-wait ldarx/stdcx. loop as hash__pte_update() above. */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}
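
/*
 * Sketch of a typical use (illustrative only): a minor fault that just
 * needs to mark a PTE young and dirty can do
 *
 *	hash__ptep_set_access_flags(ptep, pte_mkdirty(pte_mkyoung(*ptep)));
 *
 * without going anywhere near the hash table.
 */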

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}
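
/*
 * Both helpers mask out the hash bookkeeping bits, so, illustratively,
 *
 *	hash__pte_none(__pte(H_PAGE_HASHPTE))
 *
 * is true, and two PTEs that differ only in _PAGE_HPTEFLAGS still
 * compare equal via hash__pte_same().
 */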

unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors; it's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * On 64-bit the PTE is a single word and can just be stored
	 * normally; no special handling is needed here.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
					      unsigned long page_size,
					      unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
				     unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */