/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H

#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/synch.h>

#ifndef __ASSEMBLY__

#include <linux/jump_label.h>

extern struct static_key_false disable_kuap_key;
extern struct static_key_false disable_kuep_key;

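/*
 * KUAP (Kernel Userspace Access Protection) and KUEP (Kernel Userspace
 * Execution Prevention) can be disabled at boot time (typically via the
 * "nosmap" and "nosmep" early parameters); the static keys above are
 * then flipped so that the helpers below reduce to a single nop.  When
 * the corresponding CONFIG option is off, they compile away entirely.
 */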
static __always_inline bool kuap_is_disabled(void)
{
	return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
}

static __always_inline bool kuep_is_disabled(void)
{
	return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
}

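/*
 * book3s/32 has no dedicated execute-prevention facility, so KUEP is
 * built out of the segment registers: kuep_lock() sets SR_NX on all
 * user segments so that instruction fetch from user addresses faults
 * while the CPU runs in the kernel, and kuep_unlock() clears it again
 * before returning to userspace.
 */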
static inline void kuep_lock(void)
{
	if (kuep_is_disabled())
		return;

	update_user_segments(mfsr(0) | SR_NX);
	/*
	 * This isync() shouldn't be necessary as the kernel is not expected to
	 * run any instruction in userspace soon after the update of segments,
	 * but hash based cores (at least G3) seem to exhibit a random
	 * behaviour when the 'isync' is not there. 603 cores don't have this
	 * behaviour so don't do the 'isync' as it saves several CPU cycles.
	 */
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		isync();	/* Context sync required after mtsr() */
}

static inline void kuep_unlock(void)
{
	if (kuep_is_disabled())
		return;

	update_user_segments(mfsr(0) & ~SR_NX);
	/*
	 * This isync() shouldn't be necessary as a 'rfi' will soon be executed
	 * to return to userspace, but hash based cores (at least G3) seem to
	 * exhibit a random behaviour when the 'isync' is not there. 603 cores
	 * don't have this behaviour so don't do the 'isync' as it saves several
	 * CPU cycles.
	 */
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		isync();	/* Context sync required after mtsr() */
}

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>

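/*
 * current->thread.kuap tracks which user segment, if any, is unlocked
 * for kernel writes: KUAP_NONE means everything is locked, KUAP_ALL
 * means every user segment is unlocked, and any other value is a user
 * effective address whose 256MB segment is unlocked.
 */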
#define KUAP_NONE	(~0UL)
#define KUAP_ALL	(~1UL)

static inline void kuap_lock_one(unsigned long addr)
{
	mtsr(mfsr(addr) | SR_KS, addr);
	isync();	/* Context sync required after mtsr() */
}

static inline void kuap_unlock_one(unsigned long addr)
{
	mtsr(mfsr(addr) & ~SR_KS, addr);
	isync();	/* Context sync required after mtsr() */
}

static inline void kuap_lock_all(void)
{
	update_user_segments(mfsr(0) | SR_KS);
	isync();	/* Context sync required after mtsr() */
}

static inline void kuap_unlock_all(void)
{
	update_user_segments(mfsr(0) & ~SR_KS);
	isync();	/* Context sync required after mtsr() */
}

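/*
 * Out-of-line variants of the "all segments" paths: rewriting every
 * user segment register is a fairly long instruction sequence, so
 * callers for which KUAP_ALL is the unlikely case (ool == true in
 * kuap_lock()/kuap_unlock() below) call a real function instead of
 * inlining that sequence at every site.
 */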
void kuap_lock_all_ool(void);
void kuap_unlock_all_ool(void);

static inline void kuap_lock(unsigned long addr, bool ool)
{
	if (likely(addr != KUAP_ALL))
		kuap_lock_one(addr);
	else if (!ool)
		kuap_lock_all();
	else
		kuap_lock_all_ool();
}

static inline void kuap_unlock(unsigned long addr, bool ool)
{
	if (likely(addr != KUAP_ALL))
		kuap_unlock_one(addr);
	else if (!ool)
		kuap_unlock_all();
	else
		kuap_unlock_all_ool();
}

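/*
 * Interrupt entry/exit pairing: kuap_save_and_lock() stashes the
 * current unlock state in the interrupted context's pt_regs and closes
 * the window, and kuap_kernel_restore() reopens it when that context
 * is resumed.
 */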
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
	unsigned long kuap = current->thread.kuap;

	if (kuap_is_disabled())
		return;

	regs->kuap = kuap;
	if (unlikely(kuap == KUAP_NONE))
		return;

	current->thread.kuap = KUAP_NONE;
	kuap_lock(kuap, false);
}

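/*
 * Nothing to do when returning to userspace: SR_KS only selects the
 * key used for supervisor-mode accesses, so whatever state the kernel
 * left in the segment registers is invisible to the user task.
 */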
static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
	if (kuap_is_disabled())
		return;

	if (unlikely(kuap != KUAP_NONE)) {
		current->thread.kuap = KUAP_NONE;
		kuap_lock(kuap, false);
	}

	if (likely(regs->kuap == KUAP_NONE))
		return;

	current->thread.kuap = regs->kuap;

	kuap_unlock(regs->kuap, false);
}

static inline unsigned long kuap_get_and_assert_locked(void)
{
	unsigned long kuap = current->thread.kuap;

	if (kuap_is_disabled())
		return KUAP_NONE;

	WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);

	return kuap;
}

static inline void kuap_assert_locked(void)
{
	kuap_get_and_assert_locked();
}

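/*
 * Due to the page protection encodings used for user pages, the
 * segment keys can only block kernel *writes* to userspace; kernel
 * reads of user memory cannot be prevented on book3s/32.  Read-only
 * access windows therefore need no segment update, which is why the
 * read-only cases below return early.
 */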
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      u32 size, unsigned long dir)
{
	if (kuap_is_disabled())
		return;

	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (!(dir & KUAP_WRITE))
		return;

	current->thread.kuap = (__force u32)to;
	kuap_unlock_one((__force u32)to);
}

static __always_inline void prevent_user_access(unsigned long dir)
{
	u32 kuap = current->thread.kuap;

	if (kuap_is_disabled())
		return;

	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (!(dir & KUAP_WRITE))
		return;

	current->thread.kuap = KUAP_NONE;
	kuap_lock(kuap, true);
}

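/*
 * Save/restore flavour of the above, for code that must temporarily
 * close an already-open user access window and reopen it later, e.g.:
 *
 *	unsigned long flags = prevent_user_access_return();
 *	... code that must run with user access closed ...
 *	restore_user_access(flags);
 */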
static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = current->thread.kuap;

	if (kuap_is_disabled())
		return KUAP_NONE;

	if (flags != KUAP_NONE) {
		current->thread.kuap = KUAP_NONE;
		kuap_lock(flags, true);
	}

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	if (kuap_is_disabled())
		return;

	if (flags != KUAP_NONE) {
		current->thread.kuap = flags;
		kuap_unlock(flags, true);
	}
}

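/*
 * Fault handler helper: decide whether a write fault at a user address
 * is a genuine KUAP violation.  The top four address bits identify the
 * 256MB segment; a write that lands outside the currently unlocked
 * segment (e.g. an access crossing a segment boundary) is not reported
 * as a violation, but the window is widened to all segments so the
 * access succeeds when retried after the fault returns.
 */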
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	unsigned long kuap = regs->kuap;

	if (kuap_is_disabled())
		return false;

	if (!is_write || kuap == KUAP_ALL)
		return false;
	if (kuap == KUAP_NONE)
		return true;

	/* If faulting address doesn't match unlocked segment, unlock all */
	if ((kuap ^ address) & 0xf0000000)
		regs->kuap = KUAP_ALL;

	return false;
}

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */