/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H

#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/synch.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>

#define KUAP_NONE	(~0UL)

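/*
 * KUAP (Kernel Userspace Access Prevention) on book3s/32 is driven by the
 * Ks (supervisor key) bit of the segment registers: with Ks set, kernel
 * accesses to a user segment are performed with key 1, under which user
 * pages are read-only, so stray kernel writes to userspace fault.  Reads
 * are never blocked by this scheme.  current->thread.kuap holds the user
 * address whose 256MB segment is currently unlocked for writing, or
 * KUAP_NONE when all user segments are locked.
 */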
static __always_inline void kuap_lock_one(unsigned long addr)
{
	mtsr(mfsr(addr) | SR_KS, addr);
	isync();	/* Context sync required after mtsr() */
}

static __always_inline void kuap_unlock_one(unsigned long addr)
{
	mtsr(mfsr(addr) & ~SR_KS, addr);
	isync();	/* Context sync required after mtsr() */
}

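/*
 * Assembly flavours of kuap_unlock_one()/kuap_lock_one().  The whole
 * mfsrin/mtsrin sequence lives in an MMU feature section, so it is patched
 * out at boot and becomes a no-op when MMU_FTR_KUAP is not set.
 */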
static __always_inline void uaccess_begin_32s(unsigned long addr)
{
	unsigned long tmp;

	asm volatile(ASM_MMU_FTR_IFSET(
		"mfsrin %0, %1;"
		"rlwinm %0, %0, 0, %2;"
		"mtsrin %0, %1;"
		"isync", "", %3)
		: "=&r"(tmp)
		: "r"(addr), "i"(~SR_KS), "i"(MMU_FTR_KUAP)
		: "memory");
}

static __always_inline void uaccess_end_32s(unsigned long addr)
{
	unsigned long tmp;

	asm volatile(ASM_MMU_FTR_IFSET(
		"mfsrin %0, %1;"
		"oris %0, %0, %2;"
		"mtsrin %0, %1;"
		"isync", "", %3)
		: "=&r"(tmp)
		: "r"(addr), "i"(SR_KS >> 16), "i"(MMU_FTR_KUAP)
		: "memory");
}

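/*
 * Interrupt/exception entry: save the thread's KUAP state into the regs
 * frame and relock the segment that was left open, so a window opened by
 * the interrupted code does not leak into the handler.
 */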
static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
{
	unsigned long kuap = current->thread.kuap;

	regs->kuap = kuap;
	if (unlikely(kuap == KUAP_NONE))
		return;

	current->thread.kuap = KUAP_NONE;
	kuap_lock_one(kuap);
}
#define __kuap_save_and_lock __kuap_save_and_lock

static __always_inline void kuap_user_restore(struct pt_regs *regs)
{
}

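/*
 * Interrupt exit back to the kernel: close any window the handler itself
 * left open, then reopen the segment recorded on entry, if there was one.
 */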
static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
	if (unlikely(kuap != KUAP_NONE)) {
		current->thread.kuap = KUAP_NONE;
		kuap_lock_one(kuap);
	}

	if (likely(regs->kuap == KUAP_NONE))
		return;

	current->thread.kuap = regs->kuap;

	kuap_unlock_one(regs->kuap);
}

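/*
 * Sanity hook for entry code: with CONFIG_PPC_KUAP_DEBUG, warn if a user
 * access window is still open when KUAP is expected to be locked.
 */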
static __always_inline unsigned long __kuap_get_and_assert_locked(void)
{
	unsigned long kuap = current->thread.kuap;

	WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);

	return kuap;
}
#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked

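/*
 * Only writes are guarded on book3s/32: reads from userspace always
 * succeed, so a KUAP_READ-only window needs no segment change.  For writes,
 * record the destination address and unlock the segment containing it.
 */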
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      u32 size, unsigned long dir)
{
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (!(dir & KUAP_WRITE))
		return;

	current->thread.kuap = (__force u32)to;
	uaccess_begin_32s((__force u32)to);
}

static __always_inline void prevent_user_access(unsigned long dir)
{
	u32 kuap = current->thread.kuap;

	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (!(dir & KUAP_WRITE))
		return;

	current->thread.kuap = KUAP_NONE;
	uaccess_end_32s(kuap);
}

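/*
 * Save-and-close / reopen pair used around code that may nest inside an
 * open user access window: prevent_user_access_return() hands back the
 * previous window so restore_user_access() can bring it back afterwards.
 */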
static __always_inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = current->thread.kuap;

	if (flags != KUAP_NONE) {
		current->thread.kuap = KUAP_NONE;
		uaccess_end_32s(flags);
	}

	return flags;
}

static __always_inline void restore_user_access(unsigned long flags)
{
	if (flags != KUAP_NONE) {
		current->thread.kuap = flags;
		uaccess_begin_32s(flags);
	}
}

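/*
 * Called from the fault handler: return true when the access is a genuine
 * KUAP violation (a write while no user segment is unlocked).  Read faults
 * and writes that merely landed outside the currently unlocked segment are
 * not violations; the latter are fixed up below and the access retried.
 */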
static __always_inline bool
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	unsigned long kuap = regs->kuap;

	if (!is_write)
		return false;
	if (kuap == KUAP_NONE)
		return true;

	/*
	 * If faulting address doesn't match unlocked segment, change segment.
	 * In case of unaligned store crossing two segments, emulate store.
	 */
	if ((kuap ^ address) & 0xf0000000) {
		if (!(kuap & 0x0fffffff) && address > kuap - 4 && fix_alignment(regs)) {
			regs_add_return_ip(regs, 4);
			emulate_single_step(regs);
		} else {
			regs->kuap = address;
		}
	}

	return false;
}

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */