/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H

#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/synch.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>

#define KUAP_NONE	(~0UL)

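/*
 * KUAP on book3s/32 is implemented with the Ks (supervisor-state key) bit
 * of the segment registers: while Ks is set on a user segment, kernel
 * accesses to that segment are checked with key 1, which blocks kernel
 * writes to user pages. current->thread.kuap holds the user address whose
 * 256MB segment is currently unlocked, or KUAP_NONE when every user
 * segment is locked.
 *
 * kuap_lock_one() and kuap_unlock_one() respectively set and clear Ks in
 * the segment register covering @addr.
 */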
static __always_inline void kuap_lock_one(unsigned long addr)
{
	mtsr(mfsr(addr) | SR_KS, addr);
	isync();	/* Context sync required after mtsr() */
}

static __always_inline void kuap_unlock_one(unsigned long addr)
{
	mtsr(mfsr(addr) & ~SR_KS, addr);
	isync();	/* Context sync required after mtsr() */
}

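/*
 * Open the user access window: clear Ks in the segment register covering
 * @addr. ASM_MMU_FTR_IFSET() turns the whole sequence into nops at boot
 * when the MMU_FTR_KUAP feature is not set.
 */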
static __always_inline void uaccess_begin_32s(unsigned long addr)
{
	unsigned long tmp;

	asm volatile(ASM_MMU_FTR_IFSET(
		"mfsrin %0, %1;"
		"rlwinm %0, %0, 0, %2;"
		"mtsrin %0, %1;"
		"isync", "", %3)
		: "=&r"(tmp)
		: "r"(addr), "i"(~SR_KS), "i"(MMU_FTR_KUAP)
		: "memory");
}

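/*
 * Close the user access window: set Ks again. oris takes a 16-bit
 * immediate, hence SR_KS >> 16 (Ks sits in the upper half of the
 * segment register).
 */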
static __always_inline void uaccess_end_32s(unsigned long addr)
{
	unsigned long tmp;

	asm volatile(ASM_MMU_FTR_IFSET(
		"mfsrin %0, %1;"
		"oris %0, %0, %2;"
		"mtsrin %0, %1;"
		"isync", "", %3)
		: "=&r"(tmp)
		: "r"(addr), "i"(SR_KS >> 16), "i"(MMU_FTR_KUAP)
		: "memory");
}

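/*
 * Interrupt entry: save the current state in regs->kuap and relock the
 * segment that was open, if any, so the access window is not left open
 * while the interrupt handler runs.
 */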
static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
{
	unsigned long kuap = current->thread.kuap;

	regs->kuap = kuap;
	if (unlikely(kuap == KUAP_NONE))
		return;

	current->thread.kuap = KUAP_NONE;
	kuap_lock_one(kuap);
}
#define __kuap_save_and_lock __kuap_save_and_lock

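/* Nothing to do when returning to userspace: the Ks key does not restrict user-mode accesses. */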
static __always_inline void kuap_user_restore(struct pt_regs *regs)
{
}

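/*
 * Interrupt exit back to the kernel: relock anything the handler left
 * open, then reopen the segment that was open at interrupt entry
 * (saved in regs->kuap by __kuap_save_and_lock()).
 */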
static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
	if (unlikely(kuap != KUAP_NONE)) {
		current->thread.kuap = KUAP_NONE;
		kuap_lock_one(kuap);
	}

	if (likely(regs->kuap == KUAP_NONE))
		return;

	current->thread.kuap = regs->kuap;

	kuap_unlock_one(regs->kuap);
}

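/*
 * Return the current state; with CONFIG_PPC_KUAP_DEBUG, warn if a user
 * access window was left open.
 */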
static __always_inline unsigned long __kuap_get_and_assert_locked(void)
{
	unsigned long kuap = current->thread.kuap;

	WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);

	return kuap;
}
#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked

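/*
 * Open a user access window for the destination of a user copy. Only
 * writes need unlocking on book3s/32: the segment key does not prevent
 * kernel reads of user pages. Only the 256MB segment containing @to is
 * unlocked; a write that crosses into the next segment is handled by
 * __bad_kuap_fault() below.
 */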
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      u32 size, unsigned long dir)
{
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (!(dir & KUAP_WRITE))
		return;

	current->thread.kuap = (__force u32)to;
	uaccess_begin_32s((__force u32)to);
}

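/* Close the window opened by allow_user_access(). */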
static __always_inline void prevent_user_access(unsigned long dir)
{
	u32 kuap = current->thread.kuap;

	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (!(dir & KUAP_WRITE))
		return;

	current->thread.kuap = KUAP_NONE;
	uaccess_end_32s(kuap);
}

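/*
 * Close any open window and return the previous state so it can be
 * re-established later with restore_user_access().
 */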
static __always_inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = current->thread.kuap;

	if (flags != KUAP_NONE) {
		current->thread.kuap = KUAP_NONE;
		uaccess_end_32s(flags);
	}

	return flags;
}

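/* Reopen the window saved by prevent_user_access_return(). */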
static __always_inline void restore_user_access(unsigned long flags)
{
	if (flags != KUAP_NONE) {
		current->thread.kuap = flags;
		uaccess_begin_32s(flags);
	}
}

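/*
 * Fault handler helper: return true for a genuine KUAP violation, i.e. a
 * kernel write to userspace while no access window is open. Returning
 * false lets the faulting access be retried, possibly after moving the
 * window to the faulting segment below.
 */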
static __always_inline bool
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	unsigned long kuap = regs->kuap;

	if (!is_write)
		return false;
	if (kuap == KUAP_NONE)
		return true;

	/*
	 * If faulting address doesn't match unlocked segment, change segment.
	 * In case of unaligned store crossing two segments, emulate store.
	 */
	if ((kuap ^ address) & 0xf0000000) {
		if (!(kuap & 0x0fffffff) && address > kuap - 4 && fix_alignment(regs)) {
			regs_add_return_ip(regs, 4);
			emulate_single_step(regs);
		} else {
			regs->kuap = address;
		}
	}

	return false;
}

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */