/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H
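
/*
 * Kernel Userspace Execution Prevention (KUEP) and Kernel Userspace
 * Access Protection (KUAP) for 32-bit Book3S.
 *
 * Both are implemented with the 16 user segment registers: KUEP sets the
 * No-execute (Nx) bit, KUAP sets the supervisor key (Ks) bit on the
 * segments covering user addresses.
 */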

#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>

#ifdef __ASSEMBLY__

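/*
 * Write the value in gpr1 to the segment register selected by the address
 * in gpr2, then step to the next segment: bump the VSID by 0x111 (masking
 * off any overflow) and advance the address by 256MB. Repeats CTR times,
 * so the caller must load CTR and the initial SR value/address first.
 */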
.macro kuep_update_sr	gpr1, gpr2		/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	bdnz	101b
	isync
.endm

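/*
 * Set Nx (No-execute) on all NUM_USER_SEGMENTS user segment registers so
 * the kernel cannot execute code from user addresses.
 */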
.macro kuep_lock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0
	mtctr	\gpr1
	mfsrin	\gpr1, \gpr2
	oris	\gpr1, \gpr1, SR_NX@h		/* set Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm

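/*
 * Clear Nx on all user segment registers, allowing execution of user code
 * again.
 */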
.macro kuep_unlock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0
	mtctr	\gpr1
	mfsrin	\gpr1, \gpr2
	rlwinm	\gpr1, \gpr1, 0, ~SR_NX		/* Clear Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm

#ifdef CONFIG_PPC_KUAP

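/*
 * Same loop as kuep_update_sr, but bounded by the end address in gpr3
 * rather than by CTR, so only the segments covering a given range are
 * touched.
 */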
.macro kuap_update_sr	gpr1, gpr2, gpr3	/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	cmplw	\gpr2, \gpr3
	blt-	101b
	isync
.endm

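/*
 * Save thread->kuap into the exception frame. If an access window was open
 * (kuap != 0), clear thread->kuap and set Ks on the segments it covered so
 * the window is closed while running in the kernel.
 */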
.macro kuap_save_and_lock	sp, thread, gpr1, gpr2, gpr3
	lwz	\gpr2, KUAP(\thread)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
	stw	\gpr2, STACK_REGS_KUAP(\sp)
	beq+	102f
	li	\gpr1, 0
	stw	\gpr1, KUAP(\thread)
	mfsrin	\gpr1, \gpr2
	oris	\gpr1, \gpr1, SR_KS@h	/* set Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm

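/*
 * Copy the saved kuap state from the exception frame back to thread->kuap
 * and, if it describes an open window, clear Ks on those segments again.
 */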
.macro kuap_restore	sp, current, gpr1, gpr2, gpr3
	lwz	\gpr2, STACK_REGS_KUAP(\sp)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000
	stw	\gpr2, THREAD + KUAP(\current)
	beq+	102f
	mfsrin	\gpr1, \gpr2
	rlwinm	\gpr1, \gpr1, 0, ~SR_KS	/* Clear Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm

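/*
 * Debug check: warn once if thread->kuap is non-zero, i.e. a user access
 * window has been left open.
 */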
.macro kuap_check	current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
	lwz	\gpr, THREAD + KUAP(\current)
999:	twnei	\gpr, 0
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
.endm

#endif /* CONFIG_PPC_KUAP */

#else /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>

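/*
 * Write 'sr' into every segment register covering addr..end, stepping the
 * VSID for each 256MB segment. isync() provides the context synchronisation
 * required after mtsr().
 */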
static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
{
	addr &= 0xf0000000;	/* align addr to start of segment */
	barrier();	/* make sure thread.kuap is updated before playing with SRs */
	while (addr < end) {
		mtsr(sr, addr);
		sr += 0x111;		/* next VSID */
		sr &= 0xf0ffffff;	/* clear VSID overflow */
		addr += 0x10000000;	/* address of next segment */
	}
	isync();	/* Context sync required after mtsr() */
}

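/*
 * Open a user access window for a write; read-only accesses return early
 * because book3s/32 KUAP only guards against kernel writes to userspace.
 *
 * thread.kuap encodes the window: the top nibble is the first segment's
 * base address, the low nibble is the number of the segment following the
 * last one, so "kuap << 28" recovers the end address.
 */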
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      u32 size, unsigned long dir)
{
	u32 addr, end;

	BUILD_BUG_ON(!__builtin_constant_p(dir));
	BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);

	if (!(dir & KUAP_WRITE))
		return;

	addr = (__force u32)to;

	if (unlikely(addr >= TASK_SIZE || !size))
		return;

	end = min(addr + size, TASK_SIZE);

	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
	kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end);	/* Clear Ks */
}

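/*
 * Close a user access window. For KUAP_CURRENT_WRITE the range comes from
 * thread.kuap (the currently open window), for KUAP_WRITE from the to/size
 * arguments; other directions need nothing. Ks is set again on the affected
 * segments and thread.kuap is cleared.
 */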
static __always_inline void prevent_user_access(void __user *to, const void __user *from,
						u32 size, unsigned long dir)
{
	u32 addr, end;

	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (dir & KUAP_CURRENT_WRITE) {
		u32 kuap = current->thread.kuap;

		if (unlikely(!kuap))
			return;

		addr = kuap & 0xf0000000;
		end = kuap << 28;
	} else if (dir & KUAP_WRITE) {
		addr = (__force u32)to;
		end = min(addr + size, TASK_SIZE);

		if (unlikely(addr >= TASK_SIZE || !size))
			return;
	} else {
		return;
	}

	current->thread.kuap = 0;
	kuap_update_sr(mfsr(addr) | SR_KS, addr, end);	/* set Ks */
}

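/*
 * Close whatever window is currently open and return the previous
 * thread.kuap value so restore_user_access() can re-open it later.
 */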
static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = current->thread.kuap;
	unsigned long addr = flags & 0xf0000000;
	unsigned long end = flags << 28;
	void __user *to = (__force void __user *)addr;

	if (flags)
		prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);

	return flags;
}

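/*
 * Re-open the window described by a value previously returned by
 * prevent_user_access_return().
 */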
static inline void restore_user_access(unsigned long flags)
{
	unsigned long addr = flags & 0xf0000000;
	unsigned long end = flags << 28;
	void __user *to = (__force void __user *)addr;

	if (flags)
		allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
}

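/*
 * A write fault is a KUAP violation unless the faulting address falls
 * inside the window recorded in regs->kuap.
 */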
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	unsigned long begin = regs->kuap & 0xf0000000;
	unsigned long end = regs->kuap << 28;

	return is_write && (address < begin || address >= end);
}

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */