1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
3 #define _ASM_POWERPC_BOOK3S_32_KUP_H
4 
5 #include <asm/book3s/32/mmu-hash.h>
6 
7 #ifdef __ASSEMBLY__
8 
/*
 * Write \gpr1 into the segment register selected by address \gpr2, then
 * step to the next segment, repeating CTR times.  The caller pre-loads
 * CTR with the segment count and \gpr1 with the first SR value.
 */
.macro kuep_update_sr	gpr1, gpr2		/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	bdnz	101b
	isync					/* context sync required after mtsrin (see C variant below) */
.endm
17 
/*
 * Kernel Userspace Execution Prevention: set No-execute (SR_NX) on all
 * NUM_USER_SEGMENTS user segment registers.  Clobbers \gpr1, \gpr2, CTR.
 */
.macro kuep_lock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0			/* start at address 0 (segment 0) */
	mtctr	\gpr1				/* loop count for kuep_update_sr */
	mfsrin	\gpr1, \gpr2			/* fetch first SR as template */
	oris	\gpr1, \gpr1, SR_NX@h		/* set Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm
28 
/*
 * Counterpart of kuep_lock: clear No-execute (SR_NX) on all
 * NUM_USER_SEGMENTS user segment registers.  Clobbers \gpr1, \gpr2, CTR.
 */
.macro kuep_unlock	gpr1, gpr2
#ifdef CONFIG_PPC_KUEP
	li	\gpr1, NUM_USER_SEGMENTS
	li	\gpr2, 0			/* start at address 0 (segment 0) */
	mtctr	\gpr1				/* loop count for kuep_update_sr */
	mfsrin	\gpr1, \gpr2			/* fetch first SR as template */
	rlwinm	\gpr1, \gpr1, 0, ~SR_NX		/* Clear Nx */
	kuep_update_sr \gpr1, \gpr2
#endif
.endm
39 
40 #ifdef CONFIG_PPC_KUAP
41 
/*
 * Write \gpr1 into the segment register selected by address \gpr2 and
 * walk forward one segment at a time until \gpr2 reaches the (exclusive)
 * end address in \gpr3.  Assembly twin of the C kuap_update_sr() below.
 */
.macro kuap_update_sr	gpr1, gpr2, gpr3	/* NEVER use r0 as gpr2 due to addis */
101:	mtsrin	\gpr1, \gpr2
	addi	\gpr1, \gpr1, 0x111		/* next VSID */
	rlwinm	\gpr1, \gpr1, 0, 0xf0ffffff	/* clear VSID overflow */
	addis	\gpr2, \gpr2, 0x1000		/* address of next segment */
	cmplw	\gpr2, \gpr3
	blt-	101b
	isync					/* context sync required after mtsrin (see C variant below) */
.endm
51 
/*
 * On exception entry: save thread.kuap (the currently unlocked user range,
 * encoded as start-address top nibble | end-segment low nibble, see
 * allow_user_access() below) into the exception frame, then re-lock that
 * range by setting Ks in its segment registers.  Clobbers \gpr1-\gpr3, cr0.
 */
.macro kuap_save_and_lock	sp, thread, gpr1, gpr2, gpr3
	lwz	\gpr2, KUAP(\thread)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000	/* end segment nibble -> end address; cr0 set if none */
	stw	\gpr2, STACK_REGS_KUAP(\sp)
	beq+	102f				/* nothing unlocked, skip */
	li	\gpr1, 0
	stw	\gpr1, KUAP(\thread)		/* everything locked again */
	mfsrin	\gpr1, \gpr2			/* SR selection only uses the top address bits */
	oris	\gpr1, \gpr1, SR_KS@h	/* set Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm
64 
/*
 * On exception exit: restore thread.kuap from the exception frame and, if a
 * user range was unlocked when the exception hit, unlock it again by
 * clearing Ks in its segment registers.  Clobbers \gpr1-\gpr3, cr0.
 */
.macro kuap_restore	sp, current, gpr1, gpr2, gpr3
	lwz	\gpr2, STACK_REGS_KUAP(\sp)
	rlwinm.	\gpr3, \gpr2, 28, 0xf0000000	/* end segment nibble -> end address; cr0 set if none */
	stw	\gpr2, THREAD + KUAP(\current)
	beq+	102f				/* nothing was unlocked, skip */
	mfsrin	\gpr1, \gpr2			/* SR selection only uses the top address bits */
	rlwinm	\gpr1, \gpr1, 0, ~SR_KS	/* Clear Ks */
	kuap_update_sr	\gpr1, \gpr2, \gpr3
102:
.endm
75 
76 .macro kuap_check	current, gpr
77 #ifdef CONFIG_PPC_KUAP_DEBUG
78 	lwz	\gpr2, KUAP(thread)
79 999:	twnei	\gpr, 0
80 	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
81 #endif
82 .endm
83 
84 #endif /* CONFIG_PPC_KUAP */
85 
86 #else /* !__ASSEMBLY__ */
87 
88 #ifdef CONFIG_PPC_KUAP
89 
90 #include <linux/sched.h>
91 
92 static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
93 {
94 	barrier();	/* make sure thread.kuap is updated before playing with SRs */
95 	while (addr < end) {
96 		mtsrin(sr, addr);
97 		sr += 0x111;		/* next VSID */
98 		sr &= 0xf0ffffff;	/* clear VSID overflow */
99 		addr += 0x10000000;	/* address of next segment */
100 	}
101 	isync();	/* Context sync required after mtsrin() */
102 }
103 
104 static inline void allow_user_access(void __user *to, const void __user *from, u32 size)
105 {
106 	u32 addr, end;
107 
108 	if (__builtin_constant_p(to) && to == NULL)
109 		return;
110 
111 	addr = (__force u32)to;
112 
113 	if (!addr || addr >= TASK_SIZE || !size)
114 		return;
115 
116 	end = min(addr + size, TASK_SIZE);
117 	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
118 	kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);	/* Clear Ks */
119 }
120 
121 static inline void prevent_user_access(void __user *to, const void __user *from, u32 size)
122 {
123 	u32 addr = (__force u32)to;
124 	u32 end = min(addr + size, TASK_SIZE);
125 
126 	if (!addr || addr >= TASK_SIZE || !size)
127 		return;
128 
129 	current->thread.kuap = 0;
130 	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
131 }
132 
133 static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
134 {
135 	if (!is_write)
136 		return false;
137 
138 	return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
139 }
140 
141 #endif /* CONFIG_PPC_KUAP */
142 
143 #endif /* __ASSEMBLY__ */
144 
145 #endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */
146