/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H

#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_KUAP

#include <linux/sched.h>

/*
 * Kernel Userspace Access Protection (KUAP) on book3s/32 is driven by the
 * Ks bit (SR_KS) of the segment registers covering user addresses: setting
 * Ks locks kernel access to the segment, clearing it opens the segment.
 *
 * thread.kuap (and the regs->kuap save slot) encode the currently-unlocked
 * user range in one word:
 *   - top 4 bits  = segment number of the start address
 *   - low 4 bits  = one past the segment number of the last byte
 * so that:
 *   start = kuap & 0xf0000000
 *   end   = kuap << 28
 * A value of 0 means no user segment is currently unlocked.
 */

/*
 * Write @sr into every segment register covering @addr .. @end (exclusive),
 * stepping the VSID for each successive 256MB segment.
 */
static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
{
	addr &= 0xf0000000;	/* align addr to start of segment */
	barrier();	/* make sure thread.kuap is updated before playing with SRs */
	while (addr < end) {
		mtsr(sr, addr);
		sr += 0x111;		/* next VSID */
		sr &= 0xf0ffffff;	/* clear VSID overflow */
		addr += 0x10000000;	/* address of next segment */
	}
	isync();	/* Context sync required after mtsr() */
}

/*
 * Interrupt entry: save the current unlock state into the exception frame
 * and re-lock any open user window (set Ks on its segments).
 */
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
	unsigned long kuap = current->thread.kuap;
	u32 addr = kuap & 0xf0000000;
	u32 end = kuap << 28;

	regs->kuap = kuap;
	if (unlikely(!kuap))
		return;	/* nothing was unlocked, nothing to re-lock */

	current->thread.kuap = 0;
	kuap_update_sr(mfsr(addr) | SR_KS, addr, end);	/* Set Ks */
}

/* Return to user: no per-task KUAP state needs restoring on book3s/32. */
static inline void kuap_user_restore(struct pt_regs *regs)
{
}

/*
 * Return to kernel: put the saved unlock state back into thread.kuap and,
 * if it differs from the state that was live at interrupt exit (@kuap),
 * re-open the saved window (clear Ks).
 */
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
	u32 addr = regs->kuap & 0xf0000000;
	u32 end = regs->kuap << 28;

	current->thread.kuap = regs->kuap;

	if (unlikely(regs->kuap == kuap))
		return;	/* segment registers already match the saved state */

	kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end);	/* Clear Ks */
}

/*
 * Read the current unlock state; with CONFIG_PPC_KUAP_DEBUG, warn if a
 * user window is still open (non-zero) where it should be locked.
 */
static inline unsigned long kuap_get_and_assert_locked(void)
{
	unsigned long kuap = current->thread.kuap;

	WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != 0);

	return kuap;
}

static inline void kuap_assert_locked(void)
{
	kuap_get_and_assert_locked();
}

/*
 * Open a user window for the access described by @dir.  Only writes need
 * the segment registers touched here (read-only access returns early);
 * the unlocked range is recorded in thread.kuap so it can be re-locked
 * and restored across interrupts.
 */
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      u32 size, unsigned long dir)
{
	u32 addr, end;

	BUILD_BUG_ON(!__builtin_constant_p(dir));
	BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);

	if (!(dir & KUAP_WRITE))
		return;

	addr = (__force u32)to;

	if (unlikely(addr >= TASK_SIZE || !size))
		return;	/* no user address involved */

	end = min(addr + size, TASK_SIZE);

	/* top nibble: start segment; low nibble: one past the end segment */
	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
	kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end);	/* Clear Ks */
}

/*
 * Close a user window.  KUAP_CURRENT_WRITE re-locks whatever thread.kuap
 * says is open; KUAP_WRITE re-locks the explicit @to/@size range; pure
 * reads need no action.
 */
static __always_inline void prevent_user_access(void __user *to, const void __user *from,
						u32 size, unsigned long dir)
{
	u32 addr, end;

	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (dir & KUAP_CURRENT_WRITE) {
		u32 kuap = current->thread.kuap;

		if (unlikely(!kuap))
			return;	/* nothing open */

		addr = kuap & 0xf0000000;
		end = kuap << 28;
	} else if (dir & KUAP_WRITE) {
		addr = (__force u32)to;
		end = min(addr + size, TASK_SIZE);

		if (unlikely(addr >= TASK_SIZE || !size))
			return;	/* no user address involved */
	} else {
		return;
	}

	current->thread.kuap = 0;
	kuap_update_sr(mfsr(addr) | SR_KS, addr, end);	/* set Ks */
}

/*
 * Lock any open user window and return the previous state for a later
 * restore_user_access().
 */
static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = current->thread.kuap;
	unsigned long addr = flags & 0xf0000000;
	unsigned long end = flags << 28;
	void __user *to = (__force void __user *)addr;

	if (flags)
		prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);

	return flags;
}

/* Re-open the window previously saved by prevent_user_access_return(). */
static inline void restore_user_access(unsigned long flags)
{
	unsigned long addr = flags & 0xf0000000;
	unsigned long end = flags << 28;
	void __user *to = (__force void __user *)addr;

	if (flags)
		allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
}

/*
 * Fault filter: a write fault at an address outside the window that was
 * unlocked at exception time (regs->kuap) is a KUAP violation.  With
 * regs->kuap == 0 both bounds are 0, so any write fault is reported.
 */
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	unsigned long begin = regs->kuap & 0xf0000000;
	unsigned long end = regs->kuap << 28;

	return is_write && (address < begin || address >= end);
}

#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */