/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * PowerPC Memory Protection Keys management
 *
 * Copyright 2017, Ram Pai, IBM Corporation.
 */

#ifndef _ASM_POWERPC_KEYS_H
#define _ASM_POWERPC_KEYS_H

#include <linux/jump_label.h>
#include <asm/firmware.h>

DECLARE_STATIC_KEY_FALSE(pkey_disabled);
extern int num_pkey;
extern u32 reserved_allocation_mask; /* bits set for reserved keys */

#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
			    VM_PKEY_BIT3 | VM_PKEY_BIT4)

/* Override any generic PKEY permission defines */
#define PKEY_DISABLE_EXECUTE	0x4
#undef  PKEY_ACCESS_MASK
#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS | \
				 PKEY_DISABLE_WRITE  | \
				 PKEY_DISABLE_EXECUTE)

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/pkeys.h>
#else
#error "Not supported"
#endif

static inline u64 pkey_to_vmflag_bits(u16 pkey)
{
	return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
}

static inline int vma_pkey(struct vm_area_struct *vma)
{
	if (static_branch_likely(&pkey_disabled))
		return 0;
	return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
}

static inline int arch_max_pkey(void)
{
	return num_pkey;
}

#define pkey_alloc_mask(pkey)	(0x1 << pkey)

#define mm_pkey_allocation_map(mm)	(mm->context.pkey_allocation_map)

#define __mm_pkey_allocated(mm, pkey) {	\
	mm_pkey_allocation_map(mm) |= pkey_alloc_mask(pkey); \
}

#define __mm_pkey_free(mm, pkey) {	\
	mm_pkey_allocation_map(mm) &= ~pkey_alloc_mask(pkey);	\
}

#define __mm_pkey_is_allocated(mm, pkey)	\
	(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))

#define __mm_pkey_is_reserved(pkey) (reserved_allocation_mask & \
				     pkey_alloc_mask(pkey))

static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
	if (pkey < 0 || pkey >= arch_max_pkey())
		return false;

	/* Reserved keys are never allocated. */
	if (__mm_pkey_is_reserved(pkey))
		return false;

	return __mm_pkey_is_allocated(mm, pkey);
}

/*
 * Returns a positive, 5-bit key on success, or -1 on failure.
 * Relies on the mmap_lock to protect against concurrency in
 * mm_pkey_alloc() and mm_pkey_free().
 */
static inline int mm_pkey_alloc(struct mm_struct *mm)
{
	/*
	 * Note: this is the one and only place we make sure that the pkey is
	 * valid as far as the hardware is concerned. The rest of the kernel
	 * trusts that only good, valid pkeys come out of here.
	 */
	u32 all_pkeys_mask = (u32)(~(0x0));
	int ret;

	if (static_branch_likely(&pkey_disabled))
		return -1;

	/*
	 * Are we out of pkeys? We must handle this specially because ffz()
	 * behavior is undefined if there are no zeros.
	 */
	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
		return -1;

	ret = ffz((u32)mm_pkey_allocation_map(mm));
	__mm_pkey_allocated(mm, ret);

	return ret;
}

static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
{
	if (static_branch_likely(&pkey_disabled))
		return -1;

	if (!mm_pkey_is_allocated(mm, pkey))
		return -EINVAL;

	__mm_pkey_free(mm, pkey);

	return 0;
}

/*
 * Try to dedicate one of the protection keys to be used as an
 * execute-only protection key.
 */
extern int __execute_only_pkey(struct mm_struct *mm);
static inline int execute_only_pkey(struct mm_struct *mm)
{
	if (static_branch_likely(&pkey_disabled))
		return -1;

	return __execute_only_pkey(mm);
}

extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
					 int prot, int pkey);
static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
					      int prot, int pkey)
{
	if (static_branch_likely(&pkey_disabled))
		return 0;

	/*
	 * Is this an mprotect_pkey() call? If so, never override the value
	 * that came from the user.
	 */
	if (pkey != -1)
		return pkey;

	return __arch_override_mprotect_pkey(vma, prot, pkey);
}

extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
				       unsigned long init_val);
static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
					    unsigned long init_val)
{
	if (static_branch_likely(&pkey_disabled))
		return -EINVAL;

	/*
	 * userspace should not change pkey-0 permissions.
	 * pkey-0 is associated with every page in the kernel.
	 * If userspace denies any permission on pkey-0, the
	 * kernel cannot operate.
	 */
	if (pkey == 0)
		return init_val ? -EINVAL : 0;

	return __arch_set_user_pkey_access(tsk, pkey, init_val);
}

static inline bool arch_pkeys_enabled(void)
{
	return !static_branch_likely(&pkey_disabled);
}

extern void pkey_mm_init(struct mm_struct *mm);
extern bool arch_supports_pkeys(int cap);
extern unsigned int arch_usable_pkeys(void);
extern void thread_pkey_regs_save(struct thread_struct *thread);
extern void thread_pkey_regs_restore(struct thread_struct *new_thread,
				     struct thread_struct *old_thread);
extern void thread_pkey_regs_init(struct thread_struct *thread);
#endif /*_ASM_POWERPC_KEYS_H */
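/*
 * Illustrative userspace sketch (not part of this header; it would live in
 * its own test program, in the spirit of the kernel's protection_keys
 * selftest). It shows how the per-mm bookkeeping above surfaces through the
 * generic pkey syscalls, roughly: pkey_alloc() ends up in mm_pkey_alloc()
 * and arch_set_user_pkey_access(), pkey_mprotect() stores the key in the
 * VMA flags later read back by vma_pkey(), and pkey_free() reaches
 * mm_pkey_free(). The glibc wrappers used below exist since glibc 2.27;
 * the mapping size and error handling are made up for the example.
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pkey;

	if (p == MAP_FAILED)
		return 1;

	/* Ask the kernel for an unallocated, non-reserved key. */
	pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
	if (pkey < 0) {
		perror("pkey_alloc");	/* e.g. ENOSPC when all keys are in use */
		return 1;
	}

	/* Tag the mapping's VMA with the key. */
	if (pkey_mprotect(p, len, PROT_READ | PROT_WRITE, pkey))
		perror("pkey_mprotect");

	pkey_free(pkey);
	munmap(p, len);
	return 0;
}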