/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PKEYS_H
#define _ASM_X86_PKEYS_H

#define ARCH_DEFAULT_PKEY	0

/*
 * If more than 16 keys are ever supported, a thorough audit
 * will be necessary to ensure that the types that store key
 * numbers and masks have sufficient capacity.
 */
#define arch_max_pkey() (cpu_feature_enabled(X86_FEATURE_OSPKE) ? 16 : 1)

extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
		unsigned long init_val);

static inline bool arch_pkeys_enabled(void)
{
	return cpu_feature_enabled(X86_FEATURE_OSPKE);
}

/*
 * Try to dedicate one of the protection keys to be used as an
 * execute-only protection key.
 */
extern int __execute_only_pkey(struct mm_struct *mm);
static inline int execute_only_pkey(struct mm_struct *mm)
{
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return ARCH_DEFAULT_PKEY;

	return __execute_only_pkey(mm);
}

extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
		int prot, int pkey);
static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
		int prot, int pkey)
{
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return 0;

	return __arch_override_mprotect_pkey(vma, prot, pkey);
}

extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
		unsigned long init_val);

#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | VM_PKEY_BIT3)

#define mm_pkey_allocation_map(mm)	(mm->context.pkey_allocation_map)
#define mm_set_pkey_allocated(mm, pkey) do {		\
	mm_pkey_allocation_map(mm) |= (1U << pkey);	\
} while (0)
#define mm_set_pkey_free(mm, pkey) do {			\
	mm_pkey_allocation_map(mm) &= ~(1U << pkey);	\
} while (0)

static inline
bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
	/*
	 * "Allocated" pkeys are those that have been returned
	 * from pkey_alloc() or pkey 0 which is allocated
	 * implicitly when the mm is created.
	 */
	if (pkey < 0)
		return false;
	if (pkey >= arch_max_pkey())
		return false;
	/*
	 * The exec-only pkey is set in the allocation map, but
	 * is not available to any of the user interfaces like
	 * mprotect_pkey().
	 */
	if (pkey == mm->context.execute_only_pkey)
		return false;

	return mm_pkey_allocation_map(mm) & (1U << pkey);
}

/*
 * Returns a positive, 4-bit key on success, or -1 on failure.
 */
static inline
int mm_pkey_alloc(struct mm_struct *mm)
{
	/*
	 * Note: this is the one and only place we make sure
	 * that the pkey is valid as far as the hardware is
	 * concerned.  The rest of the kernel trusts that
	 * only good, valid pkeys come out of here.
	 */
	u16 all_pkeys_mask = ((1U << arch_max_pkey()) - 1);
	int ret;

	/*
	 * Are we out of pkeys?  We must handle this specially
	 * because ffz() behavior is undefined if there are no
	 * zeros.
	 */
	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
		return -1;

	ret = ffz(mm_pkey_allocation_map(mm));

	mm_set_pkey_allocated(mm, ret);

	return ret;
}

static inline
int mm_pkey_free(struct mm_struct *mm, int pkey)
{
	if (!mm_pkey_is_allocated(mm, pkey))
		return -EINVAL;

	mm_set_pkey_free(mm, pkey);

	return 0;
}

static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}

#endif /*_ASM_X86_PKEYS_H */
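
/*
 * Illustrative sketch, not part of this header: roughly how a caller is
 * expected to combine mm_pkey_alloc(), arch_set_user_pkey_access() and
 * mm_pkey_free(), loosely modeled on the generic pkey_alloc() syscall
 * path in mm/mprotect.c.  The function name example_pkey_alloc() is
 * hypothetical, the error values and locking are assumptions about the
 * caller rather than guarantees made by this header, and the usual
 * includes (<linux/sched.h>, <linux/mm.h>, <linux/errno.h>,
 * <linux/pkeys.h>) are assumed.
 */
static int example_pkey_alloc(unsigned long init_val)
{
	struct mm_struct *mm = current->mm;
	int pkey, ret;

	/* The allocation map lives in mm->context, so serialize on mmap_lock. */
	mmap_write_lock(mm);

	pkey = mm_pkey_alloc(mm);	/* -1 once all hardware keys are taken */
	if (pkey == -1) {
		ret = -ENOSPC;
		goto out;
	}

	/* Program the initial access rights for the new key. */
	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(mm, pkey);	/* roll back the allocation */
		goto out;
	}

	ret = pkey;
out:
	mmap_write_unlock(mm);
	return ret;
}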