// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fdtable.h>
#include <linux/string.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kcmp.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/eventpoll.h>
#include <linux/file.h>

#include <asm/unistd.h>

/*
 * We don't expose the real in-memory order of objects for security reasons,
 * but the comparison results should still be suitable for sorting. So we
 * obfuscate kernel pointer values and compare the products instead.
 *
 * The obfuscation is done in two steps. First we xor the kernel pointer with
 * a random value, which puts the pointer into a new position in a reordered
 * space. Second, we multiply the xor product by a large odd random number to
 * permute its bits even more (the odd multiplier guarantees that the product
 * is unique even after the high bits are truncated, since any odd number is
 * relatively prime to 2^n).
 *
 * Note also that the obfuscation itself is invisible to userspace and, if
 * needed, can be changed to an alternative scheme.
 */
static unsigned long cookies[KCMP_TYPES][2] __read_mostly;

static long kptr_obfuscate(long v, int type)
{
	return (v ^ cookies[type][0]) * cookies[type][1];
}

/*
 * 0 - equal, i.e. v1 = v2
 * 1 - less than, i.e. v1 < v2
 * 2 - greater than, i.e. v1 > v2
 * 3 - not equal but ordering unavailable (reserved for future)
 */
static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
{
	long t1, t2;

	t1 = kptr_obfuscate((long)v1, type);
	t2 = kptr_obfuscate((long)v2, type);

	return (t1 < t2) | ((t1 > t2) << 1);
}
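
/*
 * Worked example of the return encoding above, with t1 and t2 being the
 * obfuscated values:
 *
 *	t1 == t2:  (0) | (0 << 1) == 0	- equal
 *	t1 <  t2:  (1) | (0 << 1) == 1	- less than
 *	t1 >  t2:  (0) | (1 << 1) == 2	- greater than
 *
 * Value 3 is never produced by kcmp_ptr(); it is reserved for types that
 * could report inequality without an ordering. Distinct pointers always
 * compare unequal because multiplication by the odd cookie is a bijection
 * modulo 2^BITS_PER_LONG: e.g. with 4-bit words, multiplying by 13 maps
 * 5 -> 1 and 6 -> 14 (no collision), whereas an even multiplier such as 12
 * maps both 1 and 5 to 12.
 */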

/* The caller must have pinned the task */
static struct file *
get_file_raw_ptr(struct task_struct *task, unsigned int idx)
{
	struct file *file = NULL;

	task_lock(task);
	rcu_read_lock();

	if (task->files)
		file = fcheck_files(task->files, idx);

	rcu_read_unlock();
	task_unlock(task);

	return file;
}

static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
{
	if (likely(m2 != m1))
		mutex_unlock(m2);
	mutex_unlock(m1);
}

static int kcmp_lock(struct mutex *m1, struct mutex *m2)
{
	int err;

	if (m2 > m1)
		swap(m1, m2);

	err = mutex_lock_killable(m1);
	if (!err && likely(m1 != m2)) {
		err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
		if (err)
			mutex_unlock(m1);
	}

	return err;
}

#ifdef CONFIG_EPOLL
static int kcmp_epoll_target(struct task_struct *task1,
			     struct task_struct *task2,
			     unsigned long idx1,
			     struct kcmp_epoll_slot __user *uslot)
{
	struct file *filp, *filp_epoll, *filp_tgt;
	struct kcmp_epoll_slot slot;

	if (copy_from_user(&slot, uslot, sizeof(slot)))
		return -EFAULT;

	filp = get_file_raw_ptr(task1, idx1);
	if (!filp)
		return -EBADF;

	filp_epoll = fget_task(task2, slot.efd);
	if (!filp_epoll)
		return -EBADF;

	filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
	fput(filp_epoll);

	if (IS_ERR(filp_tgt))
		return PTR_ERR(filp_tgt);

	return kcmp_ptr(filp, filp_tgt, KCMP_FILE);
}
#else
static int kcmp_epoll_target(struct task_struct *task1,
			     struct task_struct *task2,
			     unsigned long idx1,
			     struct kcmp_epoll_slot __user *uslot)
{
	return -EOPNOTSUPP;
}
#endif

SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
		unsigned long, idx1, unsigned long, idx2)
{
	struct task_struct *task1, *task2;
	int ret;

	rcu_read_lock();

	/*
	 * Tasks are looked up in the caller's PID namespace only.
	 */
	task1 = find_task_by_vpid(pid1);
	task2 = find_task_by_vpid(pid2);
	if (!task1 || !task2)
		goto err_no_task;

	get_task_struct(task1);
	get_task_struct(task2);

	rcu_read_unlock();

	/*
	 * One should have enough rights to inspect task details.
	 */
	ret = kcmp_lock(&task1->signal->exec_update_mutex,
			&task2->signal->exec_update_mutex);
	if (ret)
		goto err;
	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
		ret = -EPERM;
		goto err_unlock;
	}

	switch (type) {
	case KCMP_FILE: {
		struct file *filp1, *filp2;

		filp1 = get_file_raw_ptr(task1, idx1);
		filp2 = get_file_raw_ptr(task2, idx2);

		if (filp1 && filp2)
			ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
		else
			ret = -EBADF;
		break;
	}
	case KCMP_VM:
		ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
		break;
	case KCMP_FILES:
		ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
		break;
	case KCMP_FS:
		ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
		break;
	case KCMP_SIGHAND:
		ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
		break;
	case KCMP_IO:
		ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
		break;
	case KCMP_SYSVSEM:
#ifdef CONFIG_SYSVIPC
		ret = kcmp_ptr(task1->sysvsem.undo_list,
			       task2->sysvsem.undo_list,
			       KCMP_SYSVSEM);
#else
		ret = -EOPNOTSUPP;
#endif
		break;
	case KCMP_EPOLL_TFD:
		ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2);
		break;
	default:
		ret = -EINVAL;
		break;
	}

err_unlock:
	kcmp_unlock(&task1->signal->exec_update_mutex,
		    &task2->signal->exec_update_mutex);
err:
	put_task_struct(task1);
	put_task_struct(task2);

	return ret;

err_no_task:
	rcu_read_unlock();
	return -ESRCH;
}

static __init int kcmp_cookies_init(void)
{
	int i;

	get_random_bytes(cookies, sizeof(cookies));

	for (i = 0; i < KCMP_TYPES; i++)
		cookies[i][1] |= (~(~0UL >> 1) | 1);

	return 0;
}
arch_initcall(kcmp_cookies_init);
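
/*
 * Usage sketch (illustrative only, compiled out): kcmp() has no glibc
 * wrapper, so userspace invokes it through syscall(2). The helper name
 * below is made up for the example; it asks whether two tasks share one
 * files_struct, i.e. one file descriptor table.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kcmp.h>

static int kcmp_files_shared(pid_t pid1, pid_t pid2)
{
	/*
	 * Return values:
	 *   0    - both tasks use the same files_struct
	 *   1, 2 - different files_struct (ordering only useful for sorting)
	 *   -1   - error, errno set (e.g. ESRCH, EPERM)
	 */
	return syscall(SYS_kcmp, pid1, pid2, KCMP_FILES, 0, 0);
}
#endif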