/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 */

#include <linux/security.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>

#define YAMA_SCOPE_DISABLED	0
#define YAMA_SCOPE_RELATIONAL	1
#define YAMA_SCOPE_CAPABILITY	2
#define YAMA_SCOPE_NO_ATTACH	3

static int ptrace_scope = YAMA_SCOPE_RELATIONAL;

/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
	struct task_struct *tracer;
	struct task_struct *tracee;
	struct list_head node;
};

static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);

/**
 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
 * @tracer: the task_struct of the process doing the ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Each tracee can have, at most, one tracer registered. Each time this
 * is called, the prior registered tracer will be replaced for the tracee.
 *
 * Returns 0 if the relationship was added or replaced, -ve on error.
 */
static int yama_ptracer_add(struct task_struct *tracer,
			    struct task_struct *tracee)
{
	int rc = 0;
	struct ptrace_relation *added;
	struct ptrace_relation *entry, *relation = NULL;

	added = kmalloc(sizeof(*added), GFP_KERNEL);
	if (!added)
		return -ENOMEM;

	spin_lock_bh(&ptracer_relations_lock);
	list_for_each_entry(entry, &ptracer_relations, node)
		if (entry->tracee == tracee) {
			relation = entry;
			break;
		}
	if (!relation) {
		relation = added;
		relation->tracee = tracee;
		list_add(&relation->node, &ptracer_relations);
	}
	relation->tracer = tracer;

	spin_unlock_bh(&ptracer_relations_lock);
	if (added != relation)
		kfree(added);

	return rc;
}

/**
 * yama_ptracer_del - remove exceptions related to the given tasks
 * @tracer: remove any relation where tracer task matches
 * @tracee: remove any relation where tracee task matches
 */
static void yama_ptracer_del(struct task_struct *tracer,
			     struct task_struct *tracee)
{
	struct ptrace_relation *relation, *safe;

	spin_lock_bh(&ptracer_relations_lock);
	list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
		if (relation->tracee == tracee ||
		    (tracer && relation->tracer == tracer)) {
			list_del(&relation->node);
			kfree(relation);
		}
	spin_unlock_bh(&ptracer_relations_lock);
}

/**
 * yama_task_free - remove any ptrace exceptions involving the exiting task
 * @task: task being removed
 */
static void yama_task_free(struct task_struct *task)
{
	yama_ptracer_del(task, task);
}
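
/*
 * For reference, an illustrative userspace sketch of the prctl()
 * interface handled below (not part of this module; error handling
 * omitted and "debugger_pid" is a hypothetical pid_t already known to
 * the caller):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_PTRACER, debugger_pid, 0, 0, 0);        allow one tracer
 *	prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0);  allow any tracer
 *	prctl(PR_SET_PTRACER, 0, 0, 0, 0);                   clear the exception
 */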

/**
 * yama_task_prctl - check for Yama-specific prctl operations
 * @option: operation
 * @arg2: argument
 * @arg3: argument
 * @arg4: argument
 * @arg5: argument
 *
 * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
 * does not handle the given option.
 */
static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, unsigned long arg5)
{
	int rc;
	struct task_struct *myself = current;

	rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
	if (rc != -ENOSYS)
		return rc;

	switch (option) {
	case PR_SET_PTRACER:
		/* Because a thread can call prctl(), find the group leader
		 * before calling _add() or _del() on it: we want
		 * process-level granularity of control.  The tracer group
		 * leader checking is handled later when walking the ancestry
		 * at the time of the PTRACE_ATTACH check.
		 */
		rcu_read_lock();
		if (!thread_group_leader(myself))
			myself = rcu_dereference(myself->group_leader);
		get_task_struct(myself);
		rcu_read_unlock();

		if (arg2 == 0) {
			yama_ptracer_del(NULL, myself);
			rc = 0;
		} else if (arg2 == PR_SET_PTRACER_ANY) {
			rc = yama_ptracer_add(NULL, myself);
		} else {
			struct task_struct *tracer;

			rcu_read_lock();
			tracer = find_task_by_vpid(arg2);
			if (tracer)
				get_task_struct(tracer);
			else
				rc = -EINVAL;
			rcu_read_unlock();

			if (tracer) {
				rc = yama_ptracer_add(tracer, myself);
				put_task_struct(tracer);
			}
		}

		put_task_struct(myself);
		break;
	}

	return rc;
}

/**
 * task_is_descendant - walk up a process family tree looking for a match
 * @parent: the process to compare against while walking up from child
 * @child: the process to start from while looking upwards for parent
 *
 * Returns 1 if child is a descendant of parent, 0 if not.
 */
static int task_is_descendant(struct task_struct *parent,
			      struct task_struct *child)
{
	int rc = 0;
	struct task_struct *walker = child;

	if (!parent || !child)
		return 0;

	rcu_read_lock();
	if (!thread_group_leader(parent))
		parent = rcu_dereference(parent->group_leader);
	while (walker->pid > 0) {
		if (!thread_group_leader(walker))
			walker = rcu_dereference(walker->group_leader);
		if (walker == parent) {
			rc = 1;
			break;
		}
		walker = rcu_dereference(walker->real_parent);
	}
	rcu_read_unlock();

	return rc;
}

/**
 * ptracer_exception_found - tracer registered as exception for this tracee
 * @tracer: the task_struct of the process attempting ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Returns 1 if tracer is covered by a ptracer exception registered for
 * tracee: either PR_SET_PTRACER_ANY was used, or tracer is the registered
 * ptracer or one of its descendants.  Returns 0 otherwise.
 */
static int ptracer_exception_found(struct task_struct *tracer,
				   struct task_struct *tracee)
{
	int rc = 0;
	struct ptrace_relation *relation;
	struct task_struct *parent = NULL;
	bool found = false;

	spin_lock_bh(&ptracer_relations_lock);
	rcu_read_lock();
	if (!thread_group_leader(tracee))
		tracee = rcu_dereference(tracee->group_leader);
	list_for_each_entry(relation, &ptracer_relations, node)
		if (relation->tracee == tracee) {
			parent = relation->tracer;
			found = true;
			break;
		}

	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
		rc = 1;
	rcu_read_unlock();
	spin_unlock_bh(&ptracer_relations_lock);

	return rc;
}
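
/*
 * Summary of what each ptrace_scope level permits at PTRACE_ATTACH time
 * (a descriptive note derived from the handler below):
 *
 *	0 - YAMA_SCOPE_DISABLED:   no restrictions beyond the commoncap
 *				   checks.
 *	1 - YAMA_SCOPE_RELATIONAL: the tracee must be a descendant of the
 *				   tracer, must have registered an exception
 *				   covering the tracer via PR_SET_PTRACER,
 *				   or the tracer must hold CAP_SYS_PTRACE in
 *				   the tracee's user namespace.
 *	2 - YAMA_SCOPE_CAPABILITY: the tracer must hold CAP_SYS_PTRACE in
 *				   the tracee's user namespace.
 *	3 - YAMA_SCOPE_NO_ATTACH:  attach is never permitted; the sysctl
 *				   handler locks this value once it is set.
 */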

/**
 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
 * @child: task that current task is attempting to ptrace
 * @mode: ptrace attach mode
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
static int yama_ptrace_access_check(struct task_struct *child,
				    unsigned int mode)
{
	int rc;

	/* If standard caps disallows it, so does Yama.  We should
	 * only tighten restrictions further.
	 */
	rc = cap_ptrace_access_check(child, mode);
	if (rc)
		return rc;

	/* require ptrace target be a child of ptracer on attach */
	if (mode == PTRACE_MODE_ATTACH) {
		switch (ptrace_scope) {
		case YAMA_SCOPE_DISABLED:
			/* No additional restrictions. */
			break;
		case YAMA_SCOPE_RELATIONAL:
			if (!task_is_descendant(current, child) &&
			    !ptracer_exception_found(current, child) &&
			    !ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
				rc = -EPERM;
			break;
		case YAMA_SCOPE_CAPABILITY:
			if (!ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
				rc = -EPERM;
			break;
		case YAMA_SCOPE_NO_ATTACH:
		default:
			rc = -EPERM;
			break;
		}
	}

	if (rc) {
		char name[sizeof(current->comm)];
		printk_ratelimited(KERN_NOTICE
			"ptrace of pid %d was attempted by: %s (pid %d)\n",
			child->pid,
			get_task_comm(name, current),
			current->pid);
	}

	return rc;
}

static struct security_operations yama_ops = {
	.name =			"yama",

	.ptrace_access_check =	yama_ptrace_access_check,
	.task_prctl =		yama_task_prctl,
	.task_free =		yama_task_free,
};

#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc;

	if (write && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (rc)
		return rc;

	/* Lock the max value if it ever gets set. */
	if (write && *(int *)table->data == *(int *)table->extra2)
		table->extra1 = table->extra2;

	return rc;
}

static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;

static struct ctl_path yama_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "yama", },
	{ }
};

static struct ctl_table yama_sysctl_table[] = {
	{
		.procname = "ptrace_scope",
		.data = &ptrace_scope,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = yama_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &max_scope,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static __init int yama_init(void)
{
	if (!security_module_enable(&yama_ops))
		return 0;

	printk(KERN_INFO "Yama: becoming mindful.\n");

	if (register_security(&yama_ops))
		panic("Yama: kernel registration failed.\n");

#ifdef CONFIG_SYSCTL
	if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
		panic("Yama: sysctl registration failed.\n");
#endif

	return 0;
}

security_initcall(yama_init);
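
/*
 * Administration sketch (illustrative; assumes Yama is the active LSM
 * and CONFIG_SYSCTL is enabled).  The scope is exposed as
 * /proc/sys/kernel/yama/ptrace_scope and can be read or raised at
 * runtime, e.g.:
 *
 *	cat /proc/sys/kernel/yama/ptrace_scope
 *	sysctl -w kernel.yama.ptrace_scope=2
 *
 * Writes require CAP_SYS_PTRACE, and once YAMA_SCOPE_NO_ATTACH (3) has
 * been written the handler above pins the minimum to the maximum, so
 * the scope cannot be lowered again until reboot.
 */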