// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "task_kfunc_common.h"

char _license[] SEC("license") = "GPL";

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 * TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

/* NOTE: every program in this file deliberately misuses the task kfunc API
 * (as described by the inline comments on each program). Each one is expected
 * to be REJECTED by the BPF verifier; do not "fix" the bugs below — they are
 * the point of the test.
 */

/* Insert @task into the task kfunc map and return a pointer to the stored map
 * value, or NULL if either the insert or the subsequent lookup fails. The
 * task kptr held in the returned value is untrusted from the verifier's
 * perspective (it was read out of a map, not acquired).
 */
static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task)
{
	int status;

	status = tasks_kfunc_map_insert(task);
	if (status)
		return NULL;

	return tasks_kfunc_map_value_lookup(task);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	/* Can't invoke bpf_task_acquire() on an untrusted pointer. */
	acquired = bpf_task_acquire(v->task);
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags;

	/* Can't invoke bpf_task_acquire() on a random frame pointer. */
	acquired = bpf_task_acquire((struct task_struct *)&stack_task);
	bpf_task_release(acquired);

	return 0;
}

/* kretprobe arguments are untrusted, so acquiring here must be rejected. */
SEC("kretprobe/free_task")
int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_acquire(task);
	/* Can't release a bpf_task_acquire()'d task without a NULL check. */
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* Can't invoke bpf_task_acquire() on a trusted pointer obtained from walking a struct. */
	acquired = bpf_task_acquire(task->group_leader);
	bpf_task_release(acquired);

	return 0;
}


SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* Can't invoke bpf_task_acquire() on a NULL pointer. */
	acquired = bpf_task_acquire(NULL);
	if (!acquired)
		return 0;
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_acquire(task);

	/* Acquired task is never released: reference leak the verifier must catch. */

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_get_non_kptr_param, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;

	/* Cannot use bpf_task_kptr_get() on a non-kptr, even on a valid task. */
	kptr = bpf_task_kptr_get(&task);
	if (!kptr)
		return 0;

	bpf_task_release(kptr);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr, *acquired;

	acquired = bpf_task_acquire(task);

	/* Cannot use bpf_task_kptr_get() on a non-kptr, even if it was acquired. */
	kptr = bpf_task_kptr_get(&acquired);
	bpf_task_release(acquired);
	if (!kptr)
		return 0;

	bpf_task_release(kptr);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_get_null, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;

	/* Cannot use bpf_task_kptr_get() on a NULL pointer. */
	kptr = bpf_task_kptr_get(NULL);
	if (!kptr)
		return 0;

	bpf_task_release(kptr);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	/* bpf_kptr_xchg() transfers ownership of the stored kptr to us. */
	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr)
		return 0;

	/* Kptr retrieved from map is never released: reference leak. */

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	kptr = bpf_task_kptr_get(&v->task);
	if (!kptr)
		return 0;

	/* Kptr acquired above is never released: reference leak. */

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags)
{
	struct __tasks_kfunc_map_value *v;

	v = insert_lookup_task(task);
	if (!v)
		return 0;

	/* Can't invoke bpf_task_release() on an untrusted pointer. */
	bpf_task_release(v->task);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired = (struct task_struct *)&clone_flags;

	/* Cannot release random frame pointer. */
	bpf_task_release(acquired);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags)
{
	struct __tasks_kfunc_map_value local, *v;
	long status;
	struct task_struct *acquired, *old;
	s32 pid;

	status = bpf_probe_read_kernel(&pid, sizeof(pid), &task->pid);
	if (status)
		return 0;

	/* Seed the map with a NULL kptr so the later xchg may return NULL. */
	local.task = NULL;
	status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
	if (status)
		return status;

	v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
	if (!v)
		return -ENOENT;

	acquired = bpf_task_acquire(task);

	old = bpf_kptr_xchg(&v->task, acquired);

	/* old cannot be passed to bpf_task_release() without a NULL check. */
	bpf_task_release(old);
	bpf_task_release(old);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags)
{
	/* Cannot release trusted task pointer which was not acquired. */
	bpf_task_release(task);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* bpf_task_from_pid() may return NULL if no task matches the pid. */
	acquired = bpf_task_from_pid(task->pid);

	/* Releasing bpf_task_from_pid() lookup without a NULL check. */
	bpf_task_release(acquired);

	return 0;
}

SEC("lsm/task_free")
int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
{
	struct task_struct *acquired;

	/* The argument of the lsm task_free hook is untrusted, so acquiring it
	 * must be rejected by the verifier.
	 */
	acquired = bpf_task_acquire(task);
	bpf_task_release(acquired);
	return 0;
}