// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "task_kfunc_common.h"

char _license[] SEC("license") = "GPL";

int err, pid;

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *	TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;

/* Only run the test body when the current PID matches the globally configured 'pid'. */
static bool is_test_kfunc_task(void)
{
	int cur_pid = bpf_get_current_pid_tgid() >> 32;

	return pid == cur_pid;
}

/* Check that the weak ksyms resolved as expected, then acquire and release a
 * reference on @task.
 */
static int test_acquire_release(struct task_struct *task)
{
	struct task_struct *acquired = NULL;

	if (!bpf_ksym_exists(bpf_task_acquire)) {
		err = 3;
		return 0;
	}
	if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
		err = 4;
		return 0;
	}
	if (bpf_ksym_exists(invalid_kfunc)) {
		/* the verifier's dead code elimination should remove this */
		err = 5;
		asm volatile ("goto -1"); /* for (;;); */
	}

	acquired = bpf_task_acquire(task);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 6;

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(task);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(bpf_get_current_task_btf());
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
{
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status)
		err = 1;

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	/* Swap the kptr out of the map. The program now owns the reference and
	 * must release it.
	 */
	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr) {
		err = 3;
		return 0;
	}

	bpf_task_release(kptr);

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	/* Read the kptr directly from the map under the RCU read lock, then
	 * acquire and release a reference on it.
	 */
	bpf_rcu_read_lock();
	kptr = v->task;
	if (!kptr) {
		err = 3;
	} else {
		kptr = bpf_task_acquire(kptr);
		if (!kptr)
			err = 4;
		else
			bpf_task_release(kptr);
	}
	bpf_rcu_read_unlock();

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *current, *acquired;

	if (!is_test_kfunc_task())
		return 0;

	current = bpf_get_current_task_btf();
	acquired = bpf_task_acquire(current);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 1;

	return 0;
}

/* Look up @p by PID and verify that the returned task has the same PID. */
static void lookup_compare_pid(const struct task_struct *p)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(p->pid);
	if (!acquired) {
		err = 1;
		return;
	}

	if (acquired->pid != p->pid)
		err = 2;
	bpf_task_release(acquired);
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(task);
	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(bpf_get_current_task_btf());
	return 0;
}

/* Return 1 if bpf_task_from_pid() finds a task for @pid, 0 otherwise. */
static int is_pid_lookup_valid(s32 pid)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(pid);
	if (acquired) {
		bpf_task_release(acquired);
		return 1;
	}

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	/* Read task->comm at various offsets and lengths. */
	bpf_strncmp(task->comm, 12, "foo");
	bpf_strncmp(task->comm, 16, "foo");
	bpf_strncmp(&task->comm[8], 4, "foo");

	if (is_pid_lookup_valid(-1)) {
		err = 1;
		return 0;
	}

	if (is_pid_lookup_valid(0xcafef00d)) {
		err = 2;
		return 0;
	}

	return 0;
}

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* task->group_leader is listed as a trusted, non-NULL field of task struct. */
	acquired = bpf_task_acquire(task->group_leader);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 1;

	return 0;
}