// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#include "profiler.h"

#ifndef NULL
#define NULL 0
#endif

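/*
 * vmlinux.h carries type definitions but not UAPI macros, so the open(2)
 * flags and inode mode bits used below are mirrored here from the kernel
 * headers.
 */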
#define O_WRONLY 00000001
#define O_RDWR 00000002
#define O_DIRECTORY 00200000
#define __O_TMPFILE 020000000
#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
#define MAX_ERRNO 4095
#define S_IFMT 00170000
#define S_IFSOCK 0140000
#define S_IFLNK 0120000
#define S_IFREG 0100000
#define S_IFBLK 0060000
#define S_IFDIR 0040000
#define S_IFCHR 0020000
#define S_IFIFO 0010000
#define S_ISUID 0004000
#define S_ISGID 0002000
#define S_ISVTX 0001000
#define S_ISLNK(m) (((m)&S_IFMT) == S_IFLNK)
#define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR)
#define S_ISCHR(m) (((m)&S_IFMT) == S_IFCHR)
#define S_ISBLK(m) (((m)&S_IFMT) == S_IFBLK)
#define S_ISFIFO(m) (((m)&S_IFMT) == S_IFIFO)
#define S_ISSOCK(m) (((m)&S_IFMT) == S_IFSOCK)
#define IS_ERR_VALUE(x) ((unsigned long)(void*)(x) >= (unsigned long)-MAX_ERRNO)

#define KILL_DATA_ARRAY_SIZE 8

struct var_kill_data_arr_t {
	struct var_kill_data_t array[KILL_DATA_ARRAY_SIZE];
};

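/*
 * Scratch union: sizing the per-CPU data_heap value to the largest event
 * record lets every probe share a single heap slot instead of needing one
 * map per event type.
 */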
union any_profiler_data_t {
	struct var_exec_data_t var_exec;
	struct var_kill_data_t var_kill;
	struct var_sysctl_data_t var_sysctl;
	struct var_filemod_data_t var_filemod;
	struct var_fork_data_t var_fork;
	struct var_kill_data_arr_t var_kill_data_arr;
};

volatile struct profiler_config_struct bpf_config = {};

#define FETCH_CGROUPS_FROM_BPF (bpf_config.fetch_cgroups_from_bpf)
#define CGROUP_FS_INODE (bpf_config.cgroup_fs_inode)
#define CGROUP_LOGIN_SESSION_INODE \
	(bpf_config.cgroup_login_session_inode)
#define KILL_SIGNALS (bpf_config.kill_signals_mask)
#define STALE_INFO (bpf_config.stale_info_secs)
#define INODE_FILTER (bpf_config.inode_filter)
#define READ_ENVIRON_FROM_EXEC (bpf_config.read_environ_from_exec)
#define ENABLE_CGROUP_V1_RESOLVER (bpf_config.enable_cgroup_v1_resolver)

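/*
 * CO-RE "flavor" definitions: libbpf ignores the ___52 suffix when matching
 * these against kernel BTF, so they describe the older (roughly pre-5.5)
 * kernfs layouts and let bpf_core_field_exists() pick the right accessor at
 * load time.
 */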
struct kernfs_iattrs___52 {
	struct iattr ia_iattr;
};

struct kernfs_node___52 {
	union /* kernfs_node_id */ {
		struct {
			u32 ino;
			u32 generation;
		};
		u64 id;
	} id;
};

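/*
 * Per-CPU scratch storage: BPF stacks are limited to 512 bytes, so large
 * event records are staged in this single-entry per-CPU array instead.
 */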
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, union any_profiler_data_t);
} data_heap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, KILL_DATA_ARRAY_SIZE);
	__type(key, u32);
	__type(value, struct var_kill_data_arr_t);
} var_tpid_to_data SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, profiler_bpf_max_function_id);
	__type(key, u32);
	__type(value, struct bpf_func_stats_data);
} bpf_func_stats SEC(".maps");

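/*
 * Userspace-populated filter maps: filemod events are only emitted for
 * allowed devices and file/directory inodes, and exec events are dropped
 * for disallowed binary inodes.
 */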
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, bool);
	__uint(max_entries, 16);
} allowed_devices SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u64);
	__type(value, bool);
	__uint(max_entries, 1024);
} allowed_file_inodes SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u64);
	__type(value, bool);
	__uint(max_entries, 1024);
} allowed_directory_inodes SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, bool);
	__uint(max_entries, 16);
} disallowed_exec_inodes SEC(".maps");

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#endif

static INLINE bool IS_ERR(const void* ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

static INLINE u32 get_userspace_pid(void)
{
	return bpf_get_current_pid_tgid() >> 32;
}

static INLINE bool is_init_process(u32 tgid)
{
	return tgid == 1 || tgid == 0;
}

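/*
 * Copy up to max bytes from src, returning the copied length or 0 on
 * failure. The len == 1 special case is presumably there to keep the
 * verifier's bounds tracking simple.
 */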
static INLINE unsigned long
probe_read_lim(void* dst, void* src, unsigned long len, unsigned long max)
{
	len = len < max ? len : max;
	if (len > 1) {
		if (bpf_probe_read(dst, len, src))
			return 0;
	} else if (len == 1) {
		if (bpf_probe_read(dst, 1, src))
			return 0;
	}
	return len;
}

static INLINE int get_var_spid_index(struct var_kill_data_arr_t* arr_struct,
				     int spid)
{
#ifdef UNROLL
#pragma unroll
#endif
	for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
		if (arr_struct->array[i].meta.pid == spid)
			return i;
	return -1;
}

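/*
 * Walk the real_parent chain, recording up to MAX_ANCESTORS pids, exec ids,
 * and start times; stops at init (pid 0/1).
 */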
static INLINE void populate_ancestors(struct task_struct* task,
				      struct ancestors_data_t* ancestors_data)
{
	struct task_struct* parent = task;
	u32 num_ancestors, ppid;

	ancestors_data->num_ancestors = 0;
#ifdef UNROLL
#pragma unroll
#endif
	for (num_ancestors = 0; num_ancestors < MAX_ANCESTORS; num_ancestors++) {
		parent = BPF_CORE_READ(parent, real_parent);
		if (parent == NULL)
			break;
		ppid = BPF_CORE_READ(parent, tgid);
		if (is_init_process(ppid))
			break;
		ancestors_data->ancestor_pids[num_ancestors] = ppid;
		ancestors_data->ancestor_exec_ids[num_ancestors] =
			BPF_CORE_READ(parent, self_exec_id);
		ancestors_data->ancestor_start_times[num_ancestors] =
			BPF_CORE_READ(parent, start_time);
		ancestors_data->num_ancestors = num_ancestors + 1;
	}
}

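/*
 * Concatenate cgroup kernfs node names from the leaf toward the root into
 * payload (each component NUL-terminated by bpf_probe_read_str), recording
 * in *root_pos the offset at which the root node's component landed.
 */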
static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
					  struct kernfs_node* cgroup_root_node,
					  void* payload,
					  int* root_pos)
{
	void* payload_start = payload;
	size_t filepart_length;

#ifdef UNROLL
#pragma unroll
#endif
	for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
		if (!cgroup_node)
			return payload;
		filepart_length =
			bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(cgroup_node, name));
		if (cgroup_node == cgroup_root_node)
			*root_pos = payload - payload_start;
		if (filepart_length <= MAX_PATH) {
			barrier_var(filepart_length);
			payload += filepart_length;
		}
		cgroup_node = BPF_CORE_READ(cgroup_node, parent);
	}
	return payload;
}

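/*
 * Fetch the kernfs inode number, handling both the old kernfs_node_id union
 * layout (via the ___52 flavor) and the newer plain u64 id.
 */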
static ino_t get_inode_from_kernfs(struct kernfs_node* node)
{
	struct kernfs_node___52* node52 = (void*)node;

	if (bpf_core_field_exists(node52->id.ino)) {
		barrier_var(node52);
		return BPF_CORE_READ(node52, id.ino);
	} else {
		barrier_var(node);
		return (u64)BPF_CORE_READ(node, id);
	}
}

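/*
 * cgroup-v1 'pids' controller id, left as a writable global so userspace
 * can override it before load if the kernel's value differs.
 */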
int pids_cgrp_id = 1;

static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
					 struct task_struct* task,
					 void* payload)
{
	struct kernfs_node* root_kernfs =
		BPF_CORE_READ(task, nsproxy, cgroup_ns, root_cset, dfl_cgrp, kn);
	struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);

	if (ENABLE_CGROUP_V1_RESOLVER) {
#ifdef UNROLL
#pragma unroll
#endif
		for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys_state* subsys =
				BPF_CORE_READ(task, cgroups, subsys[i]);
			if (subsys != NULL) {
				int subsys_id = BPF_CORE_READ(subsys, ss, id);
				if (subsys_id == pids_cgrp_id) {
					proc_kernfs = BPF_CORE_READ(subsys, cgroup, kn);
					root_kernfs = BPF_CORE_READ(subsys, ss, root, kf_root, kn);
					break;
				}
			}
		}
	}

	cgroup_data->cgroup_root_inode = get_inode_from_kernfs(root_kernfs);
	cgroup_data->cgroup_proc_inode = get_inode_from_kernfs(proc_kernfs);

	if (bpf_core_field_exists(root_kernfs->iattr->ia_mtime)) {
		cgroup_data->cgroup_root_mtime =
			BPF_CORE_READ(root_kernfs, iattr, ia_mtime.tv_nsec);
		cgroup_data->cgroup_proc_mtime =
			BPF_CORE_READ(proc_kernfs, iattr, ia_mtime.tv_nsec);
	} else {
		struct kernfs_iattrs___52* root_iattr =
			(struct kernfs_iattrs___52*)BPF_CORE_READ(root_kernfs, iattr);
		cgroup_data->cgroup_root_mtime =
			BPF_CORE_READ(root_iattr, ia_iattr.ia_mtime.tv_nsec);

		struct kernfs_iattrs___52* proc_iattr =
			(struct kernfs_iattrs___52*)BPF_CORE_READ(proc_kernfs, iattr);
		cgroup_data->cgroup_proc_mtime =
			BPF_CORE_READ(proc_iattr, ia_iattr.ia_mtime.tv_nsec);
	}

	cgroup_data->cgroup_root_length = 0;
	cgroup_data->cgroup_proc_length = 0;
	cgroup_data->cgroup_full_length = 0;

	size_t cgroup_root_length =
		bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(root_kernfs, name));
	barrier_var(cgroup_root_length);
	if (cgroup_root_length <= MAX_PATH) {
		barrier_var(cgroup_root_length);
		cgroup_data->cgroup_root_length = cgroup_root_length;
		payload += cgroup_root_length;
	}

	size_t cgroup_proc_length =
		bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(proc_kernfs, name));
	barrier_var(cgroup_proc_length);
	if (cgroup_proc_length <= MAX_PATH) {
		barrier_var(cgroup_proc_length);
		cgroup_data->cgroup_proc_length = cgroup_proc_length;
		payload += cgroup_proc_length;
	}

	if (FETCH_CGROUPS_FROM_BPF) {
		cgroup_data->cgroup_full_path_root_pos = -1;
		void* payload_end_pos = read_full_cgroup_path(proc_kernfs, root_kernfs, payload,
							      &cgroup_data->cgroup_full_path_root_pos);
		cgroup_data->cgroup_full_length = payload_end_pos - payload;
		payload = payload_end_pos;
	}

	return (void*)payload;
}

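/*
 * Fill the common per-event metadata (ids, times, comm) and append the comm
 * string to the payload, returning the advanced payload cursor.
 */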
static INLINE void* populate_var_metadata(struct var_metadata_t* metadata,
					  struct task_struct* task,
					  u32 pid, void* payload)
{
	u64 uid_gid = bpf_get_current_uid_gid();

	metadata->uid = (u32)uid_gid;
	metadata->gid = uid_gid >> 32;
	metadata->pid = pid;
	metadata->exec_id = BPF_CORE_READ(task, self_exec_id);
	metadata->start_time = BPF_CORE_READ(task, start_time);
	metadata->comm_length = 0;

	size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
	barrier_var(comm_length);
	if (comm_length <= TASK_COMM_LEN) {
		barrier_var(comm_length);
		metadata->comm_length = comm_length;
		payload += comm_length;
	}

	return (void*)payload;
}

static INLINE struct var_kill_data_t*
get_var_kill_data(struct pt_regs* ctx, int spid, int tpid, int sig)
{
	int zero = 0;
	struct var_kill_data_t* kill_data = bpf_map_lookup_elem(&data_heap, &zero);

	if (kill_data == NULL)
		return NULL;
	struct task_struct* task = (struct task_struct*)bpf_get_current_task();

	void* payload = populate_var_metadata(&kill_data->meta, task, spid, kill_data->payload);
	payload = populate_cgroup_info(&kill_data->cgroup_data, task, payload);
	size_t payload_length = payload - (void*)kill_data->payload;
	kill_data->payload_length = payload_length;
	populate_ancestors(task, &kill_data->ancestors_info);
	kill_data->meta.type = KILL_EVENT;
	kill_data->kill_target_pid = tpid;
	kill_data->kill_sig = sig;
	kill_data->kill_count = 1;
	kill_data->last_kill_time = bpf_ktime_get_ns();
	return kill_data;
}

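/*
 * Record a kill() from spid to tpid. Repeated signals from the same sender
 * within STALE_INFO seconds are coalesced into a kill_count bump on the
 * entry cached in var_tpid_to_data; otherwise a fresh record is stored.
 */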
static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
{
	if ((KILL_SIGNALS & (1ULL << sig)) == 0)
		return 0;

	u32 spid = get_userspace_pid();
	struct var_kill_data_arr_t* arr_struct = bpf_map_lookup_elem(&var_tpid_to_data, &tpid);

	if (arr_struct == NULL) {
		struct var_kill_data_t* kill_data = get_var_kill_data(ctx, spid, tpid, sig);
		int zero = 0;

		if (kill_data == NULL)
			return 0;
		arr_struct = bpf_map_lookup_elem(&data_heap, &zero);
		if (arr_struct == NULL)
			return 0;
		bpf_probe_read(&arr_struct->array[0], sizeof(arr_struct->array[0]), kill_data);
	} else {
		int index = get_var_spid_index(arr_struct, spid);

		if (index == -1) {
			struct var_kill_data_t* kill_data =
				get_var_kill_data(ctx, spid, tpid, sig);
			if (kill_data == NULL)
				return 0;
#ifdef UNROLL
#pragma unroll
#endif
			for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
				if (arr_struct->array[i].meta.pid == 0) {
					bpf_probe_read(&arr_struct->array[i],
						       sizeof(arr_struct->array[i]), kill_data);
					bpf_map_update_elem(&var_tpid_to_data, &tpid,
							    arr_struct, 0);

					return 0;
				}
			return 0;
		}

		struct var_kill_data_t* kill_data = &arr_struct->array[index];

		u64 delta_sec =
			(bpf_ktime_get_ns() - kill_data->last_kill_time) / 1000000000;

		if (delta_sec < STALE_INFO) {
			kill_data->kill_count++;
			kill_data->last_kill_time = bpf_ktime_get_ns();
			bpf_probe_read(&arr_struct->array[index],
				       sizeof(arr_struct->array[index]),
				       kill_data);
		} else {
			struct var_kill_data_t* kill_data =
				get_var_kill_data(ctx, spid, tpid, sig);
			if (kill_data == NULL)
				return 0;
			bpf_probe_read(&arr_struct->array[index],
				       sizeof(arr_struct->array[index]),
				       kill_data);
		}
	}
	bpf_map_update_elem(&var_tpid_to_data, &tpid, arr_struct, 0);
	return 0;
}

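/*
 * Lightweight self-profiling: each probe brackets its body with
 * bpf_stats_enter/bpf_stats_exit to count executions and accumulate elapsed
 * nanoseconds per function id in bpf_func_stats.
 */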
static INLINE void bpf_stats_enter(struct bpf_func_stats_ctx* bpf_stat_ctx,
				   enum bpf_function_id func_id)
{
	int func_id_key = func_id;

	bpf_stat_ctx->start_time_ns = bpf_ktime_get_ns();
	bpf_stat_ctx->bpf_func_stats_data_val =
		bpf_map_lookup_elem(&bpf_func_stats, &func_id_key);
	if (bpf_stat_ctx->bpf_func_stats_data_val)
		bpf_stat_ctx->bpf_func_stats_data_val->num_executions++;
}

static INLINE void bpf_stats_exit(struct bpf_func_stats_ctx* bpf_stat_ctx)
{
	if (bpf_stat_ctx->bpf_func_stats_data_val)
		bpf_stat_ctx->bpf_func_stats_data_val->time_elapsed_ns +=
			bpf_ktime_get_ns() - bpf_stat_ctx->start_time_ns;
}

static INLINE void
bpf_stats_pre_submit_var_perf_event(struct bpf_func_stats_ctx* bpf_stat_ctx,
				    struct var_metadata_t* meta)
{
	if (bpf_stat_ctx->bpf_func_stats_data_val) {
		bpf_stat_ctx->bpf_func_stats_data_val->num_perf_events++;
		meta->bpf_stats_num_perf_events =
			bpf_stat_ctx->bpf_func_stats_data_val->num_perf_events;
	}
	meta->bpf_stats_start_ktime_ns = bpf_stat_ctx->start_time_ns;
	meta->cpu_id = bpf_get_smp_processor_id();
}

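/*
 * Despite the name, this emits dentry name components in leaf-to-root
 * order, each NUL-terminated, leaving reassembly of the absolute path to
 * the consumer.
 */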
static INLINE size_t
read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
{
	size_t length = 0;
	size_t filepart_length;
	struct dentry* parent_dentry;

#ifdef UNROLL
#pragma unroll
#endif
	for (int i = 0; i < MAX_PATH_DEPTH; i++) {
		filepart_length = bpf_probe_read_str(payload, MAX_PATH,
						     BPF_CORE_READ(filp_dentry, d_name.name));
		barrier_var(filepart_length);
		if (filepart_length > MAX_PATH)
			break;
		barrier_var(filepart_length);
		payload += filepart_length;
		length += filepart_length;

		parent_dentry = BPF_CORE_READ(filp_dentry, d_parent);
		if (filp_dentry == parent_dentry)
			break;
		filp_dentry = parent_dentry;
	}

	return length;
}

static INLINE bool
is_ancestor_in_allowed_inodes(struct dentry* filp_dentry)
{
	struct dentry* parent_dentry;
#ifdef UNROLL
#pragma unroll
#endif
	for (int i = 0; i < MAX_PATH_DEPTH; i++) {
		u64 dir_ino = BPF_CORE_READ(filp_dentry, d_inode, i_ino);
		bool* allowed_dir = bpf_map_lookup_elem(&allowed_directory_inodes, &dir_ino);

		if (allowed_dir != NULL)
			return true;
		parent_dentry = BPF_CORE_READ(filp_dentry, d_parent);
		if (filp_dentry == parent_dentry)
			break;
		filp_dentry = parent_dentry;
	}
	return false;
}

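/*
 * A dentry passes the filemod filter when its device is allowed and either
 * its inode or one of its ancestor directories' inodes is allowlisted. The
 * device id is always reported back; the inode only once the device check
 * passes.
 */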
static INLINE bool is_dentry_allowed_for_filemod(struct dentry* file_dentry,
						 u32* device_id,
						 u64* file_ino)
{
	u32 dev_id = BPF_CORE_READ(file_dentry, d_sb, s_dev);
	*device_id = dev_id;
	bool* allowed_device = bpf_map_lookup_elem(&allowed_devices, &dev_id);

	if (allowed_device == NULL)
		return false;

	u64 ino = BPF_CORE_READ(file_dentry, d_inode, i_ino);
	*file_ino = ino;
	bool* allowed_file = bpf_map_lookup_elem(&allowed_file_inodes, &ino);

	if (allowed_file == NULL)
		if (!is_ancestor_in_allowed_inodes(BPF_CORE_READ(file_dentry, d_parent)))
			return false;
	return true;
}

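/*
 * kprobe on proc_sys_write: emits a SYSCTL_EVENT with the written value and
 * the sysctl file name, plus metadata, cgroup, and ancestry info.
 */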
SEC("kprobe/proc_sys_write")
ssize_t BPF_KPROBE(kprobe__proc_sys_write,
		   struct file* filp, const char* buf,
		   size_t count, loff_t* ppos)
{
	struct bpf_func_stats_ctx stats_ctx;
	bpf_stats_enter(&stats_ctx, profiler_bpf_proc_sys_write);

	u32 pid = get_userspace_pid();
	int zero = 0;
	struct var_sysctl_data_t* sysctl_data =
		bpf_map_lookup_elem(&data_heap, &zero);
	if (!sysctl_data)
		goto out;

	struct task_struct* task = (struct task_struct*)bpf_get_current_task();
	sysctl_data->meta.type = SYSCTL_EVENT;
	void* payload = populate_var_metadata(&sysctl_data->meta, task, pid, sysctl_data->payload);
	payload = populate_cgroup_info(&sysctl_data->cgroup_data, task, payload);

	populate_ancestors(task, &sysctl_data->ancestors_info);

	sysctl_data->sysctl_val_length = 0;
	sysctl_data->sysctl_path_length = 0;

	size_t sysctl_val_length = bpf_probe_read_str(payload, CTL_MAXNAME, buf);
	barrier_var(sysctl_val_length);
	if (sysctl_val_length <= CTL_MAXNAME) {
		barrier_var(sysctl_val_length);
		sysctl_data->sysctl_val_length = sysctl_val_length;
		payload += sysctl_val_length;
	}

	size_t sysctl_path_length = bpf_probe_read_str(payload, MAX_PATH,
						       BPF_CORE_READ(filp, f_path.dentry, d_name.name));
	barrier_var(sysctl_path_length);
	if (sysctl_path_length <= MAX_PATH) {
		barrier_var(sysctl_path_length);
		sysctl_data->sysctl_path_length = sysctl_path_length;
		payload += sysctl_path_length;
	}

	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &sysctl_data->meta);
	unsigned long data_len = payload - (void*)sysctl_data;
	data_len = data_len > sizeof(struct var_sysctl_data_t)
		? sizeof(struct var_sysctl_data_t)
		: data_len;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, sysctl_data, data_len);
out:
	bpf_stats_exit(&stats_ctx);
	return 0;
}

SEC("tracepoint/syscalls/sys_enter_kill")
int tracepoint__syscalls__sys_enter_kill(struct trace_event_raw_sys_enter* ctx)
{
	struct bpf_func_stats_ctx stats_ctx;

	bpf_stats_enter(&stats_ctx, profiler_bpf_sys_enter_kill);
	int pid = ctx->args[0];
	int sig = ctx->args[1];
	int ret = trace_var_sys_kill(ctx, pid, sig);
	bpf_stats_exit(&stats_ctx);
	return ret;
}

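/*
 * On process exit, flush any kill records that targeted this pid, tagging
 * each with the victim's comm and cgroup name before submission.
 */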
SEC("raw_tracepoint/sched_process_exit")
int raw_tracepoint__sched_process_exit(void* ctx)
{
	int zero = 0;
	struct bpf_func_stats_ctx stats_ctx;
	bpf_stats_enter(&stats_ctx, profiler_bpf_sched_process_exit);

	u32 tpid = get_userspace_pid();

	struct var_kill_data_arr_t* arr_struct = bpf_map_lookup_elem(&var_tpid_to_data, &tpid);
	struct var_kill_data_t* kill_data = bpf_map_lookup_elem(&data_heap, &zero);

	if (arr_struct == NULL || kill_data == NULL)
		goto out;

	struct task_struct* task = (struct task_struct*)bpf_get_current_task();
	struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);

#ifdef UNROLL
#pragma unroll
#endif
	for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
		struct var_kill_data_t* past_kill_data = &arr_struct->array[i];

		if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
			bpf_probe_read(kill_data, sizeof(*past_kill_data), past_kill_data);
			void* payload = kill_data->payload;
			size_t offset = kill_data->payload_length;
			if (offset >= MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN)
				return 0;
			payload += offset;

			kill_data->kill_target_name_length = 0;
			kill_data->kill_target_cgroup_proc_length = 0;

			size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
			barrier_var(comm_length);
			if (comm_length <= TASK_COMM_LEN) {
				barrier_var(comm_length);
				kill_data->kill_target_name_length = comm_length;
				payload += comm_length;
			}

			size_t cgroup_proc_length = bpf_probe_read_str(payload, KILL_TARGET_LEN,
								       BPF_CORE_READ(proc_kernfs, name));
			barrier_var(cgroup_proc_length);
			if (cgroup_proc_length <= KILL_TARGET_LEN) {
				barrier_var(cgroup_proc_length);
				kill_data->kill_target_cgroup_proc_length = cgroup_proc_length;
				payload += cgroup_proc_length;
			}

			bpf_stats_pre_submit_var_perf_event(&stats_ctx, &kill_data->meta);
			unsigned long data_len = (void*)payload - (void*)kill_data;
			data_len = data_len > sizeof(struct var_kill_data_t)
				? sizeof(struct var_kill_data_t)
				: data_len;
			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, kill_data, data_len);
		}
	}
	bpf_map_delete_elem(&var_tpid_to_data, &tpid);
out:
	bpf_stats_exit(&stats_ctx);
	return 0;
}

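/*
 * On exec, emit an EXEC_EVENT carrying the binary path, argv (and
 * optionally the environment), parent identity, metadata, and cgroup info,
 * subject to the inode filters.
 */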
SEC("raw_tracepoint/sched_process_exec")
int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
{
	struct bpf_func_stats_ctx stats_ctx;
	bpf_stats_enter(&stats_ctx, profiler_bpf_sched_process_exec);

	struct linux_binprm* bprm = (struct linux_binprm*)ctx->args[2];
	u64 inode = BPF_CORE_READ(bprm, file, f_inode, i_ino);

	bool* should_filter_binprm = bpf_map_lookup_elem(&disallowed_exec_inodes, &inode);
	if (should_filter_binprm != NULL)
		goto out;

	int zero = 0;
	struct var_exec_data_t* proc_exec_data = bpf_map_lookup_elem(&data_heap, &zero);
	if (!proc_exec_data)
		goto out;

	if (INODE_FILTER && inode != INODE_FILTER)
		goto out;

	u32 pid = get_userspace_pid();
	struct task_struct* task = (struct task_struct*)bpf_get_current_task();

	proc_exec_data->meta.type = EXEC_EVENT;
	proc_exec_data->bin_path_length = 0;
	proc_exec_data->cmdline_length = 0;
	proc_exec_data->environment_length = 0;
	void* payload = populate_var_metadata(&proc_exec_data->meta, task, pid,
					      proc_exec_data->payload);
	payload = populate_cgroup_info(&proc_exec_data->cgroup_data, task, payload);

	struct task_struct* parent_task = BPF_CORE_READ(task, real_parent);
	proc_exec_data->parent_pid = BPF_CORE_READ(parent_task, tgid);
	proc_exec_data->parent_uid = BPF_CORE_READ(parent_task, real_cred, uid.val);
	proc_exec_data->parent_exec_id = BPF_CORE_READ(parent_task, self_exec_id);
	proc_exec_data->parent_start_time = BPF_CORE_READ(parent_task, start_time);

	const char* filename = BPF_CORE_READ(bprm, filename);
	size_t bin_path_length = bpf_probe_read_str(payload, MAX_FILENAME_LEN, filename);
	barrier_var(bin_path_length);
	if (bin_path_length <= MAX_FILENAME_LEN) {
		barrier_var(bin_path_length);
		proc_exec_data->bin_path_length = bin_path_length;
		payload += bin_path_length;
	}

	void* arg_start = (void*)BPF_CORE_READ(task, mm, arg_start);
	void* arg_end = (void*)BPF_CORE_READ(task, mm, arg_end);
	unsigned int cmdline_length = probe_read_lim(payload, arg_start,
						     arg_end - arg_start, MAX_ARGS_LEN);

	if (cmdline_length <= MAX_ARGS_LEN) {
		barrier_var(cmdline_length);
		proc_exec_data->cmdline_length = cmdline_length;
		payload += cmdline_length;
	}

	if (READ_ENVIRON_FROM_EXEC) {
		void* env_start = (void*)BPF_CORE_READ(task, mm, env_start);
		void* env_end = (void*)BPF_CORE_READ(task, mm, env_end);
		unsigned long env_len = probe_read_lim(payload, env_start,
						       env_end - env_start, MAX_ENVIRON_LEN);
		if (env_len <= MAX_ENVIRON_LEN) {
			proc_exec_data->environment_length = env_len;
			payload += env_len;
		}
	}

	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &proc_exec_data->meta);
	unsigned long data_len = payload - (void*)proc_exec_data;
	data_len = data_len > sizeof(struct var_exec_data_t)
		? sizeof(struct var_exec_data_t)
		: data_len;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, proc_exec_data, data_len);
out:
	bpf_stats_exit(&stats_ctx);
	return 0;
}

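/*
 * kretprobe on do_filp_open: reports writable opens of allowlisted regular
 * files (and symlinks) as FMOD_OPEN filemod events.
 */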
SEC("kretprobe/do_filp_open")
int kprobe_ret__do_filp_open(struct pt_regs* ctx)
{
	struct bpf_func_stats_ctx stats_ctx;
	bpf_stats_enter(&stats_ctx, profiler_bpf_do_filp_open_ret);

	struct file* filp = (struct file*)PT_REGS_RC_CORE(ctx);

	if (filp == NULL || IS_ERR(filp))
		goto out;
	unsigned int flags = BPF_CORE_READ(filp, f_flags);
	if ((flags & (O_RDWR | O_WRONLY)) == 0)
		goto out;
	if ((flags & O_TMPFILE) > 0)
		goto out;
	struct inode* file_inode = BPF_CORE_READ(filp, f_inode);
	umode_t mode = BPF_CORE_READ(file_inode, i_mode);
	if (S_ISDIR(mode) || S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
	    S_ISSOCK(mode))
		goto out;

	struct dentry* filp_dentry = BPF_CORE_READ(filp, f_path.dentry);
	u32 device_id = 0;
	u64 file_ino = 0;
	if (!is_dentry_allowed_for_filemod(filp_dentry, &device_id, &file_ino))
		goto out;

	int zero = 0;
	struct var_filemod_data_t* filemod_data = bpf_map_lookup_elem(&data_heap, &zero);
	if (!filemod_data)
		goto out;

	u32 pid = get_userspace_pid();
	struct task_struct* task = (struct task_struct*)bpf_get_current_task();

	filemod_data->meta.type = FILEMOD_EVENT;
	filemod_data->fmod_type = FMOD_OPEN;
	filemod_data->dst_flags = flags;
	filemod_data->src_inode = 0;
	filemod_data->dst_inode = file_ino;
	filemod_data->src_device_id = 0;
	filemod_data->dst_device_id = device_id;
	filemod_data->src_filepath_length = 0;
	filemod_data->dst_filepath_length = 0;

	void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
					      filemod_data->payload);
	payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);

	size_t len = read_absolute_file_path_from_dentry(filp_dentry, payload);
	barrier_var(len);
	if (len <= MAX_FILEPATH_LENGTH) {
		barrier_var(len);
		payload += len;
		filemod_data->dst_filepath_length = len;
	}
	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &filemod_data->meta);
	unsigned long data_len = payload - (void*)filemod_data;
	data_len = data_len > sizeof(*filemod_data) ? sizeof(*filemod_data) : data_len;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);
out:
	bpf_stats_exit(&stats_ctx);
	return 0;
}

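/*
 * kprobe on vfs_link: reports hard-link creation touching an allowlisted
 * source or destination as an FMOD_LINK filemod event.
 */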
SEC("kprobe/vfs_link")
int BPF_KPROBE(kprobe__vfs_link,
	       struct dentry* old_dentry, struct inode* dir,
	       struct dentry* new_dentry, struct inode** delegated_inode)
{
	struct bpf_func_stats_ctx stats_ctx;
	bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_link);

	u32 src_device_id = 0;
	u64 src_file_ino = 0;
	u32 dst_device_id = 0;
	u64 dst_file_ino = 0;
	if (!is_dentry_allowed_for_filemod(old_dentry, &src_device_id, &src_file_ino) &&
	    !is_dentry_allowed_for_filemod(new_dentry, &dst_device_id, &dst_file_ino))
		goto out;

	int zero = 0;
	struct var_filemod_data_t* filemod_data = bpf_map_lookup_elem(&data_heap, &zero);
	if (!filemod_data)
		goto out;

	u32 pid = get_userspace_pid();
	struct task_struct* task = (struct task_struct*)bpf_get_current_task();

	filemod_data->meta.type = FILEMOD_EVENT;
	filemod_data->fmod_type = FMOD_LINK;
	filemod_data->dst_flags = 0;
	filemod_data->src_inode = src_file_ino;
	filemod_data->dst_inode = dst_file_ino;
	filemod_data->src_device_id = src_device_id;
	filemod_data->dst_device_id = dst_device_id;
	filemod_data->src_filepath_length = 0;
	filemod_data->dst_filepath_length = 0;

	void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
					      filemod_data->payload);
	payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);

	size_t len = read_absolute_file_path_from_dentry(old_dentry, payload);
	barrier_var(len);
	if (len <= MAX_FILEPATH_LENGTH) {
		barrier_var(len);
		payload += len;
		filemod_data->src_filepath_length = len;
	}

	len = read_absolute_file_path_from_dentry(new_dentry, payload);
	barrier_var(len);
	if (len <= MAX_FILEPATH_LENGTH) {
		barrier_var(len);
		payload += len;
		filemod_data->dst_filepath_length = len;
	}

	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &filemod_data->meta);
	unsigned long data_len = payload - (void*)filemod_data;
	data_len = data_len > sizeof(*filemod_data) ? sizeof(*filemod_data) : data_len;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);
out:
	bpf_stats_exit(&stats_ctx);
	return 0;
}

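/*
 * kprobe on vfs_symlink: reports symlink creation at an allowlisted
 * destination as an FMOD_SYMLINK filemod event, recording the link target
 * string as the source path.
 */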
SEC("kprobe/vfs_symlink")
int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
	       const char* oldname)
{
	struct bpf_func_stats_ctx stats_ctx;
	bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_symlink);

	u32 dst_device_id = 0;
	u64 dst_file_ino = 0;
	if (!is_dentry_allowed_for_filemod(dentry, &dst_device_id, &dst_file_ino))
		goto out;

	int zero = 0;
	struct var_filemod_data_t* filemod_data = bpf_map_lookup_elem(&data_heap, &zero);
	if (!filemod_data)
		goto out;

	u32 pid = get_userspace_pid();
	struct task_struct* task = (struct task_struct*)bpf_get_current_task();

	filemod_data->meta.type = FILEMOD_EVENT;
	filemod_data->fmod_type = FMOD_SYMLINK;
	filemod_data->dst_flags = 0;
	filemod_data->src_inode = 0;
	filemod_data->dst_inode = dst_file_ino;
	filemod_data->src_device_id = 0;
	filemod_data->dst_device_id = dst_device_id;
	filemod_data->src_filepath_length = 0;
	filemod_data->dst_filepath_length = 0;

	void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
					      filemod_data->payload);
	payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);

	size_t len = bpf_probe_read_str(payload, MAX_FILEPATH_LENGTH, oldname);
	barrier_var(len);
	if (len <= MAX_FILEPATH_LENGTH) {
		barrier_var(len);
		payload += len;
		filemod_data->src_filepath_length = len;
	}
	len = read_absolute_file_path_from_dentry(dentry, payload);
	barrier_var(len);
	if (len <= MAX_FILEPATH_LENGTH) {
		barrier_var(len);
		payload += len;
		filemod_data->dst_filepath_length = len;
	}
	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &filemod_data->meta);
	unsigned long data_len = payload - (void*)filemod_data;
	data_len = data_len > sizeof(*filemod_data) ? sizeof(*filemod_data) : data_len;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);
out:
	bpf_stats_exit(&stats_ctx);
	return 0;
}

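/*
 * On fork, emit a FORK_EVENT linking the child's metadata to the parent's
 * pid, exec id, and start time.
 */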
SEC("raw_tracepoint/sched_process_fork")
int raw_tracepoint__sched_process_fork(struct bpf_raw_tracepoint_args* ctx)
{
	struct bpf_func_stats_ctx stats_ctx;
	bpf_stats_enter(&stats_ctx, profiler_bpf_sched_process_fork);

	int zero = 0;
	struct var_fork_data_t* fork_data = bpf_map_lookup_elem(&data_heap, &zero);
	if (!fork_data)
		goto out;

	struct task_struct* parent = (struct task_struct*)ctx->args[0];
	struct task_struct* child = (struct task_struct*)ctx->args[1];
	fork_data->meta.type = FORK_EVENT;

	void* payload = populate_var_metadata(&fork_data->meta, child,
					      BPF_CORE_READ(child, pid), fork_data->payload);
	fork_data->parent_pid = BPF_CORE_READ(parent, pid);
	fork_data->parent_exec_id = BPF_CORE_READ(parent, self_exec_id);
	fork_data->parent_start_time = BPF_CORE_READ(parent, start_time);
	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &fork_data->meta);

	unsigned long data_len = payload - (void*)fork_data;
	data_len = data_len > sizeof(*fork_data) ? sizeof(*fork_data) : data_len;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, fork_data, data_len);
out:
	bpf_stats_exit(&stats_ctx);
	return 0;
}

char _license[] SEC("license") = "GPL";