// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define FUNCTION_NAME_LEN 64
#define FILE_NAME_LEN 128
#define TASK_COMM_LEN 16

/* Offsets of the fields we need inside CPython's internal structures,
 * resolved by user space for the target interpreter build.
 */
typedef struct {
	int PyThreadState_frame;
	int PyThreadState_thread;
	int PyFrameObject_back;
	int PyFrameObject_code;
	int PyFrameObject_lineno;
	int PyCodeObject_filename;
	int PyCodeObject_name;
	int String_data;
	int String_size;
} OffsetConfig;

/* Per-process configuration provided by user space via 'pidmap'. */
typedef struct {
	uintptr_t current_state_addr;
	uintptr_t tls_key_addr;
	OffsetConfig offsets;
	bool use_tls;
} PidData;

typedef struct {
	uint32_t success;
} Stats;

typedef struct {
	char name[FUNCTION_NAME_LEN];
	char file[FILE_NAME_LEN];
} Symbol;

/* STACK_MAX_LEN is expected to be defined by the file that includes this one. */
typedef struct {
	uint32_t pid;
	uint32_t tid;
	char comm[TASK_COMM_LEN];
	int32_t kernel_stack_id;
	int32_t user_stack_id;
	bool thread_current;
	bool pthread_match;
	bool stack_complete;
	int16_t stack_len;
	int32_t stack[STACK_MAX_LEN];

	/* bpf_perf_event_output() below sends only the bytes up to 'metadata' */
	int has_meta;
	int metadata;
	char dummy_safeguard;
} Event;

typedef int pid_t;

typedef struct {
	void *f_back;		// PyFrameObject.f_back, previous frame
	void *f_code;		// PyFrameObject.f_code, pointer to PyCodeObject
	void *co_filename;	// PyCodeObject.co_filename
	void *co_name;		// PyCodeObject.co_name
} FrameData;

#ifdef SUBPROGS
__noinline
#else
__always_inline
#endif
static void *get_thread_state(void *tls_base, PidData *pidData)
{
	void *thread_state;
	int key;

	bpf_probe_read_user(&key, sizeof(key), (void *)(long)pidData->tls_key_addr);
	/* hard-coded offsets into glibc's struct pthread: thread-specific
	 * data slot for 'key' (assumes the x86-64 glibc layout)
	 */
	bpf_probe_read_user(&thread_state, sizeof(thread_state),
			    tls_base + 0x310 + key * 0x10 + 0x08);
	return thread_state;
}

static __always_inline bool get_frame_data(void *frame_ptr, PidData *pidData,
					   FrameData *frame, Symbol *symbol)
{
	// read data from PyFrameObject
	bpf_probe_read_user(&frame->f_back,
			    sizeof(frame->f_back),
			    frame_ptr + pidData->offsets.PyFrameObject_back);
	bpf_probe_read_user(&frame->f_code,
			    sizeof(frame->f_code),
			    frame_ptr + pidData->offsets.PyFrameObject_code);

	// read data from PyCodeObject
	if (!frame->f_code)
		return false;
	bpf_probe_read_user(&frame->co_filename,
			    sizeof(frame->co_filename),
			    frame->f_code + pidData->offsets.PyCodeObject_filename);
	bpf_probe_read_user(&frame->co_name,
			    sizeof(frame->co_name),
			    frame->f_code + pidData->offsets.PyCodeObject_name);
	// read actual names into symbol
	if (frame->co_filename)
		bpf_probe_read_user_str(&symbol->file,
					sizeof(symbol->file),
					frame->co_filename +
					pidData->offsets.String_data);
	if (frame->co_name)
		bpf_probe_read_user_str(&symbol->name,
					sizeof(symbol->name),
					frame->co_name +
					pidData->offsets.String_data);
	return true;
}

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, PidData);
} pidmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, Event);
} eventmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, Symbol);
	__type(value, int);
} symbolmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, Stats);
} statsmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, 32);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} perfmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1000);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(long long) * 127);
} stackmap SEC(".maps");

#ifdef USE_BPF_LOOP
struct process_frame_ctx {
	int cur_cpu;
	int32_t *symbol_counter;
	void *frame_ptr;
	FrameData *frame;
	PidData *pidData;
	Symbol *sym;
	Event *event;
	bool done;
};

#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))

static int process_frame_callback(__u32 i, struct process_frame_ctx *ctx)
{
	int zero = 0;
	void *frame_ptr = ctx->frame_ptr;
	PidData *pidData = ctx->pidData;
	FrameData *frame = ctx->frame;
	int32_t *symbol_counter = ctx->symbol_counter;
	int cur_cpu = ctx->cur_cpu;
	Event *event = ctx->event;
	Symbol *sym = ctx->sym;

	if (frame_ptr && get_frame_data(frame_ptr, pidData, frame, sym)) {
		/* symbol ids are allocated per CPU: counter * 64 + cpu */
		int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu;
		int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, sym);

		if (!symbol_id) {
			bpf_map_update_elem(&symbolmap, sym, &zero, 0);
			symbol_id = bpf_map_lookup_elem(&symbolmap, sym);
			if (!symbol_id) {
				ctx->done = true;
				return 1;
			}
		}
		if (*symbol_id == new_symbol_id)
			(*symbol_counter)++;

		barrier_var(i);
		if (i >= STACK_MAX_LEN)
			return 1;

		event->stack[i] = *symbol_id;
		event->stack_len = i + 1;
		/* advance through ctx so the next callback invocation sees it */
		ctx->frame_ptr = frame->f_back;
	}
	return 0;
}
#endif /* USE_BPF_LOOP */

#ifdef GLOBAL_FUNC
__noinline
#elif defined(SUBPROGS)
static __noinline
#else
static __always_inline
#endif
int __on_event(struct bpf_raw_tracepoint_args *ctx)
{
	uint64_t pid_tgid = bpf_get_current_pid_tgid();
	pid_t pid = (pid_t)(pid_tgid >> 32);
	PidData *pidData = bpf_map_lookup_elem(&pidmap, &pid);

	if (!pidData)
		return 0;

	int zero = 0;
	Event *event = bpf_map_lookup_elem(&eventmap, &zero);

	if (!event)
		return 0;

	event->pid = pid;
	event->tid = (pid_t)pid_tgid;
	bpf_get_current_comm(&event->comm, sizeof(event->comm));

	event->user_stack_id = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
	event->kernel_stack_id = bpf_get_stackid(ctx, &stackmap, 0);

	void *thread_state_current = (void *)0;

	bpf_probe_read_user(&thread_state_current,
			    sizeof(thread_state_current),
			    (void *)(long)pidData->current_state_addr);

	struct task_struct *task = (struct task_struct *)bpf_get_current_task();
	void *tls_base = (void *)task;

	void *thread_state = pidData->use_tls ? get_thread_state(tls_base, pidData)
		: thread_state_current;
	event->thread_current = thread_state == thread_state_current;

	if (pidData->use_tls) {
		uint64_t pthread_created;
		uint64_t pthread_self;

		/* 'self' pointer in the glibc TCB (x86-64 layout assumed) */
		bpf_probe_read_user(&pthread_self, sizeof(pthread_self),
				    tls_base + 0x10);

		bpf_probe_read_user(&pthread_created,
				    sizeof(pthread_created),
				    thread_state +
				    pidData->offsets.PyThreadState_thread);
		event->pthread_match = pthread_created == pthread_self;
	} else {
		event->pthread_match = 1;
	}

	if (event->pthread_match || !pidData->use_tls) {
		void *frame_ptr;
		FrameData frame;
		Symbol sym = {};
		int cur_cpu = bpf_get_smp_processor_id();

		bpf_probe_read_user(&frame_ptr,
				    sizeof(frame_ptr),
				    thread_state +
				    pidData->offsets.PyThreadState_frame);

		int32_t *symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym);

		if (symbol_counter == NULL)
			return 0;
#ifdef USE_BPF_LOOP
		struct process_frame_ctx ctx = {
			.cur_cpu = cur_cpu,
			.symbol_counter = symbol_counter,
			.frame_ptr = frame_ptr,
			.frame = &frame,
			.pidData = pidData,
			.sym = &sym,
			.event = event,
		};

		bpf_loop(STACK_MAX_LEN, process_frame_callback, &ctx, 0);
		if (ctx.done)
			return 0;
#else
#ifdef NO_UNROLL
#pragma clang loop unroll(disable)
#else
#pragma clang loop unroll(full)
#endif /* NO_UNROLL */
		/* Unwind python stack */
		for (int i = 0; i < STACK_MAX_LEN; ++i) {
			if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) {
				int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu;
				int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, &sym);

				if (!symbol_id) {
					bpf_map_update_elem(&symbolmap, &sym, &zero, 0);
					symbol_id = bpf_map_lookup_elem(&symbolmap, &sym);
					if (!symbol_id)
						return 0;
				}
				if (*symbol_id == new_symbol_id)
					(*symbol_counter)++;
				event->stack[i] = *symbol_id;
				event->stack_len = i + 1;
				frame_ptr = frame.f_back;
			}
		}
#endif /* USE_BPF_LOOP */
		event->stack_complete = frame_ptr == NULL;
	} else {
		event->stack_complete = 1;
	}

	Stats *stats = bpf_map_lookup_elem(&statsmap, &zero);

	if (stats)
		stats->success++;

	event->has_meta = 0;
	bpf_perf_event_output(ctx, &perfmap, 0, event, offsetof(Event, metadata));
	return 0;
}

SEC("raw_tracepoint/kfree_skb")
int on_event(struct bpf_raw_tracepoint_args *ctx)
{
	int ret = 0;

	/* calling the handler several times multiplies the amount of code
	 * the verifier has to check
	 */
	ret |= __on_event(ctx);
	ret |= __on_event(ctx);
	ret |= __on_event(ctx);
	ret |= __on_event(ctx);
	ret |= __on_event(ctx);
	return ret;
}

char _license[] SEC("license") = "GPL";
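
/*
 * Usage sketch (illustrative only, not part of this test): a user-space
 * loader could populate 'pidmap' for a traced Python process and consume
 * events from 'perfmap' via libbpf. The object file name, the example PID,
 * and the address/offset values below are assumptions for illustration;
 * real values must be resolved for the target CPython build. Kept under
 * '#if 0' so this file still builds as a BPF program.
 */
#if 0
#include <stdio.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

static void handle_event(void *ctx, int cpu, void *data, __u32 size)
{
	const Event *e = data;

	printf("pid=%u comm=%s frames=%d\n", e->pid, e->comm, e->stack_len);
}

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("pyperf.bpf.o", NULL);
	struct perf_buffer *pb;
	PidData data = {};
	int pid = 1234;			/* hypothetical target PID */

	if (!obj || bpf_object__load(obj))
		return 1;

	/* placeholder configuration, not real addresses/offsets */
	data.use_tls = false;
	data.current_state_addr = 0xdeadbeef;
	bpf_map_update_elem(bpf_object__find_map_fd_by_name(obj, "pidmap"),
			    &pid, &data, BPF_ANY);

	bpf_program__attach(bpf_object__find_program_by_name(obj, "on_event"));

	pb = perf_buffer__new(bpf_object__find_map_fd_by_name(obj, "perfmap"),
			      8, handle_event, NULL, NULL, NULL);
	while (pb && perf_buffer__poll(pb, 100) >= 0)
		;
	return 0;
}
#endif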