// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libelf.h>
#include <gelf.h>
#include <unistd.h>
#include <linux/ptrace.h>
#include <linux/kernel.h>

/* s8 will be marked as poisoned, but it's also a register name on RISC-V,
 * so access it through the rv_s8 alias
 */
#if defined(__riscv)
#define rv_s8 s8
#endif

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_common.h"
#include "libbpf_internal.h"
#include "hashmap.h"

/* libbpf's USDT support consists of BPF-side state/code and user-space
 * state/code working together in concert. BPF-side parts are defined in the
 * usdt.bpf.h header library. User-space state is encapsulated by struct
 * usdt_manager and all the supporting code centered around usdt_manager.
 *
 * usdt.bpf.h defines two BPF maps that usdt_manager expects: a USDT spec map
 * and an IP-to-spec-ID map, which is an auxiliary map necessary for kernels
 * that don't support BPF cookie (see below). These two maps are implicitly
 * embedded into the user's final BPF object file when the user's code
 * includes usdt.bpf.h. This means that libbpf doesn't do anything special to
 * create these USDT support maps. They are created by the normal libbpf logic
 * of instantiating BPF maps when opening and loading a BPF object.
 *
 * As such, libbpf is basically unaware of the need to do anything
 * USDT-related until the very first call to bpf_program__attach_usdt(), which
 * can be called by the user explicitly or happen automatically during
 * skeleton attach (or, equivalently, through a generic bpf_program__attach()
 * call). At this point, libbpf will instantiate and initialize struct
 * usdt_manager and store it in bpf_object. USDT manager is a per-BPF-object
 * construct, as each independent BPF object might or might not have USDT
 * programs, and thus all the expected USDT-related state. There is no
 * coordination between two bpf_objects when it comes to USDT attachment:
 * they are oblivious of each other's existence, and libbpf simply deals with
 * each bpf_object's own USDT state.
 *
 * Quick crash course on USDTs.
 *
 * From a user-space application's point of view, a USDT is essentially just
 * a slightly special function call that normally has zero overhead, unless it
 * is being traced by some external entity (e.g., a BPF-based tool). Here's
 * how a typical application can trigger a USDT probe:
 *
 *   #include <sys/sdt.h>  // provided by systemtap-sdt-devel package
 *   // folly also provides similar functionality in folly/tracing/StaticTracepoint.h
 *
 *   STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
 *
 * A USDT is identified by its <provider-name>:<probe-name> pair of names.
 * Each individual USDT has a fixed number of arguments (3 in the above
 * example) and specifies the value of each argument as if it were a function
 * call.
 *
 * A USDT call is actually not a function call; it is instead replaced by
 * a single NOP instruction (thus zero overhead, effectively). But in addition
 * to that, those USDT macros generate special SHT_NOTE ELF records in the
 * .note.stapsdt ELF section.
 *
 * Here's an example USDT definition as emitted by `readelf -n <binary>`:
 *
 *   stapsdt              0x00000089       NT_STAPSDT (SystemTap probe descriptors)
 *     Provider: test
 *     Name: usdt12
 *     Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
 *     Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
 *
 * In this case we have USDT test:usdt12 with 12 arguments.
 *
 * Location and base are offsets used to calculate the absolute IP address of
 * that NOP instruction that the kernel can replace with an interrupt
 * instruction to trigger instrumentation code (a BPF program, for all that we
 * care about).
 *
 * Semaphore above is an optional feature. It records the address of a 2-byte
 * refcount variable (normally in the '.probes' ELF section) used for
 * signaling if there is anything attached to the USDT. This is useful for
 * user applications if, for example, they need to prepare some arguments that
 * are passed only to USDTs and that preparation is expensive. By checking if
 * the USDT is "activated", an application can avoid paying those costs
 * unnecessarily. A recent enough kernel has built-in support for
 * automatically managing this refcount, which libbpf expects and relies on.
 * If a USDT is defined without an associated semaphore, this value will be
 * zero. See selftests for semaphore examples.
 *
 * Arguments is the most interesting part. This USDT specification string
 * provides information about all the USDT arguments and their locations. The
 * part before the @ sign defines the byte size of the argument (1, 2, 4, or
 * 8) and whether the argument is signed or unsigned (a negative size means
 * signed). The part after the @ sign is an assembly-like definition of the
 * argument location (see [0] for more details). Technically, the assembler
 * can emit some pretty advanced definitions, but libbpf currently supports
 * the three most common cases:
 *   1) immediate constant, see 5th and 9th args above (-4@$5 and -4@$-9);
 *   2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
 *      whose value is in register %rdx";
 *   3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
 *      specifies a signed 32-bit integer stored at offset -1204 bytes from
 *      the memory address stored in %rbp.
 *
 * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
 *
 * During attachment, libbpf parses all the relevant USDT specifications and
 * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
 * code through the spec map. This allows BPF applications to quickly fetch
 * the actual argument values at runtime using simple BPF-side code.
 *
 * With the basics out of the way, let's go over the less immediately obvious
 * aspects of supporting USDTs.
 *
 * First, there is no special USDT BPF program type. It is actually just
 * a uprobe BPF program (which to the kernel, at least currently, is just
 * a kprobe program, so BPF_PROG_TYPE_KPROBE program type). The only
 * difference is that a uprobe is usually attached at the function entry,
 * while a USDT will normally be somewhere inside the function. But it should
 * always point to a NOP instruction, which makes such uprobes the fastest
 * uprobe kind.
 *
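 * To make this more concrete, here is a rough sketch of what a BPF-side
 * handler for the test:usdt12 probe from the example above could look like.
 * The section name, handler name and argument types are illustrative only;
 * see usdt.bpf.h and the USDT selftests for authoritative usage:
 *
 *   #include "usdt.bpf.h"
 *
 *   SEC("usdt/path/to/binary:test:usdt12")
 *   int BPF_USDT(handle_usdt12, long arg1, long arg2)
 *   {
 *       long arg3;
 *
 *       // explicitly fetch the third argument (zero-based index 2)
 *       bpf_usdt_arg(ctx, 2, &arg3);
 *       return 0;
 *   }
 *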
 * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
 * macro invocations can end up being inlined many, many times, depending on
 * specifics of each individual user application. So a single conceptual USDT
 * (identified by its provider:name pair of identifiers) generally corresponds
 * to multiple uprobe locations (USDT call sites) in different places in the
 * user application. Further, again due to inlining, each USDT call site might
 * end up having the same argument #N located in a different place. In one
 * call site it could be a constant, in another it will end up in a register,
 * and in yet another it could be some other register or even somewhere on the
 * stack.
 *
 * As such, "attaching to a USDT" means (in the general case) attaching the
 * same uprobe BPF program to multiple target locations in the user
 * application, each potentially having a completely different USDT spec
 * associated with it. To wire all of this together, libbpf allocates a unique
 * integer spec ID for each unique USDT spec. Spec IDs are allocated as
 * sequential small integers so that they can be used as keys in an array BPF
 * map (for performance reasons). Spec ID allocation and accounting is a big
 * part of what usdt_manager is about. This state has to be maintained per BPF
 * object and coordinated between different USDT attachments within the same
 * BPF object.
 *
 * The spec ID is the key in the spec BPF map, and the value is the actual
 * USDT spec laid out as struct usdt_spec. Each invocation of the BPF program
 * at runtime needs to know its associated spec ID. It gets it either through
 * the BPF cookie, which libbpf sets to the spec ID at attach time, or, if the
 * kernel is too old to support BPF cookie, through the IP-to-spec-ID map that
 * libbpf maintains in such a case. The latter means that some modes of
 * operation can't be supported without BPF cookie. One such mode is attaching
 * to a shared library "generically", without specifying a target process. In
 * that case, it's impossible to calculate absolute IP addresses for the
 * IP-to-spec-ID map, and thus such a mode is not supported without BPF cookie
 * support.
 *
 * Note that libbpf is using BPF cookie functionality for its own internal
 * needs, so the user can't rely on the BPF cookie feature directly. To that
 * end, libbpf provides conceptually equivalent USDT cookie support. It's
 * still a u64 user-provided value that can be associated with a USDT
 * attachment. Note that this will be the same value for all USDT call sites
 * within the same single *logical* USDT attachment. This makes sense because
 * to the user, attaching to a USDT is a single BPF program triggered for a
 * singular USDT probe. The fact that this is done at multiple actual
 * locations is a mostly hidden implementation detail. This USDT cookie value
 * can be fetched with the bpf_usdt_cookie(ctx) API provided by usdt.bpf.h.
 *
 * Lastly, while a single USDT can have tons of USDT call sites, it doesn't
 * necessarily have that many different USDT specs. It very well might be
 * that 1000 USDT call sites only need 5 different USDT specs, because all the
 * arguments are typically contained in a small set of registers or stack
 * locations. As such, it's wasteful to allocate as many USDT spec IDs as
 * there are USDT call sites. So libbpf tries to be frugal and performs
 * on-the-fly deduplication during a single USDT attachment to only allocate
 * the minimal required amount of unique USDT specs (and thus spec IDs). This
 * is trivially achieved by using the USDT spec string (the Arguments string
 * from the USDT note) as a lookup key in a hashmap. The USDT spec string
 * uniquely defines everything about how to fetch USDT arguments, so two USDT
 * call sites sharing a USDT spec string can safely share the same USDT spec
 * and spec ID. Note, this spec string deduplication happens only within a
 * single USDT attachment, so all deduplicated USDT specs share the same USDT
 * cookie value. This is not generally true for other USDT attachments within
 * the same BPF object, as even if the USDT spec string is the same, the USDT
 * cookie value can be different. It was deemed excessive to try to
 * deduplicate across independent USDT attachments by taking into account the
 * USDT spec string *and* the USDT cookie value, which would complicate spec
 * ID accounting significantly for little gain.
 */
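
/* For orientation, a minimal user-space sketch of how an application drives
 * the machinery described above (the skeleton, program and binary names are
 * made up for illustration; handle_error() is a hypothetical helper):
 *
 *   LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafe);
 *   struct bpf_link *link;
 *
 *   // pid == -1 means "any process that executes /path/to/binary"
 *   link = bpf_program__attach_usdt(skel->progs.handle_usdt12, -1,
 *                                   "/path/to/binary", "test", "usdt12",
 *                                   &opts);
 *   if (!link)
 *       handle_error(errno); // libbpf reports the failure via errno
 *
 * This single call performs all of the spec collection, deduplication and
 * multi-uprobe attachment discussed in the comment above.
 */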

#define USDT_BASE_SEC ".stapsdt.base"
#define USDT_SEMA_SEC ".probes"
#define USDT_NOTE_SEC ".note.stapsdt"
#define USDT_NOTE_TYPE 3
#define USDT_NOTE_NAME "stapsdt"

/* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */
enum usdt_arg_type {
	USDT_ARG_CONST,
	USDT_ARG_REG,
	USDT_ARG_REG_DEREF,
};

/* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
struct usdt_arg_spec {
	__u64 val_off;
	enum usdt_arg_type arg_type;
	short reg_off;
	bool arg_signed;
	char arg_bitshift;
};

/* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */
#define USDT_MAX_ARG_CNT 12

/* should match struct __bpf_usdt_spec from usdt.bpf.h */
struct usdt_spec {
	struct usdt_arg_spec args[USDT_MAX_ARG_CNT];
	__u64 usdt_cookie;
	short arg_cnt;
};
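
/* As a concrete illustration of the parsing done by parse_usdt_spec() and
 * parse_usdt_arg() below, the x86-64 arg spec token "-4@-1204(%rbp)" from the
 * example in the comment above would be recorded roughly as:
 *
 *   struct usdt_arg_spec arg = {
 *       .val_off = -1204,
 *       .arg_type = USDT_ARG_REG_DEREF,
 *       .reg_off = offsetof(struct pt_regs, rbp),
 *       .arg_signed = true,      // size prefix is negative (-4)
 *       .arg_bitshift = 32,      // 64 - 4 * 8
 *   };
 *
 * i.e., "read 4 bytes at offset -1204 from the address in %rbp and
 * sign-extend them to 64 bits".
 */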

struct usdt_note {
	const char *provider;
	const char *name;
	/* USDT args specification string, e.g.:
	 * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx"
	 */
	const char *args;
	long loc_addr;
	long base_addr;
	long sema_addr;
};

struct usdt_target {
	long abs_ip;
	long rel_ip;
	long sema_off;
	struct usdt_spec spec;
	const char *spec_str;
};

struct usdt_manager {
	struct bpf_map *specs_map;
	struct bpf_map *ip_to_spec_id_map;

	int *free_spec_ids;
	size_t free_spec_cnt;
	size_t next_free_spec_id;

	bool has_bpf_cookie;
	bool has_sema_refcnt;
};

struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
{
	static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
	struct usdt_manager *man;
	struct bpf_map *specs_map, *ip_to_spec_id_map;

	specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs");
	ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id");
	if (!specs_map || !ip_to_spec_id_map) {
		pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n");
		return ERR_PTR(-ESRCH);
	}

	man = calloc(1, sizeof(*man));
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->specs_map = specs_map;
	man->ip_to_spec_id_map = ip_to_spec_id_map;

	/* Detect if BPF cookie is supported for kprobes.
	 * We don't need IP-to-ID mapping if we can use BPF cookies.
	 * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
	 */
	man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE);

	/* Detect kernel support for automatic refcounting of USDT semaphore.
	 * If this is not supported, USDTs with semaphores will not be supported.
	 * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
	 */
	man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;

	return man;
}

void usdt_manager_free(struct usdt_manager *man)
{
	if (IS_ERR_OR_NULL(man))
		return;

	free(man->free_spec_ids);
	free(man);
}

static int sanity_check_usdt_elf(Elf *elf, const char *path)
{
	GElf_Ehdr ehdr;
	int endianness;

	if (elf_kind(elf) != ELF_K_ELF) {
		pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path);
		return -EBADF;
	}

	switch (gelf_getclass(elf)) {
	case ELFCLASS64:
		if (sizeof(void *) != 8) {
			pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path);
			return -EBADF;
		}
		break;
	case ELFCLASS32:
		if (sizeof(void *) != 4) {
			pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path);
			return -EBADF;
		}
		break;
	default:
		pr_warn("usdt: unsupported ELF class for '%s'\n", path);
		return -EBADF;
	}

	if (!gelf_getehdr(elf, &ehdr))
		return -EINVAL;

	if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) {
		pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n",
			path, ehdr.e_type);
		return -EBADF;
	}

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	endianness = ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	endianness = ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	if (endianness != ehdr.e_ident[EI_DATA]) {
		pr_warn("usdt: ELF endianness mismatch for '%s'\n", path);
		return -EBADF;
	}

	return 0;
}

static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn)
{
	Elf_Scn *sec = NULL;
	size_t shstrndx;

	if (elf_getshdrstrndx(elf, &shstrndx))
		return -EINVAL;

	/* check if ELF is corrupted and avoid calling elf_strptr if yes */
	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
		return -EINVAL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *name;

		if (!gelf_getshdr(sec, shdr))
			return -EINVAL;

		name = elf_strptr(elf, shstrndx, shdr->sh_name);
		if (name && strcmp(sec_name, name) == 0) {
			*scn = sec;
			return 0;
		}
	}

	return -ENOENT;
}

struct elf_seg {
	long start;
	long end;
	long offset;
	bool is_exec;
};

static int cmp_elf_segs(const void *_a, const void *_b)
{
	const struct elf_seg *a = _a;
	const struct elf_seg *b = _b;

	return a->start < b->start ? -1 : 1;
}

static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt)
{
	GElf_Phdr phdr;
	size_t n;
	int i, err;
	struct elf_seg *seg;
	void *tmp;

	*seg_cnt = 0;

	if (elf_getphdrnum(elf, &n)) {
		err = -errno;
		return err;
	}

	for (i = 0; i < n; i++) {
		if (!gelf_getphdr(elf, i, &phdr)) {
			err = -errno;
			return err;
		}

		pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n",
			 i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset,
			 (long)phdr.p_type, (long)phdr.p_flags);
		if (phdr.p_type != PT_LOAD)
			continue;

		tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
		if (!tmp)
			return -ENOMEM;

		*segs = tmp;
		seg = *segs + *seg_cnt;
		(*seg_cnt)++;

		seg->start = phdr.p_vaddr;
		seg->end = phdr.p_vaddr + phdr.p_memsz;
		seg->offset = phdr.p_offset;
		seg->is_exec = phdr.p_flags & PF_X;
	}

	if (*seg_cnt == 0) {
		pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path);
		return -ESRCH;
	}

	qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
	return 0;
}

static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
{
	char path[PATH_MAX], line[PATH_MAX], mode[16];
	size_t seg_start, seg_end, seg_off;
	struct elf_seg *seg;
	int tmp_pid, i, err;
	FILE *f;

	*seg_cnt = 0;

	/* Handle containerized binaries only accessible from
	 * /proc/<pid>/root/<path>. They will be reported as just /<path> in
	 * /proc/<pid>/maps.
	 */
	if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid)
		goto proceed;

	if (!realpath(lib_path, path)) {
		pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n",
			lib_path, -errno);
		libbpf_strlcpy(path, lib_path, sizeof(path));
	}

proceed:
	sprintf(line, "/proc/%d/maps", pid);
	f = fopen(line, "r");
	if (!f) {
		err = -errno;
		pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n",
			line, lib_path, err);
		return err;
	}

	/* We need to handle lines with no path at the end:
	 *
	 * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613   /usr/lib64/libc-2.17.so
	 * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0
	 * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598  /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so
	 */
	while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n",
		      &seg_start, &seg_end, mode, &seg_off, line) == 5) {
		void *tmp;

		/* to handle no path case (see above) we need to capture line
		 * without skipping any whitespaces. So we need to strip
		 * leading whitespaces manually here
		 */
		i = 0;
		while (isblank(line[i]))
			i++;
		if (strcmp(line + i, path) != 0)
			continue;

		pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n",
			 path, seg_start, seg_end, mode, seg_off);

		/* ignore non-executable sections for shared libs */
		if (mode[2] != 'x')
			continue;

		tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
		if (!tmp) {
			err = -ENOMEM;
			goto err_out;
		}

		*segs = tmp;
		seg = *segs + *seg_cnt;
		*seg_cnt += 1;

		seg->start = seg_start;
		seg->end = seg_end;
		seg->offset = seg_off;
		seg->is_exec = true;
	}

	if (*seg_cnt == 0) {
		pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n",
			lib_path, path, pid);
		err = -ESRCH;
		goto err_out;
	}

	qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
	err = 0;
err_out:
	fclose(f);
	return err;
}

static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
{
	struct elf_seg *seg;
	int i;

	/* for ELF binaries (both executables and shared libraries), we are
	 * given virtual address (absolute for executables, relative for
	 * libraries) which should match address range of [seg_start, seg_end)
	 */
	for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
		if (seg->start <= virtaddr && virtaddr < seg->end)
			return seg;
	}
	return NULL;
}

static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
{
	struct elf_seg *seg;
	int i;

	/* for VMA segments from /proc/<pid>/maps file, provided "address" is
	 * actually a file offset, so it should fall within the logical
	 * offset-based range of [offset_start, offset_end)
	 */
	for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
		if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
			return seg;
	}
	return NULL;
}

static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
			   const char *data, size_t name_off, size_t desc_off,
			   struct usdt_note *usdt_note);

static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);

static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
				const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
				struct usdt_target **out_targets, size_t *out_target_cnt)
{
	size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
	struct elf_seg *segs = NULL, *vma_segs = NULL;
	struct usdt_target *targets = NULL, *target;
	long base_addr = 0;
	Elf_Scn *notes_scn, *base_scn;
	GElf_Shdr base_shdr, notes_shdr;
	GElf_Ehdr ehdr;
	GElf_Nhdr nhdr;
	Elf_Data *data;
	int err;

	*out_targets = NULL;
	*out_target_cnt = 0;

	err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, &notes_shdr, &notes_scn);
	if (err) {
		pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path);
		return err;
	}

	if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) {
		pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path);
		return -EINVAL;
	}

	err = parse_elf_segs(elf, path, &segs, &seg_cnt);
	if (err) {
		pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err);
		goto err_out;
	}

	/* .stapsdt.base ELF section is optional, but is used for prelink
	 * offset compensation (see a big comment further below)
	 */
	if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0)
		base_addr = base_shdr.sh_addr;

	data = elf_getdata(notes_scn, 0);
	off = 0;
	while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) {
		long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0;
		struct usdt_note note;
		struct elf_seg *seg = NULL;
		void *tmp;

		err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, &note);
		if (err)
			goto err_out;

		if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0)
			continue;

		/* We need to compensate "prelink effect". See [0] for details,
		 * relevant parts quoted here:
		 *
		 * Each SDT probe also expands into a non-allocated ELF note. You can
		 * find this by looking at SHT_NOTE sections and decoding the format;
		 * see below for details. Because the note is non-allocated, it means
		 * there is no runtime cost, and also preserved in both stripped files
		 * and .debug files.
		 *
		 * However, this means that prelink won't adjust the note's contents
		 * for address offsets. Instead, this is done via the .stapsdt.base
		 * section. This is a special section that is added to the text. We
		 * will only ever have one of these sections in a final link and it
		 * will only ever be one byte long. Nothing about this section itself
		 * matters, we just use it as a marker to detect prelink address
		 * adjustments.
		 *
		 * Each probe note records the link-time address of the .stapsdt.base
		 * section alongside the probe PC address. The decoder compares the
		 * base address stored in the note with the .stapsdt.base section's
		 * sh_addr. Initially these are the same, but the section header will
		 * be adjusted by prelink. So the decoder applies the difference to
		 * the probe PC address to get the correct prelinked PC address; the
		 * same adjustment is applied to the semaphore address, if any.
		 *
		 * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
		 */
		usdt_abs_ip = note.loc_addr;
		if (base_addr)
			usdt_abs_ip += base_addr - note.base_addr;

		/* When attaching uprobes (which is what USDTs basically are)
		 * kernel expects file offset to be specified, not a relative
		 * virtual address, so we need to translate virtual address to
		 * file offset, for both ET_EXEC and ET_DYN binaries.
		 */
		seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
		if (!seg) {
			err = -ESRCH;
			pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
				usdt_provider, usdt_name, path, usdt_abs_ip);
			goto err_out;
		}
		if (!seg->is_exec) {
			err = -ESRCH;
			pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
				path, seg->start, seg->end, usdt_provider, usdt_name,
				usdt_abs_ip);
			goto err_out;
		}
		/* translate from virtual address to file offset */
		usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;
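		/* Hypothetical example of the translation above: for
		 * usdt_abs_ip 0x549df3 falling into an executable PT_LOAD
		 * segment with seg->start 0x400000 and seg->offset 0x0,
		 * usdt_rel_ip (the file offset handed to the kernel) would
		 * be 0x149df3. Numbers are made up for illustration only.
		 */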

		if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
			/* If we don't have BPF cookie support but need to
			 * attach to a shared library, we'll need to know and
			 * record absolute addresses of attach points due to
			 * the need to look up the USDT spec by absolute IP of
			 * the triggered uprobe. Doing this resolution is only
			 * possible when we have a specific PID of the process
			 * that's using specified shared library. BPF cookie
			 * removes the absolute address limitation as we don't
			 * need to do this lookup (we just use BPF cookie as
			 * an index of USDT spec), so for newer kernels with
			 * BPF cookie support libbpf supports USDT attachment
			 * to shared libraries with no PID filter.
			 */
			if (pid < 0) {
				pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n");
				err = -ENOTSUP;
				goto err_out;
			}

			/* vma_segs are lazily initialized only if necessary */
			if (vma_seg_cnt == 0) {
				err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
				if (err) {
					pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
						pid, path, err);
					goto err_out;
				}
			}

			seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
			if (!seg) {
				err = -ESRCH;
				pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
					usdt_provider, usdt_name, path, usdt_rel_ip);
				goto err_out;
			}

			usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
		}

		pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
			 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path,
			 note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
			 seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);

		/* Adjust semaphore address to be a file offset */
		if (note.sema_addr) {
			if (!man->has_sema_refcnt) {
				pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
					usdt_provider, usdt_name, path);
				err = -ENOTSUP;
				goto err_out;
			}

			seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
			if (!seg) {
				err = -ESRCH;
				pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
					usdt_provider, usdt_name, path, note.sema_addr);
				goto err_out;
			}
			if (seg->is_exec) {
				err = -ESRCH;
				pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n",
					path, seg->start, seg->end, usdt_provider, usdt_name,
					note.sema_addr);
				goto err_out;
			}

			usdt_sema_off = note.sema_addr - seg->start + seg->offset;

			pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
				 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
"exec" : "lib ", 755 path, note.sema_addr, note.base_addr, usdt_sema_off, 756 seg->start, seg->end, seg->offset); 757 } 758 759 /* Record adjusted addresses and offsets and parse USDT spec */ 760 tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets)); 761 if (!tmp) { 762 err = -ENOMEM; 763 goto err_out; 764 } 765 targets = tmp; 766 767 target = &targets[target_cnt]; 768 memset(target, 0, sizeof(*target)); 769 770 target->abs_ip = usdt_abs_ip; 771 target->rel_ip = usdt_rel_ip; 772 target->sema_off = usdt_sema_off; 773 774 /* notes.args references strings from ELF itself, so they can 775 * be referenced safely until elf_end() call 776 */ 777 target->spec_str = note.args; 778 779 err = parse_usdt_spec(&target->spec, ¬e, usdt_cookie); 780 if (err) 781 goto err_out; 782 783 target_cnt++; 784 } 785 786 *out_targets = targets; 787 *out_target_cnt = target_cnt; 788 err = target_cnt; 789 790 err_out: 791 free(segs); 792 free(vma_segs); 793 if (err < 0) 794 free(targets); 795 return err; 796 } 797 798 struct bpf_link_usdt { 799 struct bpf_link link; 800 801 struct usdt_manager *usdt_man; 802 803 size_t spec_cnt; 804 int *spec_ids; 805 806 size_t uprobe_cnt; 807 struct { 808 long abs_ip; 809 struct bpf_link *link; 810 } *uprobes; 811 }; 812 813 static int bpf_link_usdt_detach(struct bpf_link *link) 814 { 815 struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link); 816 struct usdt_manager *man = usdt_link->usdt_man; 817 int i; 818 819 for (i = 0; i < usdt_link->uprobe_cnt; i++) { 820 /* detach underlying uprobe link */ 821 bpf_link__destroy(usdt_link->uprobes[i].link); 822 /* there is no need to update specs map because it will be 823 * unconditionally overwritten on subsequent USDT attaches, 824 * but if BPF cookies are not used we need to remove entry 825 * from ip_to_spec_id map, otherwise we'll run into false 826 * conflicting IP errors 827 */ 828 if (!man->has_bpf_cookie) { 829 /* not much we can do about errors here */ 830 (void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map), 831 &usdt_link->uprobes[i].abs_ip); 832 } 833 } 834 835 /* try to return the list of previously used spec IDs to usdt_manager 836 * for future reuse for subsequent USDT attaches 837 */ 838 if (!man->free_spec_ids) { 839 /* if there were no free spec IDs yet, just transfer our IDs */ 840 man->free_spec_ids = usdt_link->spec_ids; 841 man->free_spec_cnt = usdt_link->spec_cnt; 842 usdt_link->spec_ids = NULL; 843 } else { 844 /* otherwise concat IDs */ 845 size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt; 846 int *new_free_ids; 847 848 new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt, 849 sizeof(*new_free_ids)); 850 /* If we couldn't resize free_spec_ids, we'll just leak 851 * a bunch of free IDs; this is very unlikely to happen and if 852 * system is so exhausted on memory, it's the least of user's 853 * concerns, probably. 854 * So just do our best here to return those IDs to usdt_manager. 
		if (new_free_ids) {
			memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
			       usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
			man->free_spec_ids = new_free_ids;
			man->free_spec_cnt = new_cnt;
		}
	}

	return 0;
}

static void bpf_link_usdt_dealloc(struct bpf_link *link)
{
	struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);

	free(usdt_link->spec_ids);
	free(usdt_link->uprobes);
	free(usdt_link);
}

static size_t specs_hash_fn(long key, void *ctx)
{
	return str_hash((char *)key);
}

static bool specs_equal_fn(long key1, long key2, void *ctx)
{
	return strcmp((char *)key1, (char *)key2) == 0;
}

static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
			    struct bpf_link_usdt *link, struct usdt_target *target,
			    int *spec_id, bool *is_new)
{
	long tmp;
	void *new_ids;
	int err;

	/* check if we already allocated spec ID for this spec string */
	if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
		*spec_id = tmp;
		*is_new = false;
		return 0;
	}

	/* otherwise it's a new ID that needs to be set up in specs map and
	 * returned back to usdt_manager when USDT link is detached
	 */
	new_ids = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
	if (!new_ids)
		return -ENOMEM;
	link->spec_ids = new_ids;

	/* get next free spec ID, giving preference to free list, if not empty */
	if (man->free_spec_cnt) {
		*spec_id = man->free_spec_ids[man->free_spec_cnt - 1];

		/* cache spec ID for current spec string for future lookups */
		err = hashmap__add(specs_hash, target->spec_str, *spec_id);
		if (err)
			return err;

		man->free_spec_cnt--;
	} else {
		/* don't allocate spec ID bigger than what fits in specs map */
		if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map))
			return -E2BIG;

		*spec_id = man->next_free_spec_id;

		/* cache spec ID for current spec string for future lookups */
		err = hashmap__add(specs_hash, target->spec_str, *spec_id);
		if (err)
			return err;

		man->next_free_spec_id++;
	}

	/* remember new spec ID in the link for later return back to free list on detach */
	link->spec_ids[link->spec_cnt] = *spec_id;
	link->spec_cnt++;
	*is_new = true;
	return 0;
}

struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
					  pid_t pid, const char *path,
					  const char *usdt_provider, const char *usdt_name,
					  __u64 usdt_cookie)
{
	int i, fd, err, spec_map_fd, ip_map_fd;
	LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct hashmap *specs_hash = NULL;
	struct bpf_link_usdt *link = NULL;
	struct usdt_target *targets = NULL;
	size_t target_cnt;
	Elf *elf;

	spec_map_fd = bpf_map__fd(man->specs_map);
	ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);

	/* TODO: perform path resolution similar to uprobe's */
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err);
		return libbpf_err_ptr(err);
	}

	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
	if (!elf) {
		err = -EBADF;
		pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1));
		goto err_out;
	}

	err = sanity_check_usdt_elf(elf, path);
	if (err)
		goto err_out;

	/* normalize PID filter */
	if (pid < 0)
		pid = -1;
	else if (pid == 0)
		pid = getpid();

	/* discover USDT in given binary, optionally limiting
	 * activations to a given PID, if pid > 0
	 */
	err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name,
				   usdt_cookie, &targets, &target_cnt);
	if (err <= 0) {
		err = (err == 0) ? -ENOENT : err;
		goto err_out;
	}

	specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL);
	if (IS_ERR(specs_hash)) {
		err = PTR_ERR(specs_hash);
		goto err_out;
	}

	link = calloc(1, sizeof(*link));
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}

	link->usdt_man = man;
	link->link.detach = &bpf_link_usdt_detach;
	link->link.dealloc = &bpf_link_usdt_dealloc;

	link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
	if (!link->uprobes) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < target_cnt; i++) {
		struct usdt_target *target = &targets[i];
		struct bpf_link *uprobe_link;
		bool is_new;
		int spec_id;

		/* Spec ID can be either reused or newly allocated. If it is
		 * newly allocated, we'll need to fill out spec map, otherwise
		 * entire spec should be valid and can be just used by a new
		 * uprobe. We reuse spec when USDT arg spec is identical. We
		 * also never share specs between two different USDT
		 * attachments ("links"), so all the reused specs already
		 * share USDT cookie value implicitly.
		 */
		err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new);
		if (err)
			goto err_out;

		if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
			err = -errno;
			pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n",
				spec_id, usdt_provider, usdt_name, path, err);
			goto err_out;
		}
		if (!man->has_bpf_cookie &&
		    bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) {
			err = -errno;
			if (err == -EEXIST) {
				pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
					spec_id, usdt_provider, usdt_name, path);
			} else {
				pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n",
					target->abs_ip, spec_id, usdt_provider, usdt_name,
					path, err);
			}
			goto err_out;
		}

		opts.ref_ctr_offset = target->sema_off;
		opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
		uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
							      target->rel_ip, &opts);
		err = libbpf_get_error(uprobe_link);
		if (err) {
			pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
				i, usdt_provider, usdt_name, path, err);
			goto err_out;
		}

		link->uprobes[i].link = uprobe_link;
		link->uprobes[i].abs_ip = target->abs_ip;
		link->uprobe_cnt++;
	}

	free(targets);
	hashmap__free(specs_hash);
	elf_end(elf);
	close(fd);

	return &link->link;

err_out:
	if (link)
		bpf_link__destroy(&link->link);
	free(targets);
	hashmap__free(specs_hash);
	if (elf)
		elf_end(elf);
	close(fd);
	return libbpf_err_ptr(err);
}

/* Parse out USDT ELF note from '.note.stapsdt' section.
 * Logic inspired by perf's code.
 */
static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
			   const char *data, size_t name_off, size_t desc_off,
			   struct usdt_note *note)
{
	const char *provider, *name, *args;
	long addrs[3];
	size_t len;

	/* sanity check USDT note name and type first */
	if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0)
		return -EINVAL;
	if (nhdr->n_type != USDT_NOTE_TYPE)
		return -EINVAL;

	/* sanity check USDT note contents ("description" in ELF terminology) */
	len = nhdr->n_descsz;
	data = data + desc_off;

	/* +3 is the very minimum required to store three empty strings */
	if (len < sizeof(addrs) + 3)
		return -EINVAL;

	/* get location, base, and semaphore addrs */
	memcpy(&addrs, data, sizeof(addrs));

	/* parse string fields: provider, name, args */
	provider = data + sizeof(addrs);

	name = (const char *)memchr(provider, '\0', data + len - provider);
	if (!name) /* non-zero-terminated provider */
		return -EINVAL;
	name++;
	if (name >= data + len || *name == '\0') /* missing or empty name */
		return -EINVAL;

	args = memchr(name, '\0', data + len - name);
	if (!args) /* non-zero-terminated name */
		return -EINVAL;
	++args;
	if (args >= data + len) /* missing arguments spec */
		return -EINVAL;

	note->provider = provider;
	note->name = name;
	if (*args == '\0' || *args == ':')
		note->args = "";
	else
		note->args = args;
	note->loc_addr = addrs[0];
	note->base_addr = addrs[1];
	note->sema_addr = addrs[2];

	return 0;
}

static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz);

static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
{
	struct usdt_arg_spec *arg;
	const char *s;
	int arg_sz, len;

	spec->usdt_cookie = usdt_cookie;
	spec->arg_cnt = 0;

	s = note->args;
	while (s[0]) {
		if (spec->arg_cnt >= USDT_MAX_ARG_CNT) {
			pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n",
				USDT_MAX_ARG_CNT, note->provider, note->name, note->args);
			return -E2BIG;
		}

		arg = &spec->args[spec->arg_cnt];
		len = parse_usdt_arg(s, spec->arg_cnt, arg, &arg_sz);
		if (len < 0)
			return len;

		arg->arg_signed = arg_sz < 0;
		if (arg_sz < 0)
			arg_sz = -arg_sz;

		switch (arg_sz) {
		case 1: case 2: case 4: case 8:
			arg->arg_bitshift = 64 - arg_sz * 8;
			break;
		default:
			pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
				spec->arg_cnt, s, arg_sz);
			return -EINVAL;
		}

		s += len;
		spec->arg_cnt++;
	}

	return 0;
}
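
/* A quick worked example of the size handling above: for the "-2@%cx"
 * argument from the readelf example at the top of this file, parse_usdt_arg()
 * returns arg_sz == -2, so arg_signed becomes true, arg_sz is flipped to 2
 * and arg_bitshift becomes 64 - 2 * 8 == 48. The BPF-side code in usdt.bpf.h
 * can then use that shift to trim the raw 8-byte value down to a properly
 * sign-extended 16-bit argument.
 */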
"cx", "cl"}, reg_off(rcx, ecx) }, 1208 { {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) }, 1209 { {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) }, 1210 { {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) }, 1211 { {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) }, 1212 { {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) }, 1213 #undef reg_off 1214 #ifdef __x86_64__ 1215 { {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) }, 1216 { {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) }, 1217 { {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) }, 1218 { {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) }, 1219 { {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) }, 1220 { {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) }, 1221 { {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) }, 1222 { {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) }, 1223 #endif 1224 }; 1225 int i, j; 1226 1227 for (i = 0; i < ARRAY_SIZE(reg_map); i++) { 1228 for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) { 1229 if (strcmp(reg_name, reg_map[i].names[j]) == 0) 1230 return reg_map[i].pt_regs_off; 1231 } 1232 } 1233 1234 pr_warn("usdt: unrecognized register '%s'\n", reg_name); 1235 return -ENOENT; 1236 } 1237 1238 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz) 1239 { 1240 char reg_name[16]; 1241 int len, reg_off; 1242 long off; 1243 1244 if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", arg_sz, &off, reg_name, &len) == 3) { 1245 /* Memory dereference case, e.g., -4@-20(%rbp) */ 1246 arg->arg_type = USDT_ARG_REG_DEREF; 1247 arg->val_off = off; 1248 reg_off = calc_pt_regs_off(reg_name); 1249 if (reg_off < 0) 1250 return reg_off; 1251 arg->reg_off = reg_off; 1252 } else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", arg_sz, reg_name, &len) == 2) { 1253 /* Memory dereference case without offset, e.g., 8@(%rsp) */ 1254 arg->arg_type = USDT_ARG_REG_DEREF; 1255 arg->val_off = 0; 1256 reg_off = calc_pt_regs_off(reg_name); 1257 if (reg_off < 0) 1258 return reg_off; 1259 arg->reg_off = reg_off; 1260 } else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) { 1261 /* Register read case, e.g., -4@%eax */ 1262 arg->arg_type = USDT_ARG_REG; 1263 arg->val_off = 0; 1264 1265 reg_off = calc_pt_regs_off(reg_name); 1266 if (reg_off < 0) 1267 return reg_off; 1268 arg->reg_off = reg_off; 1269 } else if (sscanf(arg_str, " %d @ $%ld %n", arg_sz, &off, &len) == 2) { 1270 /* Constant value case, e.g., 4@$71 */ 1271 arg->arg_type = USDT_ARG_CONST; 1272 arg->val_off = off; 1273 arg->reg_off = 0; 1274 } else { 1275 pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str); 1276 return -EINVAL; 1277 } 1278 1279 return len; 1280 } 1281 1282 #elif defined(__s390x__) 1283 1284 /* Do not support __s390__ for now, since user_pt_regs is broken with -m31. 

static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
	unsigned int reg;
	int len;
	long off;

	if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", arg_sz, &off, &reg, &len) == 3) {
		/* Memory dereference case, e.g., -2@-28(%r15) */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = off;
		if (reg > 15) {
			pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
			return -EINVAL;
		}
		arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
	} else if (sscanf(arg_str, " %d @ %%r%u %n", arg_sz, &reg, &len) == 2) {
		/* Register read case, e.g., -8@%r0 */
		arg->arg_type = USDT_ARG_REG;
		arg->val_off = 0;
		if (reg > 15) {
			pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
			return -EINVAL;
		}
		arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
		/* Constant value case, e.g., 4@71 */
		arg->arg_type = USDT_ARG_CONST;
		arg->val_off = off;
		arg->reg_off = 0;
	} else {
		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
		return -EINVAL;
	}

	return len;
}

#elif defined(__aarch64__)

static int calc_pt_regs_off(const char *reg_name)
{
	int reg_num;

	if (sscanf(reg_name, "x%d", &reg_num) == 1) {
		if (reg_num >= 0 && reg_num < 31)
			return offsetof(struct user_pt_regs, regs[reg_num]);
	} else if (strcmp(reg_name, "sp") == 0) {
		return offsetof(struct user_pt_regs, sp);
	}
	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
	return -ENOENT;
}

static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
	char reg_name[16];
	int len, reg_off;
	long off;

	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , %ld ] %n", arg_sz, reg_name, &off, &len) == 3) {
		/* Memory dereference case, e.g., -4@[sp, 96] */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = off;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
		/* Memory dereference case, e.g., -4@[sp] */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
		/* Constant value case, e.g., 4@5 */
		arg->arg_type = USDT_ARG_CONST;
		arg->val_off = off;
		arg->reg_off = 0;
	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
		/* Register read case, e.g., -8@x4 */
		arg->arg_type = USDT_ARG_REG;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else {
		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
		return -EINVAL;
	}

	return len;
}

#elif defined(__riscv)

static int calc_pt_regs_off(const char *reg_name)
{
	static struct {
		const char *name;
		size_t pt_regs_off;
	} reg_map[] = {
		{ "ra", offsetof(struct user_regs_struct, ra) },
		{ "sp", offsetof(struct user_regs_struct, sp) },
		{ "gp", offsetof(struct user_regs_struct, gp) },
		{ "tp", offsetof(struct user_regs_struct, tp) },
		{ "a0", offsetof(struct user_regs_struct, a0) },
		{ "a1", offsetof(struct user_regs_struct, a1) },
		{ "a2", offsetof(struct user_regs_struct, a2) },
		{ "a3", offsetof(struct user_regs_struct, a3) },
		{ "a4", offsetof(struct user_regs_struct, a4) },
		{ "a5", offsetof(struct user_regs_struct, a5) },
		{ "a6", offsetof(struct user_regs_struct, a6) },
		{ "a7", offsetof(struct user_regs_struct, a7) },
		{ "s0", offsetof(struct user_regs_struct, s0) },
		{ "s1", offsetof(struct user_regs_struct, s1) },
		{ "s2", offsetof(struct user_regs_struct, s2) },
		{ "s3", offsetof(struct user_regs_struct, s3) },
		{ "s4", offsetof(struct user_regs_struct, s4) },
		{ "s5", offsetof(struct user_regs_struct, s5) },
		{ "s6", offsetof(struct user_regs_struct, s6) },
		{ "s7", offsetof(struct user_regs_struct, s7) },
		{ "s8", offsetof(struct user_regs_struct, rv_s8) },
		{ "s9", offsetof(struct user_regs_struct, s9) },
		{ "s10", offsetof(struct user_regs_struct, s10) },
		{ "s11", offsetof(struct user_regs_struct, s11) },
		{ "t0", offsetof(struct user_regs_struct, t0) },
		{ "t1", offsetof(struct user_regs_struct, t1) },
		{ "t2", offsetof(struct user_regs_struct, t2) },
		{ "t3", offsetof(struct user_regs_struct, t3) },
		{ "t4", offsetof(struct user_regs_struct, t4) },
		{ "t5", offsetof(struct user_regs_struct, t5) },
		{ "t6", offsetof(struct user_regs_struct, t6) },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
		if (strcmp(reg_name, reg_map[i].name) == 0)
			return reg_map[i].pt_regs_off;
	}

	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
	return -ENOENT;
}

static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
	char reg_name[16];
	int len, reg_off;
	long off;

	if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", arg_sz, &off, reg_name, &len) == 3) {
		/* Memory dereference case, e.g., -8@-88(s0) */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = off;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
		/* Constant value case, e.g., 4@5 */
		arg->arg_type = USDT_ARG_CONST;
		arg->val_off = off;
		arg->reg_off = 0;
	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
		/* Register read case, e.g., -8@a1 */
		arg->arg_type = USDT_ARG_REG;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else {
		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
		return -EINVAL;
	}

	return len;
}

#elif defined(__arm__)

static int calc_pt_regs_off(const char *reg_name)
{
	static struct {
		const char *name;
		size_t pt_regs_off;
	} reg_map[] = {
		{ "r0", offsetof(struct pt_regs, uregs[0]) },
		{ "r1", offsetof(struct pt_regs, uregs[1]) },
		{ "r2", offsetof(struct pt_regs, uregs[2]) },
		{ "r3", offsetof(struct pt_regs, uregs[3]) },
		{ "r4", offsetof(struct pt_regs, uregs[4]) },
		{ "r5", offsetof(struct pt_regs, uregs[5]) },
		{ "r6", offsetof(struct pt_regs, uregs[6]) },
		{ "r7", offsetof(struct pt_regs, uregs[7]) },
		{ "r8", offsetof(struct pt_regs, uregs[8]) },
		{ "r9", offsetof(struct pt_regs, uregs[9]) },
		{ "r10", offsetof(struct pt_regs, uregs[10]) },
		{ "fp", offsetof(struct pt_regs, uregs[11]) },
		{ "ip", offsetof(struct pt_regs, uregs[12]) },
		{ "sp", offsetof(struct pt_regs, uregs[13]) },
		{ "lr", offsetof(struct pt_regs, uregs[14]) },
		{ "pc", offsetof(struct pt_regs, uregs[15]) },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
		if (strcmp(reg_name, reg_map[i].name) == 0)
			return reg_map[i].pt_regs_off;
	}

	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
	return -ENOENT;
}

static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
	char reg_name[16];
	int len, reg_off;
	long off;

	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , #%ld ] %n",
		   arg_sz, reg_name, &off, &len) == 3) {
		/* Memory dereference case, e.g., -4@[fp, #96] */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = off;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
		/* Memory dereference case, e.g., -4@[sp] */
		arg->arg_type = USDT_ARG_REG_DEREF;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else if (sscanf(arg_str, " %d @ #%ld %n", arg_sz, &off, &len) == 2) {
		/* Constant value case, e.g., 4@#5 */
		arg->arg_type = USDT_ARG_CONST;
		arg->val_off = off;
		arg->reg_off = 0;
	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
		/* Register read case, e.g., -8@r4 */
		arg->arg_type = USDT_ARG_REG;
		arg->val_off = 0;
		reg_off = calc_pt_regs_off(reg_name);
		if (reg_off < 0)
			return reg_off;
		arg->reg_off = reg_off;
	} else {
		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
		return -EINVAL;
	}

	return len;
}

#else

static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
{
	pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
	return -ENOTSUP;
}

#endif