xref: /openbmc/linux/tools/lib/bpf/libbpf.c (revision a17922de)
1 // SPDX-License-Identifier: LGPL-2.1
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation;
14  * version 2.1 of the License (not later!)
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with this program; if not,  see <http://www.gnu.org/licenses>
23  */
24 
25 #define _GNU_SOURCE
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdarg.h>
29 #include <libgen.h>
30 #include <inttypes.h>
31 #include <string.h>
32 #include <unistd.h>
33 #include <fcntl.h>
34 #include <errno.h>
35 #include <perf-sys.h>
36 #include <asm/unistd.h>
37 #include <linux/err.h>
38 #include <linux/kernel.h>
39 #include <linux/bpf.h>
40 #include <linux/list.h>
41 #include <linux/limits.h>
42 #include <sys/stat.h>
43 #include <sys/types.h>
44 #include <sys/vfs.h>
45 #include <tools/libc_compat.h>
46 #include <libelf.h>
47 #include <gelf.h>
48 
49 #include "libbpf.h"
50 #include "bpf.h"
51 #include "btf.h"
52 
53 #ifndef EM_BPF
54 #define EM_BPF 247
55 #endif
56 
57 #ifndef BPF_FS_MAGIC
58 #define BPF_FS_MAGIC		0xcafe4a11
59 #endif
60 
61 #define __printf(a, b)	__attribute__((format(printf, a, b)))
62 
63 __printf(1, 2)
64 static int __base_pr(const char *format, ...)
65 {
66 	va_list args;
67 	int err;
68 
69 	va_start(args, format);
70 	err = vfprintf(stderr, format, args);
71 	va_end(args);
72 	return err;
73 }
74 
75 static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
76 static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
77 static __printf(1, 2) libbpf_print_fn_t __pr_debug;
78 
/*
 * Dispatch one log line to @func with the "libbpf: " prefix prepended.
 * A NULL callback silences that log level entirely (pr_debug is off by
 * default since __pr_debug starts out NULL).
 */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

/* Per-level logging helpers; callbacks are installed via libbpf_set_print(). */
#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
88 
/*
 * Install the callbacks used by pr_warning()/pr_info()/pr_debug().
 * Any argument may be NULL, which silences the corresponding level
 * (__pr() checks for a NULL callback before invoking it).
 */
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
97 
98 #define STRERR_BUFSIZE  128
99 
/*
 * Run @action, store its result in @err, and jump to @out on non-zero.
 * NOTE(review): appears unused in this file — confirm before removing.
 */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
/* Free *ptr and poison the pointer so a later double-free is a no-op. */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

/*
 * Close fd only if it is valid (>= 0), then reset it to -1.  Evaluates
 * to close()'s return value (0 when fd was already invalid).
 */
#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif
120 
121 #ifdef HAVE_LIBELF_MMAP_SUPPORT
122 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
123 #else
124 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
125 #endif
126 
127 /*
128  * bpf_prog should be a better name but it has been used in
129  * linux/filter.h.
130  */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Symbol name of the program; ".text" for the text section itself. */
	char *name;
	/* Target netdev ifindex for the program; presumably used for
	 * device offload — confirm against bpf_load_program callers. */
	int prog_ifindex;
	/* Name of the ELF section the instructions came from. */
	char *section_name;
	/* Heap copy of the section's instructions (owned by this struct). */
	struct bpf_insn *insns;
	/* insns_cnt: total instructions in @insns.  main_prog_cnt: count
	 * before .text was appended by bpf_program__reloc_text(); 0 until
	 * a pseudo-call relocation forces that append. */
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One entry per relocation collected for this program. */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map-fd load (BPF_LD|BPF_IMM|BPF_DW) */
			RELO_CALL,	/* bpf-to-bpf pseudo call into .text */
		} type;
		int insn_idx;		/* instruction the relo applies to */
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: symbol offset in .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/* Loaded instances; nr == -1 means opened but never loaded
	 * (see bpf_program__unload()). */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back-pointer to the owning object. */
	struct bpf_object *obj;
	/* User private data plus optional destructor run at exit time. */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
};
166 
struct bpf_map {
	/* Kernel map fd; -1 until created or reused (see
	 * bpf_object__init_maps()/bpf_object__create_maps()). */
	int fd;
	/* Map name, duplicated from the ELF symbol string. */
	char *name;
	/* Symbol st_value, i.e. byte offset within the "maps" section;
	 * used as the sort key and matched against relocations. */
	size_t offset;
	/* Target netdev ifindex passed to map creation. */
	int map_ifindex;
	/* Definition copied from the object file's "maps" section. */
	struct bpf_map_def def;
	/* BTF type ids for key/value, resolved in bpf_map_find_btf_info();
	 * 0 when no BTF info is available. */
	uint32_t btf_key_type_id;
	uint32_t btf_value_type_id;
	/* User private data plus optional destructor. */
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};
178 
179 static LIST_HEAD(bpf_objects_list);
180 
struct bpf_object {
	/* License string copied from the "license" section; the buffer is
	 * zero-initialized and the copy is capped at 63 bytes, so it is
	 * always NUL-terminated. */
	char license[64];
	/* Value of the 4-byte "version" section, if present. */
	u32 kern_version;

	/* Array of programs parsed from executable sections. */
	struct bpf_program *programs;
	size_t nr_programs;
	/* Maps parsed from the "maps" section, sorted by offset. */
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	/* Set when any RELO_CALL relocation is seen (bpf-to-bpf calls). */
	bool has_pseudo_calls;

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		/* Caller-supplied in-memory ELF image, if any; not owned. */
		void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		/* Section index of the symbol string table (.strtab). */
		size_t strtabidx;
		/* Collected SHT_REL sections, processed after parsing. */
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;
		int nr_reloc;
		/* Section indices of "maps" (-1 if absent) and ".text". */
		int maps_shndx;
		int text_shndx;
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	/* Parsed .BTF section, or NULL when absent/unparseable. */
	struct btf *btf;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	/* Object path, stored inline (flexible array member). */
	char path[];
};
227 #define obj_elf_valid(o)	((o)->efile.elf)
228 
229 static void bpf_program__unload(struct bpf_program *prog)
230 {
231 	int i;
232 
233 	if (!prog)
234 		return;
235 
236 	/*
237 	 * If the object is opened but the program was never loaded,
238 	 * it is possible that prog->instances.nr == -1.
239 	 */
240 	if (prog->instances.nr > 0) {
241 		for (i = 0; i < prog->instances.nr; i++)
242 			zclose(prog->instances.fds[i]);
243 	} else if (prog->instances.nr != -1) {
244 		pr_warning("Internal error: instances.nr is %d\n",
245 			   prog->instances.nr);
246 	}
247 
248 	prog->instances.nr = -1;
249 	zfree(&prog->instances.fds);
250 }
251 
/*
 * Release everything owned by @prog: user private data (via its
 * destructor), loaded instances, and all heap buffers.  The struct
 * itself is left reusable (idx reset to -1).  NULL-safe.
 */
static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	/* Run the user's destructor before tearing anything else down. */
	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}
273 
274 static int
275 bpf_program__init(void *data, size_t size, char *section_name, int idx,
276 		  struct bpf_program *prog)
277 {
278 	if (size < sizeof(struct bpf_insn)) {
279 		pr_warning("corrupted section '%s'\n", section_name);
280 		return -EINVAL;
281 	}
282 
283 	bzero(prog, sizeof(*prog));
284 
285 	prog->section_name = strdup(section_name);
286 	if (!prog->section_name) {
287 		pr_warning("failed to alloc name for prog under section(%d) %s\n",
288 			   idx, section_name);
289 		goto errout;
290 	}
291 
292 	prog->insns = malloc(size);
293 	if (!prog->insns) {
294 		pr_warning("failed to alloc insns for prog under section %s\n",
295 			   section_name);
296 		goto errout;
297 	}
298 	prog->insns_cnt = size / sizeof(struct bpf_insn);
299 	memcpy(prog->insns, data,
300 	       prog->insns_cnt * sizeof(struct bpf_insn));
301 	prog->idx = idx;
302 	prog->instances.fds = NULL;
303 	prog->instances.nr = -1;
304 	prog->type = BPF_PROG_TYPE_KPROBE;
305 
306 	return 0;
307 errout:
308 	bpf_program__exit(prog);
309 	return -ENOMEM;
310 }
311 
/*
 * Parse one executable section into a bpf_program and append it to
 * obj->programs, growing the array by one.  Returns 0 on success or a
 * negative error code; on failure the existing programs array is left
 * untouched.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	/* Build the program in a local first so a failed realloc below
	 * cannot leave a half-initialized entry in obj->programs. */
	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	/* Shallow struct copy: heap pointers inside prog move with it. */
	progs[nr_progs] = prog;
	return 0;
}
346 
/*
 * Give every program its name: the first STB_GLOBAL symbol found in the
 * program's section, or ".text" for the text section (which may have no
 * global symbol of its own).  Returns 0 on success or a negative
 * LIBBPF_ERRNO__*/-errno code.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* Scan the symbol table for a global symbol living in this
		 * program's section; stop at the first match. */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		/* .text holds subprograms and may legitimately lack a
		 * global symbol; fall back to a fixed name. */
		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
399 
/*
 * Allocate a new bpf_object for @path (stored inline via the flexible
 * array member) and link it into the global bpf_objects_list.  Returns
 * the object or ERR_PTR(-ENOMEM).
 *
 * NOTE(review): calloc leaves efile.text_shndx at 0, while maps_shndx
 * is explicitly set to -1 — confirm section index 0 (SHN_UNDEF) is a
 * safe "no .text seen" sentinel for the comparisons in this file.
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* One allocation covers the struct plus the path string. */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
431 
432 static void bpf_object__elf_finish(struct bpf_object *obj)
433 {
434 	if (!obj_elf_valid(obj))
435 		return;
436 
437 	if (obj->efile.elf) {
438 		elf_end(obj->efile.elf);
439 		obj->efile.elf = NULL;
440 	}
441 	obj->efile.symbols = NULL;
442 
443 	zfree(&obj->efile.reloc);
444 	obj->efile.nr_reloc = 0;
445 	zclose(obj->efile.fd);
446 	obj->efile.obj_buf = NULL;
447 	obj->efile.obj_buf_sz = 0;
448 }
449 
/*
 * Open an ELF session for @obj, either from the caller-supplied memory
 * buffer (obj_buf) or by opening obj->path, and validate the header:
 * must be a relocatable (ET_REL) eBPF object.  Returns 0 on success or
 * a negative error code; all partial state is torn down on failure.
 */
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			pr_warning("failed to open %s: %s\n", obj->path,
					strerror(errno));
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
				LIBBPF_ELF_C_READ_MMAP,
				NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
				obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
				obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
508 
509 static int
510 bpf_object__check_endianness(struct bpf_object *obj)
511 {
512 	static unsigned int const endian = 1;
513 
514 	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
515 	case ELFDATA2LSB:
516 		/* We are big endian, BPF obj is little endian. */
517 		if (*(unsigned char const *)&endian != 1)
518 			goto mismatch;
519 		break;
520 
521 	case ELFDATA2MSB:
522 		/* We are little endian, BPF obj is big endian. */
523 		if (*(unsigned char const *)&endian != 0)
524 			goto mismatch;
525 		break;
526 	default:
527 		return -LIBBPF_ERRNO__ENDIAN;
528 	}
529 
530 	return 0;
531 
532 mismatch:
533 	pr_warning("Error: endianness mismatch.\n");
534 	return -LIBBPF_ERRNO__ENDIAN;
535 }
536 
537 static int
538 bpf_object__init_license(struct bpf_object *obj,
539 			 void *data, size_t size)
540 {
541 	memcpy(obj->license, data,
542 	       min(size, sizeof(obj->license) - 1));
543 	pr_debug("license of %s is %s\n", obj->path, obj->license);
544 	return 0;
545 }
546 
547 static int
548 bpf_object__init_kversion(struct bpf_object *obj,
549 			  void *data, size_t size)
550 {
551 	u32 kver;
552 
553 	if (size != sizeof(kver)) {
554 		pr_warning("invalid kver section in %s\n", obj->path);
555 		return -LIBBPF_ERRNO__FORMAT;
556 	}
557 	memcpy(&kver, data, sizeof(kver));
558 	obj->kern_version = kver;
559 	pr_debug("kernel version of %s is %x\n", obj->path,
560 		 obj->kern_version);
561 	return 0;
562 }
563 
564 static int compare_bpf_map(const void *_a, const void *_b)
565 {
566 	const struct bpf_map *a = _a;
567 	const struct bpf_map *b = _b;
568 
569 	return a->offset - b->offset;
570 }
571 
572 static int
573 bpf_object__init_maps(struct bpf_object *obj)
574 {
575 	int i, map_idx, map_def_sz, nr_maps = 0;
576 	Elf_Scn *scn;
577 	Elf_Data *data;
578 	Elf_Data *symbols = obj->efile.symbols;
579 
580 	if (obj->efile.maps_shndx < 0)
581 		return -EINVAL;
582 	if (!symbols)
583 		return -EINVAL;
584 
585 	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
586 	if (scn)
587 		data = elf_getdata(scn, NULL);
588 	if (!scn || !data) {
589 		pr_warning("failed to get Elf_Data from map section %d\n",
590 			   obj->efile.maps_shndx);
591 		return -EINVAL;
592 	}
593 
594 	/*
595 	 * Count number of maps. Each map has a name.
596 	 * Array of maps is not supported: only the first element is
597 	 * considered.
598 	 *
599 	 * TODO: Detect array of map and report error.
600 	 */
601 	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
602 		GElf_Sym sym;
603 
604 		if (!gelf_getsym(symbols, i, &sym))
605 			continue;
606 		if (sym.st_shndx != obj->efile.maps_shndx)
607 			continue;
608 		nr_maps++;
609 	}
610 
611 	/* Alloc obj->maps and fill nr_maps. */
612 	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
613 		 nr_maps, data->d_size);
614 
615 	if (!nr_maps)
616 		return 0;
617 
618 	/* Assume equally sized map definitions */
619 	map_def_sz = data->d_size / nr_maps;
620 	if (!data->d_size || (data->d_size % nr_maps) != 0) {
621 		pr_warning("unable to determine map definition size "
622 			   "section %s, %d maps in %zd bytes\n",
623 			   obj->path, nr_maps, data->d_size);
624 		return -EINVAL;
625 	}
626 
627 	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
628 	if (!obj->maps) {
629 		pr_warning("alloc maps for object failed\n");
630 		return -ENOMEM;
631 	}
632 	obj->nr_maps = nr_maps;
633 
634 	/*
635 	 * fill all fd with -1 so won't close incorrect
636 	 * fd (fd=0 is stdin) when failure (zclose won't close
637 	 * negative fd)).
638 	 */
639 	for (i = 0; i < nr_maps; i++)
640 		obj->maps[i].fd = -1;
641 
642 	/*
643 	 * Fill obj->maps using data in "maps" section.
644 	 */
645 	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
646 		GElf_Sym sym;
647 		const char *map_name;
648 		struct bpf_map_def *def;
649 
650 		if (!gelf_getsym(symbols, i, &sym))
651 			continue;
652 		if (sym.st_shndx != obj->efile.maps_shndx)
653 			continue;
654 
655 		map_name = elf_strptr(obj->efile.elf,
656 				      obj->efile.strtabidx,
657 				      sym.st_name);
658 		obj->maps[map_idx].offset = sym.st_value;
659 		if (sym.st_value + map_def_sz > data->d_size) {
660 			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
661 				   obj->path, map_name);
662 			return -EINVAL;
663 		}
664 
665 		obj->maps[map_idx].name = strdup(map_name);
666 		if (!obj->maps[map_idx].name) {
667 			pr_warning("failed to alloc map name\n");
668 			return -ENOMEM;
669 		}
670 		pr_debug("map %d is \"%s\"\n", map_idx,
671 			 obj->maps[map_idx].name);
672 		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
673 		/*
674 		 * If the definition of the map in the object file fits in
675 		 * bpf_map_def, copy it.  Any extra fields in our version
676 		 * of bpf_map_def will default to zero as a result of the
677 		 * calloc above.
678 		 */
679 		if (map_def_sz <= sizeof(struct bpf_map_def)) {
680 			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
681 		} else {
682 			/*
683 			 * Here the map structure being read is bigger than what
684 			 * we expect, truncate if the excess bits are all zero.
685 			 * If they are not zero, reject this map as
686 			 * incompatible.
687 			 */
688 			char *b;
689 			for (b = ((char *)def) + sizeof(struct bpf_map_def);
690 			     b < ((char *)def) + map_def_sz; b++) {
691 				if (*b != 0) {
692 					pr_warning("maps section in %s: \"%s\" "
693 						   "has unrecognized, non-zero "
694 						   "options\n",
695 						   obj->path, map_name);
696 					return -EINVAL;
697 				}
698 			}
699 			memcpy(&obj->maps[map_idx].def, def,
700 			       sizeof(struct bpf_map_def));
701 		}
702 		map_idx++;
703 	}
704 
705 	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
706 	return 0;
707 }
708 
709 static bool section_have_execinstr(struct bpf_object *obj, int idx)
710 {
711 	Elf_Scn *scn;
712 	GElf_Shdr sh;
713 
714 	scn = elf_getscn(obj->efile.elf, idx);
715 	if (!scn)
716 		return false;
717 
718 	if (gelf_getshdr(scn, &sh) != &sh)
719 		return false;
720 
721 	if (sh.sh_flags & SHF_EXECINSTR)
722 		return true;
723 
724 	return false;
725 }
726 
727 static int bpf_object__elf_collect(struct bpf_object *obj)
728 {
729 	Elf *elf = obj->efile.elf;
730 	GElf_Ehdr *ep = &obj->efile.ehdr;
731 	Elf_Scn *scn = NULL;
732 	int idx = 0, err = 0;
733 
734 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
735 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
736 		pr_warning("failed to get e_shstrndx from %s\n",
737 			   obj->path);
738 		return -LIBBPF_ERRNO__FORMAT;
739 	}
740 
741 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
742 		char *name;
743 		GElf_Shdr sh;
744 		Elf_Data *data;
745 
746 		idx++;
747 		if (gelf_getshdr(scn, &sh) != &sh) {
748 			pr_warning("failed to get section(%d) header from %s\n",
749 				   idx, obj->path);
750 			err = -LIBBPF_ERRNO__FORMAT;
751 			goto out;
752 		}
753 
754 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
755 		if (!name) {
756 			pr_warning("failed to get section(%d) name from %s\n",
757 				   idx, obj->path);
758 			err = -LIBBPF_ERRNO__FORMAT;
759 			goto out;
760 		}
761 
762 		data = elf_getdata(scn, 0);
763 		if (!data) {
764 			pr_warning("failed to get section(%d) data from %s(%s)\n",
765 				   idx, name, obj->path);
766 			err = -LIBBPF_ERRNO__FORMAT;
767 			goto out;
768 		}
769 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
770 			 idx, name, (unsigned long)data->d_size,
771 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
772 			 (int)sh.sh_type);
773 
774 		if (strcmp(name, "license") == 0)
775 			err = bpf_object__init_license(obj,
776 						       data->d_buf,
777 						       data->d_size);
778 		else if (strcmp(name, "version") == 0)
779 			err = bpf_object__init_kversion(obj,
780 							data->d_buf,
781 							data->d_size);
782 		else if (strcmp(name, "maps") == 0)
783 			obj->efile.maps_shndx = idx;
784 		else if (strcmp(name, BTF_ELF_SEC) == 0) {
785 			obj->btf = btf__new(data->d_buf, data->d_size,
786 					    __pr_debug);
787 			if (IS_ERR(obj->btf)) {
788 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
789 					   BTF_ELF_SEC, PTR_ERR(obj->btf));
790 				obj->btf = NULL;
791 			}
792 		} else if (sh.sh_type == SHT_SYMTAB) {
793 			if (obj->efile.symbols) {
794 				pr_warning("bpf: multiple SYMTAB in %s\n",
795 					   obj->path);
796 				err = -LIBBPF_ERRNO__FORMAT;
797 			} else {
798 				obj->efile.symbols = data;
799 				obj->efile.strtabidx = sh.sh_link;
800 			}
801 		} else if ((sh.sh_type == SHT_PROGBITS) &&
802 			   (sh.sh_flags & SHF_EXECINSTR) &&
803 			   (data->d_size > 0)) {
804 			if (strcmp(name, ".text") == 0)
805 				obj->efile.text_shndx = idx;
806 			err = bpf_object__add_program(obj, data->d_buf,
807 						      data->d_size, name, idx);
808 			if (err) {
809 				char errmsg[STRERR_BUFSIZE];
810 
811 				strerror_r(-err, errmsg, sizeof(errmsg));
812 				pr_warning("failed to alloc program %s (%s): %s",
813 					   name, obj->path, errmsg);
814 			}
815 		} else if (sh.sh_type == SHT_REL) {
816 			void *reloc = obj->efile.reloc;
817 			int nr_reloc = obj->efile.nr_reloc + 1;
818 			int sec = sh.sh_info; /* points to other section */
819 
820 			/* Only do relo for section with exec instructions */
821 			if (!section_have_execinstr(obj, sec)) {
822 				pr_debug("skip relo %s(%d) for section(%d)\n",
823 					 name, idx, sec);
824 				continue;
825 			}
826 
827 			reloc = reallocarray(reloc, nr_reloc,
828 					     sizeof(*obj->efile.reloc));
829 			if (!reloc) {
830 				pr_warning("realloc failed\n");
831 				err = -ENOMEM;
832 			} else {
833 				int n = nr_reloc - 1;
834 
835 				obj->efile.reloc = reloc;
836 				obj->efile.nr_reloc = nr_reloc;
837 
838 				obj->efile.reloc[n].shdr = sh;
839 				obj->efile.reloc[n].data = data;
840 			}
841 		} else {
842 			pr_debug("skip section(%d) %s\n", idx, name);
843 		}
844 		if (err)
845 			goto out;
846 	}
847 
848 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
849 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
850 		return LIBBPF_ERRNO__FORMAT;
851 	}
852 	if (obj->efile.maps_shndx >= 0) {
853 		err = bpf_object__init_maps(obj);
854 		if (err)
855 			goto out;
856 	}
857 	err = bpf_object__init_prog_names(obj);
858 out:
859 	return err;
860 }
861 
862 static struct bpf_program *
863 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
864 {
865 	struct bpf_program *prog;
866 	size_t i;
867 
868 	for (i = 0; i < obj->nr_programs; i++) {
869 		prog = &obj->programs[i];
870 		if (prog->idx == idx)
871 			return prog;
872 	}
873 	return NULL;
874 }
875 
/*
 * Build prog->reloc_desc from one SHT_REL section (@shdr/@data).  Each
 * relocation becomes either RELO_CALL (a bpf-to-bpf pseudo call into
 * .text, recorded by its symbol offset) or RELO_LD64 (a map-fd load,
 * recorded by the index of the map whose section offset matches the
 * symbol value).  Returns 0 or a negative LIBBPF_ERRNO__*/-ENOMEM code.
 *
 * NOTE(review): insn_idx derived from rel.r_offset is used to index
 * prog->insns without a bounds check against insns_cnt — confirm the
 * ELF cannot drive an out-of-range access here.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	/* Freed by bpf_program__exit() on both success and error paths. */
	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* Only relocations against the maps section or .text are
		 * meaningful for BPF programs. */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		/* A call instruction with src_reg == BPF_PSEUDO_CALL is a
		 * bpf-to-bpf call resolved later via the .text section. */
		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		/* Map loads must target a 64-bit immediate load. */
		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
969 
970 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
971 {
972 	struct bpf_map_def *def = &map->def;
973 	const size_t max_name = 256;
974 	int64_t key_size, value_size;
975 	int32_t key_id, value_id;
976 	char name[max_name];
977 
978 	/* Find key type by name from BTF */
979 	if (snprintf(name, max_name, "%s_key", map->name) == max_name) {
980 		pr_warning("map:%s length of BTF key_type:%s_key is too long\n",
981 			   map->name, map->name);
982 		return -EINVAL;
983 	}
984 
985 	key_id = btf__find_by_name(btf, name);
986 	if (key_id < 0) {
987 		pr_debug("map:%s key_type:%s cannot be found in BTF\n",
988 			 map->name, name);
989 		return key_id;
990 	}
991 
992 	key_size = btf__resolve_size(btf, key_id);
993 	if (key_size < 0) {
994 		pr_warning("map:%s key_type:%s cannot get the BTF type_size\n",
995 			   map->name, name);
996 		return key_size;
997 	}
998 
999 	if (def->key_size != key_size) {
1000 		pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n",
1001 			   map->name, name, (unsigned int)key_size, def->key_size);
1002 		return -EINVAL;
1003 	}
1004 
1005 	/* Find value type from BTF */
1006 	if (snprintf(name, max_name, "%s_value", map->name) == max_name) {
1007 		pr_warning("map:%s length of BTF value_type:%s_value is too long\n",
1008 			  map->name, map->name);
1009 		return -EINVAL;
1010 	}
1011 
1012 	value_id = btf__find_by_name(btf, name);
1013 	if (value_id < 0) {
1014 		pr_debug("map:%s value_type:%s cannot be found in BTF\n",
1015 			 map->name, name);
1016 		return value_id;
1017 	}
1018 
1019 	value_size = btf__resolve_size(btf, value_id);
1020 	if (value_size < 0) {
1021 		pr_warning("map:%s value_type:%s cannot get the BTF type_size\n",
1022 			   map->name, name);
1023 		return value_size;
1024 	}
1025 
1026 	if (def->value_size != value_size) {
1027 		pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n",
1028 			   map->name, name, (unsigned int)value_size, def->value_size);
1029 		return -EINVAL;
1030 	}
1031 
1032 	map->btf_key_type_id = key_id;
1033 	map->btf_value_type_id = value_id;
1034 
1035 	return 0;
1036 }
1037 
1038 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1039 {
1040 	struct bpf_map_info info = {};
1041 	__u32 len = sizeof(info);
1042 	int new_fd, err;
1043 	char *new_name;
1044 
1045 	err = bpf_obj_get_info_by_fd(fd, &info, &len);
1046 	if (err)
1047 		return err;
1048 
1049 	new_name = strdup(info.name);
1050 	if (!new_name)
1051 		return -errno;
1052 
1053 	new_fd = open("/", O_RDONLY | O_CLOEXEC);
1054 	if (new_fd < 0)
1055 		goto err_free_new_name;
1056 
1057 	new_fd = dup3(fd, new_fd, O_CLOEXEC);
1058 	if (new_fd < 0)
1059 		goto err_close_new_fd;
1060 
1061 	err = zclose(map->fd);
1062 	if (err)
1063 		goto err_close_new_fd;
1064 	free(map->name);
1065 
1066 	map->fd = new_fd;
1067 	map->name = new_name;
1068 	map->def.type = info.type;
1069 	map->def.key_size = info.key_size;
1070 	map->def.value_size = info.value_size;
1071 	map->def.max_entries = info.max_entries;
1072 	map->def.map_flags = info.map_flags;
1073 	map->btf_key_type_id = info.btf_key_type_id;
1074 	map->btf_value_type_id = info.btf_value_type_id;
1075 
1076 	return 0;
1077 
1078 err_close_new_fd:
1079 	close(new_fd);
1080 err_free_new_name:
1081 	free(new_name);
1082 	return -errno;
1083 }
1084 
/*
 * Create a kernel map for every entry in obj->maps that does not
 * already have a valid fd (preset fds from bpf_map__reuse_fd() are
 * skipped).  BTF key/value ids are attached when available; if the
 * kernel rejects the BTF-annotated create, creation is retried without
 * BTF.  On any hard failure every fd created so far is closed.
 * Returns 0 or a negative error code.
 *
 * NOTE(review): on failure "err = *pfd" propagates the raw negative
 * return of bpf_create_map_xattr() (looks like -1), not -errno —
 * confirm callers do not need the precise errno value.
 */
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_create_map_attr create_attr = {};
	unsigned int i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		struct bpf_map *map = &obj->maps[i];
		struct bpf_map_def *def = &map->def;
		int *pfd = &map->fd;

		if (map->fd >= 0) {
			pr_debug("skip map create (preset) %s: fd=%d\n",
				 map->name, map->fd);
			continue;
		}

		create_attr.name = map->name;
		create_attr.map_ifindex = map->map_ifindex;
		create_attr.map_type = def->type;
		create_attr.map_flags = def->map_flags;
		create_attr.key_size = def->key_size;
		create_attr.value_size = def->value_size;
		create_attr.max_entries = def->max_entries;
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;

		/* Attach BTF type info only when it resolves cleanly. */
		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
			create_attr.btf_fd = btf__fd(obj->btf);
			create_attr.btf_key_type_id = map->btf_key_type_id;
			create_attr.btf_value_type_id = map->btf_value_type_id;
		}

		*pfd = bpf_create_map_xattr(&create_attr);
		if (*pfd < 0 && create_attr.btf_key_type_id) {
			/* Old kernels may not support BTF-annotated maps;
			 * retry once with the BTF fields cleared. */
			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
				   map->name, strerror(errno), errno);
			create_attr.btf_fd = 0;
			create_attr.btf_key_type_id = 0;
			create_attr.btf_value_type_id = 0;
			map->btf_key_type_id = 0;
			map->btf_value_type_id = 0;
			*pfd = bpf_create_map_xattr(&create_attr);
		}

		if (*pfd < 0) {
			size_t j;

			err = *pfd;
			pr_warning("failed to create map (name: '%s'): %s\n",
				   map->name,
				   strerror(errno));
			/* Roll back: close every map created so far. */
			for (j = 0; j < i; j++)
				zclose(obj->maps[j].fd);
			return err;
		}
		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
	}

	return 0;
}
1148 
/*
 * Resolve a RELO_CALL relocation: a bpf-to-bpf call from @prog into the
 * object's .text section.  On the first such call the entire .text section
 * is appended to @prog's instruction array (subprograms live there), and
 * the append position is remembered in main_prog_cnt.  Every call insn is
 * then fixed up so its imm is the callee's offset relative to the call site.
 */
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
			struct reloc_desc *relo)
{
	struct bpf_insn *insn, *new_insn;
	struct bpf_program *text;
	size_t new_cnt;

	if (relo->type != RELO_CALL)
		return -LIBBPF_ERRNO__RELOC;

	/* .text itself must not contain call relocations into .text */
	if (prog->idx == obj->efile.text_shndx) {
		pr_warning("relo in .text insn %d into off %d\n",
			   relo->insn_idx, relo->text_off);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* main_prog_cnt == 0 means .text has not been appended yet */
	if (prog->main_prog_cnt == 0) {
		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
		if (!text) {
			pr_warning("no .text section found yet relo into text exist\n");
			return -LIBBPF_ERRNO__RELOC;
		}
		new_cnt = prog->insns_cnt + text->insns_cnt;
		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
		if (!new_insn) {
			pr_warning("oom in prog realloc\n");
			return -ENOMEM;
		}
		/* copy .text right after the program's own instructions */
		memcpy(new_insn + prog->insns_cnt, text->insns,
		       text->insns_cnt * sizeof(*insn));
		prog->insns = new_insn;
		prog->main_prog_cnt = prog->insns_cnt;
		prog->insns_cnt = new_cnt;
		pr_debug("added %zd insn from %s to prog %s\n",
			 text->insns_cnt, text->section_name,
			 prog->section_name);
	}
	/*
	 * Point the call at the appended .text copy: imm becomes the
	 * pc-relative distance from the call insn to the callee.
	 */
	insn = &prog->insns[relo->insn_idx];
	insn->imm += prog->main_prog_cnt - relo->insn_idx;
	return 0;
}
1191 
1192 static int
1193 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
1194 {
1195 	int i, err;
1196 
1197 	if (!prog || !prog->reloc_desc)
1198 		return 0;
1199 
1200 	for (i = 0; i < prog->nr_reloc; i++) {
1201 		if (prog->reloc_desc[i].type == RELO_LD64) {
1202 			struct bpf_insn *insns = prog->insns;
1203 			int insn_idx, map_idx;
1204 
1205 			insn_idx = prog->reloc_desc[i].insn_idx;
1206 			map_idx = prog->reloc_desc[i].map_idx;
1207 
1208 			if (insn_idx >= (int)prog->insns_cnt) {
1209 				pr_warning("relocation out of range: '%s'\n",
1210 					   prog->section_name);
1211 				return -LIBBPF_ERRNO__RELOC;
1212 			}
1213 			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1214 			insns[insn_idx].imm = obj->maps[map_idx].fd;
1215 		} else {
1216 			err = bpf_program__reloc_text(prog, obj,
1217 						      &prog->reloc_desc[i]);
1218 			if (err)
1219 				return err;
1220 		}
1221 	}
1222 
1223 	zfree(&prog->reloc_desc);
1224 	prog->nr_reloc = 0;
1225 	return 0;
1226 }
1227 
1228 
1229 static int
1230 bpf_object__relocate(struct bpf_object *obj)
1231 {
1232 	struct bpf_program *prog;
1233 	size_t i;
1234 	int err;
1235 
1236 	for (i = 0; i < obj->nr_programs; i++) {
1237 		prog = &obj->programs[i];
1238 
1239 		err = bpf_program__relocate(prog, obj);
1240 		if (err) {
1241 			pr_warning("failed to relocate '%s'\n",
1242 				   prog->section_name);
1243 			return err;
1244 		}
1245 	}
1246 	return 0;
1247 }
1248 
/*
 * Walk every SHT_REL section gathered during ELF parsing and hand its
 * entries to the program that owns the section it patches, so they can be
 * applied later by bpf_object__relocate() at load time.
 */
static int bpf_object__collect_reloc(struct bpf_object *obj)
{
	int i, err;

	if (!obj_elf_valid(obj)) {
		pr_warning("Internal error: elf object is closed\n");
		return -LIBBPF_ERRNO__INTERNAL;
	}

	for (i = 0; i < obj->efile.nr_reloc; i++) {
		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
		Elf_Data *data = obj->efile.reloc[i].data;
		/* for a REL section, sh_info is the index of the patched section */
		int idx = shdr->sh_info;
		struct bpf_program *prog;

		if (shdr->sh_type != SHT_REL) {
			pr_warning("internal error at %d\n", __LINE__);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog = bpf_object__find_prog_by_idx(obj, idx);
		if (!prog) {
			pr_warning("relocation failed: no section(%d)\n", idx);
			return -LIBBPF_ERRNO__RELOC;
		}

		err = bpf_program__collect_reloc(prog,
						 shdr, data,
						 obj);
		if (err)
			return err;
	}
	return 0;
}
1283 
/*
 * Load one BPF program image into the kernel via bpf_load_program_xattr()
 * and store the new fd in *pfd.  On failure, probe for the most likely
 * cause and return a distinguishing LIBBPF_ERRNO__* code:
 *   - verifier log non-empty		-> -LIBBPF_ERRNO__VERIFY
 *   - program exceeds BPF_MAXINSNS	-> -LIBBPF_ERRNO__PROG2BIG
 *   - same insns load as KPROBE	-> -LIBBPF_ERRNO__PROGTYPE
 *   - otherwise (with a log buffer)	-> -LIBBPF_ERRNO__KVER
 */
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
	     const char *name, struct bpf_insn *insns, int insns_cnt,
	     char *license, u32 kern_version, int *pfd, int prog_ifindex)
{
	struct bpf_load_program_attr load_attr;
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = type;
	load_attr.expected_attach_type = expected_attach_type;
	load_attr.name = name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog_ifindex;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* log buffer is best-effort: load proceeds without it on OOM */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	pr_warning("load bpf program failed: %s\n", strerror(errno));

	if (log_buf && log_buf[0] != '\0') {
		/* verifier produced output: report it as a verify failure */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/*
			 * Re-try the identical insns as a kprobe program;
			 * success implies the declared type was wrong.
			 */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1353 
/*
 * Load @prog into the kernel, once per instance.  Without a preprocessor
 * there is exactly one instance loaded directly from prog->insns.  With a
 * preprocessor, each instance's instructions are produced by the
 * preprocessor callback, which may also skip an instance by returning no
 * new instructions.  prog->insns is always freed afterwards (even on
 * error), so a program cannot be loaded twice.
 */
static int
bpf_program__load(struct bpf_program *prog,
		  char *license, u32 kern_version)
{
	int err = 0, fd, i;

	/* lazily set up the single-instance fd array for plain programs */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* a preprocessor requires bpf_program__set_prep() first */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* preprocessed path: one kernel program per instance */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* preprocessor produced nothing: instance intentionally skipped */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog->type, prog->expected_attach_type,
				   prog->name, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   prog->prog_ifindex);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
					i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* instructions are no longer needed once (un)loaded */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1436 
1437 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1438 					     struct bpf_object *obj)
1439 {
1440 	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1441 }
1442 
1443 static int
1444 bpf_object__load_progs(struct bpf_object *obj)
1445 {
1446 	size_t i;
1447 	int err;
1448 
1449 	for (i = 0; i < obj->nr_programs; i++) {
1450 		if (bpf_program__is_function_storage(&obj->programs[i], obj))
1451 			continue;
1452 		err = bpf_program__load(&obj->programs[i],
1453 					obj->license,
1454 					obj->kern_version);
1455 		if (err)
1456 			return err;
1457 	}
1458 	return 0;
1459 }
1460 
/*
 * Whether the kernel checks the kern_version attribute for this program
 * type.  The enum values are listed explicitly (rather than collapsed into
 * default) so new program types trigger a compiler switch warning and get
 * classified deliberately.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	/* networking / cgroup types: no kernel version required */
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
		return false;
	/* tracing types (and anything unknown): version required */
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	default:
		return true;
	}
}
1490 
1491 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1492 {
1493 	if (needs_kver && obj->kern_version == 0) {
1494 		pr_warning("%s doesn't provide kernel version\n",
1495 			   obj->path);
1496 		return -LIBBPF_ERRNO__KVERSION;
1497 	}
1498 	return 0;
1499 }
1500 
/*
 * Common open path: allocate a bpf_object from a file or memory buffer and
 * run the whole ELF parsing pipeline (init, endianness check, section
 * collection, relocation collection, validation).  The ELF handle is
 * released before returning; only the parsed in-memory representation
 * survives.  Returns the object or an ERR_PTR.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR jumps to out (closing the object) on any failure */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1529 
1530 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
1531 {
1532 	/* param validation */
1533 	if (!attr->file)
1534 		return NULL;
1535 
1536 	pr_debug("loading %s\n", attr->file);
1537 
1538 	return __bpf_object__open(attr->file, NULL, 0,
1539 				  bpf_prog_type__needs_kver(attr->prog_type));
1540 }
1541 
1542 struct bpf_object *bpf_object__open(const char *path)
1543 {
1544 	struct bpf_object_open_attr attr = {
1545 		.file		= path,
1546 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
1547 	};
1548 
1549 	return bpf_object__open_xattr(&attr);
1550 }
1551 
1552 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
1553 					   size_t obj_buf_sz,
1554 					   const char *name)
1555 {
1556 	char tmp_name[64];
1557 
1558 	/* param validation */
1559 	if (!obj_buf || obj_buf_sz <= 0)
1560 		return NULL;
1561 
1562 	if (!name) {
1563 		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1564 			 (unsigned long)obj_buf,
1565 			 (unsigned long)obj_buf_sz);
1566 		tmp_name[sizeof(tmp_name) - 1] = '\0';
1567 		name = tmp_name;
1568 	}
1569 	pr_debug("loading object '%s' from buffer\n",
1570 		 name);
1571 
1572 	return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
1573 }
1574 
1575 int bpf_object__unload(struct bpf_object *obj)
1576 {
1577 	size_t i;
1578 
1579 	if (!obj)
1580 		return -EINVAL;
1581 
1582 	for (i = 0; i < obj->nr_maps; i++)
1583 		zclose(obj->maps[i].fd);
1584 
1585 	for (i = 0; i < obj->nr_programs; i++)
1586 		bpf_program__unload(&obj->programs[i]);
1587 
1588 	return 0;
1589 }
1590 
/*
 * Load @obj into the kernel: create maps, apply relocations (which need
 * the map fds), then load programs.  Loading is one-shot; a second call
 * fails.  On failure everything loaded so far is unloaded again.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	/* CHECK_ERR jumps to out on any failure, unloading the object */
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1615 
1616 static int check_path(const char *path)
1617 {
1618 	struct statfs st_fs;
1619 	char *dname, *dir;
1620 	int err = 0;
1621 
1622 	if (path == NULL)
1623 		return -EINVAL;
1624 
1625 	dname = strdup(path);
1626 	if (dname == NULL)
1627 		return -ENOMEM;
1628 
1629 	dir = dirname(dname);
1630 	if (statfs(dir, &st_fs)) {
1631 		pr_warning("failed to statfs %s: %s\n", dir, strerror(errno));
1632 		err = -errno;
1633 	}
1634 	free(dname);
1635 
1636 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1637 		pr_warning("specified path %s is not on BPF FS\n", path);
1638 		err = -EINVAL;
1639 	}
1640 
1641 	return err;
1642 }
1643 
1644 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1645 			      int instance)
1646 {
1647 	int err;
1648 
1649 	err = check_path(path);
1650 	if (err)
1651 		return err;
1652 
1653 	if (prog == NULL) {
1654 		pr_warning("invalid program pointer\n");
1655 		return -EINVAL;
1656 	}
1657 
1658 	if (instance < 0 || instance >= prog->instances.nr) {
1659 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1660 			   instance, prog->section_name, prog->instances.nr);
1661 		return -EINVAL;
1662 	}
1663 
1664 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1665 		pr_warning("failed to pin program: %s\n", strerror(errno));
1666 		return -errno;
1667 	}
1668 	pr_debug("pinned program '%s'\n", path);
1669 
1670 	return 0;
1671 }
1672 
/* Create @path (mode 0700); an already-existing directory is fine. */
static int make_dir(const char *path)
{
	int err;

	if (mkdir(path, 0700) == 0 || errno == EEXIST)
		return 0;

	err = -errno;
	pr_warning("failed to mkdir %s: %s\n", path, strerror(-err));
	return err;
}
1684 
1685 int bpf_program__pin(struct bpf_program *prog, const char *path)
1686 {
1687 	int i, err;
1688 
1689 	err = check_path(path);
1690 	if (err)
1691 		return err;
1692 
1693 	if (prog == NULL) {
1694 		pr_warning("invalid program pointer\n");
1695 		return -EINVAL;
1696 	}
1697 
1698 	if (prog->instances.nr <= 0) {
1699 		pr_warning("no instances of prog %s to pin\n",
1700 			   prog->section_name);
1701 		return -EINVAL;
1702 	}
1703 
1704 	err = make_dir(path);
1705 	if (err)
1706 		return err;
1707 
1708 	for (i = 0; i < prog->instances.nr; i++) {
1709 		char buf[PATH_MAX];
1710 		int len;
1711 
1712 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1713 		if (len < 0)
1714 			return -EINVAL;
1715 		else if (len >= PATH_MAX)
1716 			return -ENAMETOOLONG;
1717 
1718 		err = bpf_program__pin_instance(prog, buf, i);
1719 		if (err)
1720 			return err;
1721 	}
1722 
1723 	return 0;
1724 }
1725 
1726 int bpf_map__pin(struct bpf_map *map, const char *path)
1727 {
1728 	int err;
1729 
1730 	err = check_path(path);
1731 	if (err)
1732 		return err;
1733 
1734 	if (map == NULL) {
1735 		pr_warning("invalid map pointer\n");
1736 		return -EINVAL;
1737 	}
1738 
1739 	if (bpf_obj_pin(map->fd, path)) {
1740 		pr_warning("failed to pin map: %s\n", strerror(errno));
1741 		return -errno;
1742 	}
1743 
1744 	pr_debug("pinned map '%s'\n", path);
1745 	return 0;
1746 }
1747 
1748 int bpf_object__pin(struct bpf_object *obj, const char *path)
1749 {
1750 	struct bpf_program *prog;
1751 	struct bpf_map *map;
1752 	int err;
1753 
1754 	if (!obj)
1755 		return -ENOENT;
1756 
1757 	if (!obj->loaded) {
1758 		pr_warning("object not yet loaded; load it first\n");
1759 		return -ENOENT;
1760 	}
1761 
1762 	err = make_dir(path);
1763 	if (err)
1764 		return err;
1765 
1766 	bpf_map__for_each(map, obj) {
1767 		char buf[PATH_MAX];
1768 		int len;
1769 
1770 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
1771 			       bpf_map__name(map));
1772 		if (len < 0)
1773 			return -EINVAL;
1774 		else if (len >= PATH_MAX)
1775 			return -ENAMETOOLONG;
1776 
1777 		err = bpf_map__pin(map, buf);
1778 		if (err)
1779 			return err;
1780 	}
1781 
1782 	bpf_object__for_each_program(prog, obj) {
1783 		char buf[PATH_MAX];
1784 		int len;
1785 
1786 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
1787 			       prog->section_name);
1788 		if (len < 0)
1789 			return -EINVAL;
1790 		else if (len >= PATH_MAX)
1791 			return -ENAMETOOLONG;
1792 
1793 		err = bpf_program__pin(prog, buf);
1794 		if (err)
1795 			return err;
1796 	}
1797 
1798 	return 0;
1799 }
1800 
/*
 * Destroy @obj: run the user's clear_priv callback, release ELF state,
 * unload maps/programs from the kernel, free BTF data, free per-map and
 * per-program allocations, unlink from the global object list, and
 * finally free the object itself.  NULL is tolerated.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	/* user callback runs first, while the object is still intact */
	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* remove from the global bpf_objects_list before freeing */
	list_del(&obj->list);
	free(obj);
}
1835 
1836 struct bpf_object *
1837 bpf_object__next(struct bpf_object *prev)
1838 {
1839 	struct bpf_object *next;
1840 
1841 	if (!prev)
1842 		next = list_first_entry(&bpf_objects_list,
1843 					struct bpf_object,
1844 					list);
1845 	else
1846 		next = list_next_entry(prev, list);
1847 
1848 	/* Empty list is noticed here so don't need checking on entry. */
1849 	if (&next->list == &bpf_objects_list)
1850 		return NULL;
1851 
1852 	return next;
1853 }
1854 
1855 const char *bpf_object__name(struct bpf_object *obj)
1856 {
1857 	return obj ? obj->path : ERR_PTR(-EINVAL);
1858 }
1859 
1860 unsigned int bpf_object__kversion(struct bpf_object *obj)
1861 {
1862 	return obj ? obj->kern_version : 0;
1863 }
1864 
1865 int bpf_object__btf_fd(const struct bpf_object *obj)
1866 {
1867 	return obj->btf ? btf__fd(obj->btf) : -1;
1868 }
1869 
/*
 * Attach user private data to @obj.  Any previously attached private data
 * is disposed of via its clear_priv callback first.  @clear_priv (may be
 * NULL) will be invoked on close or when the private data is replaced.
 */
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}
1880 
1881 void *bpf_object__priv(struct bpf_object *obj)
1882 {
1883 	return obj ? obj->priv : ERR_PTR(-EINVAL);
1884 }
1885 
1886 static struct bpf_program *
1887 __bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1888 {
1889 	size_t idx;
1890 
1891 	if (!obj->programs)
1892 		return NULL;
1893 	/* First handler */
1894 	if (prev == NULL)
1895 		return &obj->programs[0];
1896 
1897 	if (prev->obj != obj) {
1898 		pr_warning("error: program handler doesn't match object\n");
1899 		return NULL;
1900 	}
1901 
1902 	idx = (prev - obj->programs) + 1;
1903 	if (idx >= obj->nr_programs)
1904 		return NULL;
1905 	return &obj->programs[idx];
1906 }
1907 
/* Public iterator: like __bpf_program__next() but skips .text storage. */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	struct bpf_program *p;

	for (p = __bpf_program__next(prev, obj); p;
	     p = __bpf_program__next(p, obj)) {
		if (!bpf_program__is_function_storage(p, obj))
			break;
	}

	return p;
}
1919 
/*
 * Attach user private data to @prog, disposing of any previous private
 * data via its clear_priv callback.  @clear_priv (may be NULL) runs when
 * the program is destroyed or the private data is replaced.
 */
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}
1930 
1931 void *bpf_program__priv(struct bpf_program *prog)
1932 {
1933 	return prog ? prog->priv : ERR_PTR(-EINVAL);
1934 }
1935 
/* Target device for offloaded programs; set before bpf_object__load(). */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
1940 
1941 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
1942 {
1943 	const char *title;
1944 
1945 	title = prog->section_name;
1946 	if (needs_copy) {
1947 		title = strdup(title);
1948 		if (!title) {
1949 			pr_warning("failed to strdup program title\n");
1950 			return ERR_PTR(-ENOMEM);
1951 		}
1952 	}
1953 
1954 	return title;
1955 }
1956 
/* fd of the program's first (and usually only) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
1961 
1962 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1963 			  bpf_program_prep_t prep)
1964 {
1965 	int *instances_fds;
1966 
1967 	if (nr_instances <= 0 || !prep)
1968 		return -EINVAL;
1969 
1970 	if (prog->instances.nr > 0 || prog->instances.fds) {
1971 		pr_warning("Can't set pre-processor after loading\n");
1972 		return -EINVAL;
1973 	}
1974 
1975 	instances_fds = malloc(sizeof(int) * nr_instances);
1976 	if (!instances_fds) {
1977 		pr_warning("alloc memory failed for fds\n");
1978 		return -ENOMEM;
1979 	}
1980 
1981 	/* fill all fd with -1 */
1982 	memset(instances_fds, -1, sizeof(int) * nr_instances);
1983 
1984 	prog->instances.nr = nr_instances;
1985 	prog->instances.fds = instances_fds;
1986 	prog->preprocessor = prep;
1987 	return 0;
1988 }
1989 
1990 int bpf_program__nth_fd(struct bpf_program *prog, int n)
1991 {
1992 	int fd;
1993 
1994 	if (n >= prog->instances.nr || n < 0) {
1995 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
1996 			   n, prog->section_name, prog->instances.nr);
1997 		return -EINVAL;
1998 	}
1999 
2000 	fd = prog->instances.fds[n];
2001 	if (fd < 0) {
2002 		pr_warning("%dth instance of program '%s' is invalid\n",
2003 			   n, prog->section_name);
2004 		return -ENOENT;
2005 	}
2006 
2007 	return fd;
2008 }
2009 
/* Override the program type; effective only before loading. */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
2014 
2015 static bool bpf_program__is_type(struct bpf_program *prog,
2016 				 enum bpf_prog_type type)
2017 {
2018 	return prog ? (prog->type == type) : false;
2019 }
2020 
/*
 * Generate the public per-type setter/predicate pair, e.g.
 * bpf_program__set_xdp() and bpf_program__is_xdp(), for each supported
 * program type.  The setter returns -EINVAL for a NULL program.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

/* One setter/predicate pair per exported program type. */
BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
2043 
/* Expected attach type passed to the kernel at load time. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2049 
/*
 * ELF section-name prefix -> (program type, expected attach type) table
 * used by libbpf_prog_type_by_name().  Entries ending in '/' match
 * sections like "kprobe/sys_open"; the others match by bare prefix.
 */
#define BPF_PROG_SEC_FULL(string, ptype, atype) \
	{ string, sizeof(string) - 1, ptype, atype }

/* plain entry: attach type 0 (unspecified) */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)

/* cgroup/sock program with a specific attach type */
#define BPF_S_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype)

/* cgroup/sock_addr program with a specific attach type */
#define BPF_SA_PROG_SEC(string, ptype) \
	BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)

static const struct {
	const char *sec;
	size_t len;			/* prefix length (excl. NUL) */
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",		BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",	BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",	BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",		BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",	BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",	BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",		BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",	BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("cgroup/skb",	BPF_PROG_TYPE_CGROUP_SKB),
	BPF_PROG_SEC("cgroup/sock",	BPF_PROG_TYPE_CGROUP_SOCK),
	BPF_PROG_SEC("cgroup/dev",	BPF_PROG_TYPE_CGROUP_DEVICE),
	BPF_PROG_SEC("lwt_in",		BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",		BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",	BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",	BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_PROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS),
	BPF_PROG_SEC("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_PROG_SEC("sk_msg",		BPF_PROG_TYPE_SK_MSG),
	BPF_PROG_SEC("lirc_mode2",	BPF_PROG_TYPE_LIRC_MODE2),
	BPF_SA_PROG_SEC("cgroup/bind4",	BPF_CGROUP_INET4_BIND),
	BPF_SA_PROG_SEC("cgroup/bind6",	BPF_CGROUP_INET6_BIND),
	BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
	BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
	BPF_SA_PROG_SEC("cgroup/sendmsg4", BPF_CGROUP_UDP4_SENDMSG),
	BPF_SA_PROG_SEC("cgroup/sendmsg6", BPF_CGROUP_UDP6_SENDMSG),
	BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND),
	BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND),
};

#undef BPF_PROG_SEC
#undef BPF_PROG_SEC_FULL
#undef BPF_S_PROG_SEC
#undef BPF_SA_PROG_SEC
2101 
2102 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2103 			     enum bpf_attach_type *expected_attach_type)
2104 {
2105 	int i;
2106 
2107 	if (!name)
2108 		return -EINVAL;
2109 
2110 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2111 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2112 			continue;
2113 		*prog_type = section_names[i].prog_type;
2114 		*expected_attach_type = section_names[i].expected_attach_type;
2115 		return 0;
2116 	}
2117 	return -EINVAL;
2118 }
2119 
/* Guess program/attach types of @prog from its ELF section name. */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2128 
2129 int bpf_map__fd(struct bpf_map *map)
2130 {
2131 	return map ? map->fd : -EINVAL;
2132 }
2133 
2134 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2135 {
2136 	return map ? &map->def : ERR_PTR(-EINVAL);
2137 }
2138 
2139 const char *bpf_map__name(struct bpf_map *map)
2140 {
2141 	return map ? map->name : NULL;
2142 }
2143 
2144 uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map)
2145 {
2146 	return map ? map->btf_key_type_id : 0;
2147 }
2148 
2149 uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map)
2150 {
2151 	return map ? map->btf_value_type_id : 0;
2152 }
2153 
2154 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2155 		     bpf_map_clear_priv_t clear_priv)
2156 {
2157 	if (!map)
2158 		return -EINVAL;
2159 
2160 	if (map->priv) {
2161 		if (map->clear_priv)
2162 			map->clear_priv(map, map->priv);
2163 	}
2164 
2165 	map->priv = priv;
2166 	map->clear_priv = clear_priv;
2167 	return 0;
2168 }
2169 
2170 void *bpf_map__priv(struct bpf_map *map)
2171 {
2172 	return map ? map->priv : ERR_PTR(-EINVAL);
2173 }
2174 
2175 bool bpf_map__is_offload_neutral(struct bpf_map *map)
2176 {
2177 	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
2178 }
2179 
/* Target device for offloaded maps; set before bpf_object__load(). */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2184 
2185 struct bpf_map *
2186 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2187 {
2188 	size_t idx;
2189 	struct bpf_map *s, *e;
2190 
2191 	if (!obj || !obj->maps)
2192 		return NULL;
2193 
2194 	s = obj->maps;
2195 	e = obj->maps + obj->nr_maps;
2196 
2197 	if (prev == NULL)
2198 		return s;
2199 
2200 	if ((prev < s) || (prev >= e)) {
2201 		pr_warning("error in %s: map handler doesn't belong to object\n",
2202 			   __func__);
2203 		return NULL;
2204 	}
2205 
2206 	idx = (prev - obj->maps) + 1;
2207 	if (idx >= obj->nr_maps)
2208 		return NULL;
2209 	return &obj->maps[idx];
2210 }
2211 
2212 struct bpf_map *
2213 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2214 {
2215 	struct bpf_map *pos;
2216 
2217 	bpf_map__for_each(pos, obj) {
2218 		if (pos->name && !strcmp(pos->name, name))
2219 			return pos;
2220 	}
2221 	return NULL;
2222 }
2223 
2224 struct bpf_map *
2225 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2226 {
2227 	int i;
2228 
2229 	for (i = 0; i < obj->nr_maps; i++) {
2230 		if (obj->maps[i].offset == offset)
2231 			return &obj->maps[i];
2232 	}
2233 	return ERR_PTR(-ENOENT);
2234 }
2235 
/* Extract the error code from an ERR_PTR-encoded pointer; 0 otherwise. */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
2242 
2243 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2244 		  struct bpf_object **pobj, int *prog_fd)
2245 {
2246 	struct bpf_prog_load_attr attr;
2247 
2248 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2249 	attr.file = file;
2250 	attr.prog_type = type;
2251 	attr.expected_attach_type = 0;
2252 
2253 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2254 }
2255 
/*
 * One-stop helper: open @attr->file, infer each program's type from its
 * section name when no type was specified, propagate the offload ifindex
 * to programs and (non-neutral) maps, load the object, and return it via
 * *pobj with the first real program's fd in *prog_fd.
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {
		.file		= attr->file,
		.prog_type	= attr->prog_type,
	};
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type prog_type;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	obj = bpf_object__open_xattr(&open_attr);
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		/*
		 * If type is not specified, try to guess it based on
		 * section name.
		 */
		prog_type = attr->prog_type;
		prog->prog_ifindex = attr->ifindex;
		expected_attach_type = attr->expected_attach_type;
		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
			err = bpf_program__identify_section(prog, &prog_type,
							    &expected_attach_type);
			if (err < 0) {
				pr_warning("failed to guess program type based on section name %s\n",
					   prog->section_name);
				bpf_object__close(obj);
				return -EINVAL;
			}
		}

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog,
						      expected_attach_type);

		/* remember the first non-.text program for *prog_fd */
		if (!bpf_program__is_function_storage(prog, obj) && !first_prog)
			first_prog = prog;
	}

	/* offload-neutral maps (perf event arrays) stay on the host */
	bpf_map__for_each(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warning("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
2327 
2328 enum bpf_perf_event_ret
2329 bpf_perf_event_read_simple(void *mem, unsigned long size,
2330 			   unsigned long page_size, void **buf, size_t *buf_len,
2331 			   bpf_perf_event_print_t fn, void *priv)
2332 {
2333 	volatile struct perf_event_mmap_page *header = mem;
2334 	__u64 data_tail = header->data_tail;
2335 	__u64 data_head = header->data_head;
2336 	void *base, *begin, *end;
2337 	int ret;
2338 
2339 	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
2340 	if (data_head == data_tail)
2341 		return LIBBPF_PERF_EVENT_CONT;
2342 
2343 	base = ((char *)header) + page_size;
2344 
2345 	begin = base + data_tail % size;
2346 	end = base + data_head % size;
2347 
2348 	while (begin != end) {
2349 		struct perf_event_header *ehdr;
2350 
2351 		ehdr = begin;
2352 		if (begin + ehdr->size > base + size) {
2353 			long len = base + size - begin;
2354 
2355 			if (*buf_len < ehdr->size) {
2356 				free(*buf);
2357 				*buf = malloc(ehdr->size);
2358 				if (!*buf) {
2359 					ret = LIBBPF_PERF_EVENT_ERROR;
2360 					break;
2361 				}
2362 				*buf_len = ehdr->size;
2363 			}
2364 
2365 			memcpy(*buf, begin, len);
2366 			memcpy(*buf + len, base, ehdr->size - len);
2367 			ehdr = (void *)*buf;
2368 			begin = base + ehdr->size - len;
2369 		} else if (begin + ehdr->size == base + size) {
2370 			begin = base;
2371 		} else {
2372 			begin += ehdr->size;
2373 		}
2374 
2375 		ret = fn(ehdr, priv);
2376 		if (ret != LIBBPF_PERF_EVENT_CONT)
2377 			break;
2378 
2379 		data_tail += ehdr->size;
2380 	}
2381 
2382 	__sync_synchronize(); /* smp_mb() */
2383 	header->data_tail = data_tail;
2384 
2385 	return ret;
2386 }
2387