xref: /openbmc/linux/tools/lib/bpf/libbpf_internal.h (revision 710b797c)
1 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2 
3 /*
4  * Internal libbpf helpers.
5  *
6  * Copyright (c) 2019 Facebook
7  */
8 
9 #ifndef __LIBBPF_LIBBPF_INTERNAL_H
10 #define __LIBBPF_LIBBPF_INTERNAL_H
11 
#include <stdlib.h>
#include <limits.h>
#include <stdint.h>
14 
15 /* make sure libbpf doesn't use kernel-only integer typedefs */
16 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
17 
18 /* prevent accidental re-addition of reallocarray() */
19 #pragma GCC poison reallocarray
20 
21 #include "libbpf.h"
22 #include "btf.h"
23 
/* Fallback definitions for ELF constants that older system headers
 * (<elf.h>/libelf) may not provide.
 */
#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef R_BPF_64_64
#define R_BPF_64_64 1
#endif
#ifndef R_BPF_64_32
#define R_BPF_64_32 10
#endif

#ifndef SHT_LLVM_ADDRSIG
#define SHT_LLVM_ADDRSIG 0x6FFF4C03
#endif

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

/* Older libelf all end up in this expression, for both 32 and 64 bit */
#ifndef GELF_ST_VISIBILITY
#define GELF_ST_VISIBILITY(o) ((o) & 0x03)
#endif
48 
/* Helpers to hand-encode raw BTF type descriptors as sequences of __u32
 * words, matching the binary layout of struct btf_type and its
 * kind-specific trailing data.
 */
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
	(((__u32)!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
	BTF_INT_ENC(encoding, bits_offset, bits)
#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
#define BTF_TYPE_FLOAT_ENC(name, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
62 
/* Branch-prediction hints, defined only if not already provided */
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
/* NOTE: min()/max() evaluate their arguments more than once; avoid
 * passing expressions with side effects.
 */
#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
#ifndef max
# define max(x, y) ((x) < (y) ? (y) : (x))
#endif
/* offset of the first byte immediately past FIELD within TYPE */
#ifndef offsetofend
# define offsetofend(TYPE, FIELD) \
	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
#endif
79 
/* Symbol versioning is different between static and shared library.
 * Properly versioned symbols are needed for shared library, but
 * only the symbol of the new version is needed for static library.
 */
#ifdef SHARED
/* In .symver directives, "@@" marks the default version of a symbol,
 * a single "@" marks an older compat version.
 */
# define COMPAT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@" #version);
# define DEFAULT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@@" #version);
#else
# define COMPAT_VERSION(internal_name, api_name, version)
/* static build: just alias the API name to the internal implementation */
# define DEFAULT_VERSION(internal_name, api_name, version) \
	extern typeof(internal_name) api_name \
	__attribute__((alias(#internal_name)));
#endif
95 
/* Central logging routine; format string checked like printf()
 * (format args start at parameter 2).
 */
extern void libbpf_print(enum libbpf_print_level level,
			 const char *format, ...)
	__attribute__((format(printf, 2, 3)));

/* All messages get a "libbpf: " prefix for easy identification */
#define __pr(level, fmt, ...)	\
do {				\
	libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__);	\
} while (0)

#define pr_warn(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
108 
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
/*
 * Re-implement glibc's reallocarray() for libbpf internal-only use.
 * reallocarray(), unfortunately, is not available in all versions of glibc,
 * so requires extra feature detection and using reallocarray() stub from
 * <tools/libc_compat.h> and COMPAT_NEED_REALLOCARRAY. All this complicates
 * build of libbpf unnecessarily and is just a maintenance burden. Instead,
 * it's trivial to implement libbpf-specific internal version and use it
 * throughout libbpf.
 *
 * Returns realloc(ptr, nmemb * size), or NULL (leaving ptr's allocation
 * intact) if nmemb * size would overflow size_t.
 */
static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
{
	size_t total;

#if __has_builtin(__builtin_mul_overflow)
	if (__builtin_mul_overflow(nmemb, size, &total))
		return NULL;
#else
	/* Bound by SIZE_MAX, not ULONG_MAX: unsigned long is narrower than
	 * size_t on LLP64 targets, which would spuriously reject valid
	 * sizes. size == 0 yields total == 0, consistent with the
	 * __builtin_mul_overflow() branch above.
	 */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;
	total = nmemb * size;
#endif
	return realloc(ptr, total);
}
135 
struct btf;
struct btf_type;

/* Internal, non-const variant of BTF type lookup by ID */
struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id);
/* Human-readable name of t's BTF kind, for logging/error messages */
const char *btf_kind_str(const struct btf_type *t);
/* Skip modifier (const/volatile/restrict) and typedef wrappers down to the
 * underlying type; if res_id is non-NULL it presumably receives the final
 * resolved type ID — see implementation in libbpf.c.
 */
const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
142 
143 static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
144 {
145 	return (enum btf_func_linkage)(int)btf_vlen(t);
146 }
147 
148 static inline __u32 btf_type_info(int kind, int vlen, int kflag)
149 {
150 	return (kflag << 31) | (kind << 24) | vlen;
151 }
152 
/* Bitmask recording which attributes were explicitly present in a
 * BTF-defined map definition (see struct btf_map_def below).
 */
enum map_def_parts {
	MAP_DEF_MAP_TYPE	= 0x001,
	MAP_DEF_KEY_TYPE	= 0x002,
	MAP_DEF_KEY_SIZE	= 0x004,
	MAP_DEF_VALUE_TYPE	= 0x008,
	MAP_DEF_VALUE_SIZE	= 0x010,
	MAP_DEF_MAX_ENTRIES	= 0x020,
	MAP_DEF_MAP_FLAGS	= 0x040,
	MAP_DEF_NUMA_NODE	= 0x080,
	MAP_DEF_PINNING		= 0x100,
	MAP_DEF_INNER_MAP	= 0x200,

	MAP_DEF_ALL		= 0x3ff, /* combination of all above */
};
167 
/* Parsed attributes of a BTF-defined map; 'parts' records which of the
 * fields below were explicitly set in the map's BTF definition.
 */
struct btf_map_def {
	enum map_def_parts parts;
	__u32 map_type;
	__u32 key_type_id;
	__u32 key_size;
	__u32 value_type_id;
	__u32 value_size;
	__u32 max_entries;
	__u32 map_flags;
	__u32 numa_node;
	__u32 pinning;
};
180 
/* Parse BTF map definition type def_t into map_def; inner_def, if
 * non-NULL, receives the inner map's definition for map-in-map types.
 * NOTE(review): exact 'strict' semantics live in the implementation —
 * verify there before relying on them.
 */
int parse_btf_map_def(const char *map_name, struct btf *btf,
		      const struct btf_type *def_t, bool strict,
		      struct btf_map_def *map_def, struct btf_map_def *inner_def);

/* Growable-array helpers: *data holds *cap_cnt elements of elem_sz bytes */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt);
int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
188 
/* Check a user-supplied opts struct: it must be big enough to hold the
 * mandatory 'sz' field, and any bytes past the fields this libbpf knows
 * about (opts_sz) must be zero.
 */
static inline bool libbpf_validate_opts(const char *opts,
					size_t opts_sz, size_t user_sz,
					const char *type_name)
{
	size_t off;

	if (user_sz < sizeof(size_t)) {
		pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
		return false;
	}
	/* loop body is skipped entirely when user_sz <= opts_sz */
	for (off = opts_sz; off < user_sz; off++) {
		if (opts[off]) {
			pr_warn("%s has non-zero extra bytes\n", type_name);
			return false;
		}
	}
	return true;
}
210 
/* true if opts is NULL or passes libbpf_validate_opts() for 'type' */
#define OPTS_VALID(opts, type)						      \
	(!(opts) || libbpf_validate_opts((const char *)opts,		      \
					 offsetofend(struct type,	      \
						     type##__last_field),     \
					 (opts)->sz, #type))
/* true if the caller's opts struct is new enough to contain 'field' */
#define OPTS_HAS(opts, field) \
	((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
/* read 'field' if present in the caller's struct, else fallback_value */
#define OPTS_GET(opts, field, fallback_value) \
	(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
/* write 'field' only if the caller's struct actually contains it */
#define OPTS_SET(opts, field, value)		\
	do {					\
		if (OPTS_HAS(opts, field))	\
			(opts)->field = value;	\
	} while (0)
225 
/* Parse a CPU mask from a string / from a file into a bool-per-CPU array;
 * caller owns the allocated *mask.
 */
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
/* Load raw BTF type and string section data into the kernel */
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len);
230 
/* Internal, all-inclusive parameter set for loading a BPF program;
 * consumed by libbpf__bpf_prog_load() below.
 */
struct bpf_prog_load_params {
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	const char *name;
	const struct bpf_insn *insns;
	size_t insn_cnt;
	const char *license;
	__u32 kern_version;
	__u32 attach_prog_fd;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 prog_ifindex;
	__u32 prog_btf_fd;
	__u32 prog_flags;

	/* func_info records (see struct bpf_func_info_min below) */
	__u32 func_info_rec_size;
	const void *func_info;
	__u32 func_info_cnt;

	/* line_info records (see struct bpf_line_info_min below) */
	__u32 line_info_rec_size;
	const void *line_info;
	__u32 line_info_cnt;

	/* verifier log verbosity and destination buffer */
	__u32 log_level;
	char *log_buf;
	size_t log_buf_sz;
};
258 
/* Issue the program-load command using the parameter set above */
int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);

/* Look up an ELF section's size / a variable's offset within obj by name */
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off);
/* Fetch BTF from the kernel by fd; base_btf presumably serves as the base
 * for split BTF — confirm against the implementation.
 */
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
				const char **prefix, int *kind);
268 
struct btf_ext_info {
	/*
	 * info points to the individual info section (e.g. func_info and
	 * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
	 */
	void *info;
	__u32 rec_size;	/* size of a single record, in bytes */
	__u32 len;	/* total length of the info data, in bytes */
};
278 
/* Iterate struct btf_ext_info_sec blocks within info segment 'seg'; each
 * block is followed by num_info records of seg->rec_size bytes each.
 * NOTE: macro arguments are evaluated multiple times.
 */
#define for_each_btf_ext_sec(seg, sec)					\
	for (sec = (seg)->info;						\
	     (void *)sec < (seg)->info + (seg)->len;			\
	     sec = (void *)sec + sizeof(struct btf_ext_info_sec) +	\
		   (seg)->rec_size * sec->num_info)

/* Iterate the fixed-size records (rec, index i) within one section block */
#define for_each_btf_ext_rec(seg, sec, i, rec)				\
	for (i = 0, rec = (void *)&(sec)->data;				\
	     i < (sec)->num_info;					\
	     i++, rec = (void *)rec + (seg)->rec_size)
289 
290 /*
291  * The .BTF.ext ELF section layout defined as
292  *   struct btf_ext_header
293  *   func_info subsection
294  *
295  * The func_info subsection layout:
296  *   record size for struct bpf_func_info in the func_info subsection
297  *   struct btf_sec_func_info for section #1
298  *   a list of bpf_func_info records for section #1
299  *     where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
300  *     but may not be identical
301  *   struct btf_sec_func_info for section #2
302  *   a list of bpf_func_info records for section #2
303  *   ......
304  *
305  * Note that the bpf_func_info record size in .BTF.ext may not
306  * be the same as the one defined in include/uapi/linux/bpf.h.
307  * The loader should ensure that record_size meets minimum
308  * requirement and pass the record as is to the kernel. The
309  * kernel will handle the func_info properly based on its contents.
310  */
/* Fixed header at the start of the .BTF.ext section (see layout comment
 * above); off/len pairs locate each subsection.
 */
struct btf_ext_header {
	__u16	magic;
	__u8	version;
	__u8	flags;
	__u32	hdr_len;

	/* All offsets are in bytes relative to the end of this header */
	__u32	func_info_off;
	__u32	func_info_len;
	__u32	line_info_off;
	__u32	line_info_len;

	/* optional part of .BTF.ext header */
	__u32	core_relo_off;
	__u32	core_relo_len;
};
327 
/* Parsed .BTF.ext data: the raw buffer plus per-subsection views */
struct btf_ext {
	union {
		struct btf_ext_header *hdr;
		void *data;	/* same buffer as hdr, viewed as raw bytes */
	};
	struct btf_ext_info func_info;
	struct btf_ext_info line_info;
	struct btf_ext_info core_relo_info;
	__u32 data_size;	/* size of the data buffer, in bytes */
};
338 
/* Per-ELF-section block inside a .BTF.ext info subsection */
struct btf_ext_info_sec {
	__u32	sec_name_off;	/* offset of the section name string */
	__u32	num_info;
	/* Followed by num_info * record_size number of bytes */
	__u8	data[];
};
345 
/* The minimum bpf_func_info checked by the loader; actual records in
 * .BTF.ext may be larger (see record-size note above).
 */
struct bpf_func_info_min {
	__u32   insn_off;
	__u32   type_id;
};
351 
/* The minimum bpf_line_info checked by the loader; as with func_info,
 * actual records in .BTF.ext may be larger.
 */
struct bpf_line_info_min {
	__u32	insn_off;
	__u32	file_name_off;
	__u32	line_off;
	__u32	line_col;
};
359 
/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
 * has to be adjusted by relocations. Values appear in .BTF.ext CO-RE
 * relocation records (struct bpf_core_relo below), so existing ones should
 * not be renumbered.
 */
enum bpf_core_relo_kind {
	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
	BPF_FIELD_BYTE_SIZE = 1,	/* field size in bytes */
	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
	BPF_FIELD_SIGNED = 3,		/* field signedness (0 - unsigned, 1 - signed) */
	BPF_FIELD_LSHIFT_U64 = 4,	/* bitfield-specific left bitshift */
	BPF_FIELD_RSHIFT_U64 = 5,	/* bitfield-specific right bitshift */
	BPF_TYPE_ID_LOCAL = 6,		/* type ID in local BPF object */
	BPF_TYPE_ID_TARGET = 7,		/* type ID in target kernel */
	BPF_TYPE_EXISTS = 8,		/* type existence in target kernel */
	BPF_TYPE_SIZE = 9,		/* type size in bytes */
	BPF_ENUMVAL_EXISTS = 10,	/* enum value existence in target kernel */
	BPF_ENUMVAL_VALUE = 11,		/* enum value integer value */
};
377 
378 /* The minimum bpf_core_relo checked by the loader
379  *
380  * CO-RE relocation captures the following data:
381  * - insn_off - instruction offset (in bytes) within a BPF program that needs
382  *   its insn->imm field to be relocated with actual field info;
383  * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
384  *   type or field;
385  * - access_str_off - offset into corresponding .BTF string section. String
386  *   interpretation depends on specific relocation kind:
387  *     - for field-based relocations, string encodes an accessed field using
388  *     a sequence of field and array indices, separated by colon (:). It's
389  *     conceptually very close to LLVM's getelementptr ([0]) instruction's
390  *     arguments for identifying offset to a field.
 *     - for type-based relocations, string is expected to be just "0";
392  *     - for enum value-based relocations, string contains an index of enum
393  *     value within its enum type;
394  *
395  * Example to provide a better feel.
396  *
397  *   struct sample {
398  *       int a;
399  *       struct {
400  *           int b[10];
401  *       };
402  *   };
403  *
404  *   struct sample *s = ...;
405  *   int x = &s->a;     // encoded as "0:0" (a is field #0)
406  *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
407  *                      // b is field #0 inside anon struct, accessing elem #5)
408  *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
409  *
 * type_id for all relocs in this example will capture BTF type id of
411  * `struct sample`.
412  *
413  * Such relocation is emitted when using __builtin_preserve_access_index()
414  * Clang built-in, passing expression that captures field address, e.g.:
415  *
416  * bpf_probe_read(&dst, sizeof(dst),
417  *		  __builtin_preserve_access_index(&src->a.b.c));
418  *
419  * In this case Clang will emit field relocation recording necessary data to
420  * be able to find offset of embedded `a.b.c` field within `src` struct.
421  *
422  *   [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
423  */
struct bpf_core_relo {
	__u32   insn_off;	/* instruction offset (bytes) within program */
	__u32   type_id;	/* BTF type ID of the "root" entity */
	__u32   access_str_off;	/* .BTF string offset of the access spec */
	enum bpf_core_relo_kind kind;	/* which aspect to relocate */
};
430 
/* Visitor callbacks: receive a pointer to each type-ID / string-offset
 * field; the non-const pointer suggests visitors may rewrite the value in
 * place — confirm against the implementations.
 */
typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
/* Apply visitor to every type ID / string offset referenced by one BTF
 * type, or by all of .BTF.ext's subsections.
 */
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
437 
438 #endif /* __LIBBPF_LIBBPF_INTERNAL_H */
439