// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Facebook */

#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/string.h>
#include <linux/bpf_verifier.h>
#include "relo_core.h"

static const char *btf_kind_str(const struct btf_type *t)
{
	return btf_type_str(t);
}

static bool is_ldimm64_insn(struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id)
{
	return btf_type_skip_modifiers(btf, id, res_id);
}

static const char *btf__name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_name_by_offset(btf, offset);
}

static s64 btf__resolve_size(const struct btf *btf, u32 type_id)
{
	const struct btf_type *t;
	int size;

	t = btf_type_by_id(btf, type_id);
	t = btf_resolve_size(btf, t, &size);
	if (IS_ERR(t))
		return PTR_ERR(t);
	return size;
}

enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

#undef pr_warn
#undef pr_info
#undef pr_debug
#define pr_warn(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_info(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_debug(fmt, log, ...)	bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define libbpf_print(level, fmt, ...)	bpf_log((void *)prog_name, fmt, ##__VA_ARGS__)
#else
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <linux/err.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#endif

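/* A flexible array member is a zero-length array that is the last named
 * member of its enclosing struct, e.g. 'struct s { int cnt; int data[]; };'.
 */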
static bool is_flex_arr(const struct btf *btf,
			const struct bpf_core_accessor *acc,
			const struct btf_array *arr)
{
	const struct btf_type *t;

	/* not a flexible array if it's not inside a struct or has a non-zero size */
	if (!acc->name || arr->nelems > 0)
		return false;

	/* has to be the last member of the enclosing struct */
	t = btf_type_by_id(btf, acc->type_id);
	return acc->idx == btf_vlen(t) - 1;
}

static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
	case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
	case BPF_CORE_FIELD_EXISTS: return "field_exists";
	case BPF_CORE_FIELD_SIGNED: return "signed";
	case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
	case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
	case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
	case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
	case BPF_CORE_TYPE_EXISTS: return "type_exists";
	case BPF_CORE_TYPE_SIZE: return "type_size";
	case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
	case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
	default: return "unknown";
	}
}

static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return true;
	default:
		return false;
	}
}

static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_TYPE_ID_LOCAL:
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return true;
	default:
		return false;
	}
}

static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
{
	switch (kind) {
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return true;
	default:
		return false;
	}
}

/*
 * Turn bpf_core_relo into a low- and high-level spec representation,
 * validating correctness along the way, as well as calculating resulting
 * field bit offset, specified by accessor string. Low-level spec captures
 * every single level of nestedness, including traversing anonymous
 * struct/union members. High-level one only captures semantically meaningful
 * "turning points": named fields and array indices.
 * E.g., for this case:
 *
 *   struct sample {
 *       int __unimportant;
 *       struct {
 *           int __1;
 *           int __2;
 *           int a[7];
 *       };
 *   };
 *
 *   struct sample *s = ...;
 *
 *   int x = &s->a[3]; // access string = '0:1:2:3'
 *
 * Low-level spec has 1:1 mapping with each element of access string (it's
 * just a parsed access string representation): [0, 1, 2, 3].
 *
 * High-level spec will capture only 3 points:
 *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
 *   - field 'a' access (corresponds to '2' in low-level spec);
 *   - array element #3 access (corresponds to '3' in low-level spec).
 *
 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
 * spec and raw_spec are kept empty.
 *
 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access
 * string to specify the enumerator's value index that needs to be relocated.
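 *
 * E.g., for 'enum E { A, B, C };', relocating enumerator C would use the
 * access string "2", its zero-based index within the enum definition.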
 */
static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
			       __u32 type_id,
			       const char *spec_str,
			       enum bpf_core_relo_kind relo_kind,
			       struct bpf_core_spec *spec)
{
	int access_idx, parsed_len, i;
	struct bpf_core_accessor *acc;
	const struct btf_type *t;
	const char *name;
	__u32 id;
	__s64 sz;

	if (str_is_empty(spec_str) || *spec_str == ':')
		return -EINVAL;

	memset(spec, 0, sizeof(*spec));
	spec->btf = btf;
	spec->root_type_id = type_id;
	spec->relo_kind = relo_kind;

	/* type-based relocations don't have a field access string */
	if (core_relo_is_type_based(relo_kind)) {
		if (strcmp(spec_str, "0"))
			return -EINVAL;
		return 0;
	}

	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
	while (*spec_str) {
		if (*spec_str == ':')
			++spec_str;
		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
			return -EINVAL;
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;
		spec_str += parsed_len;
		spec->raw_spec[spec->raw_len++] = access_idx;
	}

	if (spec->raw_len == 0)
		return -EINVAL;

	t = skip_mods_and_typedefs(btf, type_id, &id);
	if (!t)
		return -EINVAL;

	access_idx = spec->raw_spec[0];
	acc = &spec->spec[0];
	acc->type_id = id;
	acc->idx = access_idx;
	spec->len++;

	if (core_relo_is_enumval_based(relo_kind)) {
		if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
			return -EINVAL;

		/* record enumerator name in the first accessor */
		acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
		return 0;
	}

	if (!core_relo_is_field_based(relo_kind))
		return -EINVAL;

	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return sz;
	spec->bit_offset = access_idx * sz * 8;
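	/* e.g., the common '&s->...' access has access_idx == 0, so the
	 * initial bit offset is 0; an access through 's[2]' would instead
	 * start at 2 * sizeof(*s) * 8
	 */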

	for (i = 1; i < spec->raw_len; i++) {
		t = skip_mods_and_typedefs(btf, id, &id);
		if (!t)
			return -EINVAL;

		access_idx = spec->raw_spec[i];
		acc = &spec->spec[spec->len];

		if (btf_is_composite(t)) {
			const struct btf_member *m;
			__u32 bit_offset;

			if (access_idx >= btf_vlen(t))
				return -EINVAL;

			bit_offset = btf_member_bit_offset(t, access_idx);
			spec->bit_offset += bit_offset;

			m = btf_members(t) + access_idx;
			if (m->name_off) {
				name = btf__name_by_offset(btf, m->name_off);
				if (str_is_empty(name))
					return -EINVAL;

				acc->type_id = id;
				acc->idx = access_idx;
				acc->name = name;
				spec->len++;
			}

			id = m->type;
		} else if (btf_is_array(t)) {
			const struct btf_array *a = btf_array(t);
			bool flex;

			t = skip_mods_and_typedefs(btf, a->type, &id);
			if (!t)
				return -EINVAL;

			flex = is_flex_arr(btf, acc - 1, a);
			if (!flex && access_idx >= a->nelems)
				return -EINVAL;

			spec->spec[spec->len].type_id = id;
			spec->spec[spec->len].idx = access_idx;
			spec->len++;

			sz = btf__resolve_size(btf, id);
			if (sz < 0)
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
				prog_name, type_id, spec_str, i, id, btf_kind_str(t));
			return -EINVAL;
		}
	}

	return 0;
}

/* Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 *   - any two STRUCTs/UNIONs are compatible and can be mixed;
 *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
 *   - any two PTRs are always compatible;
 *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *     least one of enums should be anonymous;
 *   - for ENUMs, check sizes, names are ignored;
 *   - for INT, size and signedness are ignored;
 *   - any two FLOATs are always compatible;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
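 *
 * E.g., under the FWD/ENUM name rule above, 'sample' and 'sample___flavor'
 * share the essential name "sample" (the ___ flavor suffix is ignored), so
 * their names are considered matching.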
 */
static int bpf_core_fields_are_compat(const struct btf *local_btf,
				      __u32 local_id,
				      const struct btf *targ_btf,
				      __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;

recur:
	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
		return 1;
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_PTR:
	case BTF_KIND_FLOAT:
		return 1;
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM: {
		const char *local_name, *targ_name;
		size_t local_len, targ_len;

		local_name = btf__name_by_offset(local_btf,
						 local_type->name_off);
		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
		local_len = bpf_core_essential_name_len(local_name);
		targ_len = bpf_core_essential_name_len(targ_name);
		/* one of them is anonymous or both w/ same flavor-less names */
		return local_len == 0 || targ_len == 0 ||
		       (local_len == targ_len &&
			strncmp(local_name, targ_name, local_len) == 0);
	}
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 &&
		       btf_int_offset(targ_type) == 0;
	case BTF_KIND_ARRAY:
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		return 0;
	}
}

/*
 * Given single high-level named field accessor in local type, find
 * corresponding high-level accessor for a target type. Along the way,
 * maintain low-level spec for target as well. Also keep updating target
 * bit offset.
 *
 * Searching is performed through recursive exhaustive enumeration of all
 * fields of a struct/union. If there are any anonymous (embedded)
 * structs/unions, they are recursively searched as well. If field with
 * desired name is found, check compatibility between local and target types,
 * before returning result.
 *
 * 1 is returned, if field is found.
 * 0 is returned if no compatible field is found.
 * <0 is returned on error.
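 *
 * E.g., searching for field 'a' in 'struct { struct { int a; }; }' first
 * descends into the anonymous member, extending the target's low-level
 * spec by one index per nesting level traversed.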
 */
static int bpf_core_match_member(const struct btf *local_btf,
				 const struct bpf_core_accessor *local_acc,
				 const struct btf *targ_btf,
				 __u32 targ_id,
				 struct bpf_core_spec *spec,
				 __u32 *next_targ_id)
{
	const struct btf_type *local_type, *targ_type;
	const struct btf_member *local_member, *m;
	const char *local_name, *targ_name;
	__u32 local_id;
	int i, n, found;

	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!targ_type)
		return -EINVAL;
	if (!btf_is_composite(targ_type))
		return 0;

	local_id = local_acc->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

	n = btf_vlen(targ_type);
	m = btf_members(targ_type);
	for (i = 0; i < n; i++, m++) {
		__u32 bit_offset;

		bit_offset = btf_member_bit_offset(targ_type, i);

		/* too deep struct/union/array nesting */
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;

		/* speculate this member will be the good one */
		spec->bit_offset += bit_offset;
		spec->raw_spec[spec->raw_len++] = i;

		targ_name = btf__name_by_offset(targ_btf, m->name_off);
		if (str_is_empty(targ_name)) {
			/* embedded struct/union, we need to go deeper */
			found = bpf_core_match_member(local_btf, local_acc,
						      targ_btf, m->type,
						      spec, next_targ_id);
			if (found) /* either found or error */
				return found;
		} else if (strcmp(local_name, targ_name) == 0) {
			/* matching named field */
			struct bpf_core_accessor *targ_acc;

			targ_acc = &spec->spec[spec->len++];
			targ_acc->type_id = targ_id;
			targ_acc->idx = i;
			targ_acc->name = targ_name;

			*next_targ_id = m->type;
			found = bpf_core_fields_are_compat(local_btf,
							   local_member->type,
							   targ_btf, m->type);
			if (!found)
				spec->len--; /* pop accessor */
			return found;
		}
		/* member turned out not to be what we looked for */
		spec->bit_offset -= bit_offset;
		spec->raw_len--;
	}

	return 0;
}

/*
 * Try to match local spec to a target type and, if successful, produce full
 * target spec (high-level, low-level + bit offset).
 */
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
			       const struct btf *targ_btf, __u32 targ_id,
			       struct bpf_core_spec *targ_spec)
{
	const struct btf_type *targ_type;
	const struct bpf_core_accessor *local_acc;
	struct bpf_core_accessor *targ_acc;
	int i, sz, matched;

	memset(targ_spec, 0, sizeof(*targ_spec));
	targ_spec->btf = targ_btf;
	targ_spec->root_type_id = targ_id;
	targ_spec->relo_kind = local_spec->relo_kind;

	if (core_relo_is_type_based(local_spec->relo_kind)) {
		return bpf_core_types_are_compat(local_spec->btf,
						 local_spec->root_type_id,
						 targ_btf, targ_id);
	}

	local_acc = &local_spec->spec[0];
	targ_acc = &targ_spec->spec[0];

	if (core_relo_is_enumval_based(local_spec->relo_kind)) {
		size_t local_essent_len, targ_essent_len;
		const struct btf_enum *e;
		const char *targ_name;

		/* has to resolve to an enum */
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
		if (!btf_is_enum(targ_type))
			return 0;

		local_essent_len = bpf_core_essential_name_len(local_acc->name);

		for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
			targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
			targ_essent_len = bpf_core_essential_name_len(targ_name);
			if (targ_essent_len != local_essent_len)
				continue;
			if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
				targ_acc->type_id = targ_id;
				targ_acc->idx = i;
				targ_acc->name = targ_name;
				targ_spec->len++;
				targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
				targ_spec->raw_len++;
				return 1;
			}
		}
		return 0;
	}

	if (!core_relo_is_field_based(local_spec->relo_kind))
		return -EINVAL;

	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
						   &targ_id);
		if (!targ_type)
			return -EINVAL;

		if (local_acc->name) {
			matched = bpf_core_match_member(local_spec->btf,
							local_acc,
							targ_btf, targ_id,
							targ_spec, &targ_id);
			if (matched <= 0)
				return matched;
		} else {
			/* for i=0, targ_id is already treated as array element
			 * type (because it's the original struct), for others
			 * we should find array element type first
			 */
			if (i > 0) {
				const struct btf_array *a;
				bool flex;

				if (!btf_is_array(targ_type))
					return 0;

				a = btf_array(targ_type);
				flex = is_flex_arr(targ_btf, targ_acc - 1, a);
				if (!flex && local_acc->idx >= a->nelems)
					return 0;
				if (!skip_mods_and_typedefs(targ_btf, a->type,
							    &targ_id))
					return -EINVAL;
			}

			/* too deep struct/union/array nesting */
			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
				return -E2BIG;

			targ_acc->type_id = targ_id;
			targ_acc->idx = local_acc->idx;
			targ_acc->name = NULL;
			targ_spec->len++;
			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
			targ_spec->raw_len++;

			sz = btf__resolve_size(targ_btf, targ_id);
			if (sz < 0)
				return sz;
			targ_spec->bit_offset += local_acc->idx * sz * 8;
		}
	}

	return 1;
}

static int bpf_core_calc_field_relo(const char *prog_name,
				    const struct bpf_core_relo *relo,
				    const struct bpf_core_spec *spec,
				    __u32 *val, __u32 *field_sz, __u32 *type_id,
				    bool *validate)
{
	const struct bpf_core_accessor *acc;
	const struct btf_type *t;
	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
	const struct btf_member *m;
	const struct btf_type *mt;
	bool bitfield;
	__s64 sz;

	*field_sz = 0;

	if (relo->kind == BPF_CORE_FIELD_EXISTS) {
		*val = spec ? 1 : 0;
		return 0;
	}

	if (!spec)
		return -EUCLEAN; /* request instruction poisoning */

	acc = &spec->spec[spec->len - 1];
	t = btf_type_by_id(spec->btf, acc->type_id);

	/* a[n] accessor needs special handling */
	if (!acc->name) {
		if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
			/* remember field size for load/store mem size */
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*field_sz = sz;
			*type_id = acc->type_id;
		} else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*val = sz;
		} else {
			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
				prog_name, relo->kind, relo->insn_off / 8);
			return -EINVAL;
		}
		if (validate)
			*validate = true;
		return 0;
	}

	m = btf_members(t) + acc->idx;
	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
	bit_off = spec->bit_offset;
	bit_sz = btf_member_bitfield_size(t, acc->idx);

	bitfield = bit_sz > 0;
	if (bitfield) {
		byte_sz = mt->size;
		byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out smallest int size necessary for bitfield load */
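		/* e.g., a 6-bit bitfield at bit_off=28 inside a 4-byte int
		 * crosses the first 4-byte boundary (28 + 6 > 32), so the
		 * load is widened to byte_sz=8 at byte_off=0
		 */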
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
					prog_name, relo->kind, relo->insn_off / 8);
				return -E2BIG;
			}
			byte_sz *= 2;
			byte_off = bit_off / 8 / byte_sz * byte_sz;
		}
	} else {
		sz = btf__resolve_size(spec->btf, field_type_id);
		if (sz < 0)
			return -EINVAL;
		byte_sz = sz;
		byte_off = spec->bit_offset / 8;
		bit_sz = byte_sz * 8;
	}

	/* for bitfields, all the relocatable aspects are ambiguous and we
	 * might disagree with compiler, so turn off validation of expected
	 * value, except for signedness
	 */
	if (validate)
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
		*val = byte_off;
		if (!bitfield) {
			*field_sz = byte_sz;
			*type_id = field_type_id;
		}
		break;
	case BPF_CORE_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_CORE_FIELD_SIGNED:
		/* enums will be assumed unsigned */
		*val = btf_is_enum(mt) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_CORE_FIELD_LSHIFT_U64:
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
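		/* e.g., for the 6-bit field at bit_off=28 loaded as a u64
		 * from byte_off=0 on little-endian: lshift = 64 - 34 = 30,
		 * and the rshift below is 64 - 6 = 58, together isolating
		 * the 6 field bits
		 */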
		break;
	case BPF_CORE_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_CORE_FIELD_EXISTS:
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
				   const struct bpf_core_spec *spec,
				   __u32 *val, bool *validate)
{
	__s64 sz;

	/* by default, always check expected value in bpf_insn */
	if (validate)
		*validate = true;

	/* type-based relos return zero when target type is not found */
	if (!spec) {
		*val = 0;
		return 0;
	}

	switch (relo->kind) {
	case BPF_CORE_TYPE_ID_TARGET:
		*val = spec->root_type_id;
		/* type ID, embedded in bpf_insn, might change during linking,
		 * so enforcing it is pointless
		 */
		if (validate)
			*validate = false;
		break;
	case BPF_CORE_TYPE_EXISTS:
		*val = 1;
		break;
	case BPF_CORE_TYPE_SIZE:
		sz = btf__resolve_size(spec->btf, spec->root_type_id);
		if (sz < 0)
			return -EINVAL;
		*val = sz;
		break;
	case BPF_CORE_TYPE_ID_LOCAL:
	/* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
				      const struct bpf_core_spec *spec,
				      __u32 *val)
{
	const struct btf_type *t;
	const struct btf_enum *e;

	switch (relo->kind) {
	case BPF_CORE_ENUMVAL_EXISTS:
		*val = spec ? 1 : 0;
		break;
	case BPF_CORE_ENUMVAL_VALUE:
		if (!spec)
			return -EUCLEAN; /* request instruction poisoning */
		t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
		e = btf_enum(t) + spec->spec[0].idx;
		*val = e->val;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

struct bpf_core_relo_res
{
	/* expected value in the instruction, unless validate == false */
	__u32 orig_val;
	/* new value that needs to be patched up to */
	__u32 new_val;
	/* relocation unsuccessful, poison instruction, but don't fail load */
	bool poison;
	/* some relocations can't be validated against orig_val */
	bool validate;
	/* for field byte offset relocations of the form:
	 *     *(T *)(rX + <off>) = rY
	 *     rX = *(T *)(rY + <off>),
	 * we remember the original and resolved field size to adjust direct
	 * memory loads of pointers and integers; this is necessary for 32-bit
	 * host kernel architectures, but also allows us to automatically
	 * relocate fields that were resized from, e.g., u32 to u64, etc.
	 */
	bool fail_memsz_adjust;
	__u32 orig_sz;
	__u32 orig_type_id;
	__u32 new_sz;
	__u32 new_type_id;
};

/* Calculate original and target relocation values, given local and target
 * specs and relocation kind. These values are calculated for each candidate.
 * If there are multiple candidates, resulting values should all be consistent
 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
 * If instruction has to be poisoned, *poison will be set to true.
 */
static int bpf_core_calc_relo(const char *prog_name,
			      const struct bpf_core_relo *relo,
			      int relo_idx,
			      const struct bpf_core_spec *local_spec,
			      const struct bpf_core_spec *targ_spec,
			      struct bpf_core_relo_res *res)
{
	int err = -EOPNOTSUPP;

	res->orig_val = 0;
	res->new_val = 0;
	res->poison = false;
	res->validate = true;
	res->fail_memsz_adjust = false;
	res->orig_sz = res->new_sz = 0;
	res->orig_type_id = res->new_type_id = 0;

	if (core_relo_is_field_based(relo->kind)) {
		err = bpf_core_calc_field_relo(prog_name, relo, local_spec,
					       &res->orig_val, &res->orig_sz,
					       &res->orig_type_id, &res->validate);
		err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec,
						      &res->new_val, &res->new_sz,
						      &res->new_type_id, NULL);
		if (err)
			goto done;
		/* Validate if it's safe to adjust load/store memory size.
		 * Adjustments are performed only if original and new memory
		 * sizes differ.
		 */
		res->fail_memsz_adjust = false;
		if (res->orig_sz != res->new_sz) {
			const struct btf_type *orig_t, *new_t;

			orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
			new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);

			/* There are two use cases in which it's safe to
			 * adjust load/store's mem size:
			 *   - reading a 32-bit kernel pointer, while on the BPF
			 *   side pointers are always 64-bit; in this case
			 *   it's safe to "downsize" instruction size due to
			 *   pointer being treated as unsigned integer with
			 *   zero-extended upper 32-bits;
			 *   - reading unsigned integers, again because
			 *   zero-extension preserves the value correctly.
			 *
			 * In all other cases it's incorrect to attempt to
			 * load/store the field because the read value will be
			 * incorrect, so we poison the relocated instruction.
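			 *
			 * E.g., a field resized from u32 to u64 between
			 * kernel versions: both are unsigned INTs, so the
			 * 4-byte load is safely widened to an 8-byte one.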
			 */
			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
				goto done;
			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
				goto done;

			/* mark as invalid mem size adjustment, but this will
			 * only be checked for LDX/STX/ST insns
			 */
			res->fail_memsz_adjust = true;
		}
	} else if (core_relo_is_type_based(relo->kind)) {
		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
	} else if (core_relo_is_enumval_based(relo->kind)) {
		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
	}

done:
	if (err == -EUCLEAN) {
		/* EUCLEAN is used to signal instruction poisoning request */
		res->poison = true;
		err = 0;
	} else if (err == -EOPNOTSUPP) {
		/* EOPNOTSUPP means unknown/unsupported relocation */
		pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind),
			relo->kind, relo->insn_off / 8);
	}

	return err;
}

/*
 * Turn an instruction for which CO-RE relocation failed into an invalid one
 * with a distinct signature.
 */
static void bpf_core_poison_insn(const char *prog_name, int relo_idx,
				 int insn_idx, struct bpf_insn *insn)
{
	pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
		 prog_name, relo_idx, insn_idx);
	insn->code = BPF_JMP | BPF_CALL;
	insn->dst_reg = 0;
	insn->src_reg = 0;
	insn->off = 0;
	/* if this instruction is reachable (not dead code),
	 * verifier will complain with the following message:
	 * invalid func unknown#195896080
	 */
	insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
}

static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_DW: return 8;
	case BPF_W: return 4;
	case BPF_H: return 2;
	case BPF_B: return 1;
	default: return -1;
	}
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

/*
 * Patch relocatable BPF instruction.
 *
 * Patched value is determined by relocation kind and target specification.
 * For existence relocations, target spec will be NULL if the field/type is
 * not found. Expected insn->imm value is determined using relocation kind
 * and local spec, and is checked before patching instruction. If the actual
 * insn->imm value is wrong, bail out with an error.
 *
 * Currently supported classes of BPF instruction are:
 * 1. rX = <imm> (assignment with immediate operand);
 * 2. rX += <imm> (arithmetic operations with immediate operand);
 * 3. rX = <imm64> (load with 64-bit immediate value);
 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
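 *
 * In the implementation below, classes 1-3 patch the instruction's
 * immediate, while classes 4-6 patch the 16-bit offset and, when the field
 * size changed, the BPF_SIZE bits of insn->code as well.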
 */
static int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
			       int insn_idx, const struct bpf_core_relo *relo,
			       int relo_idx, const struct bpf_core_relo_res *res)
{
	__u32 orig_val, new_val;
	__u8 class;

	class = BPF_CLASS(insn->code);

	if (res->poison) {
poison:
		/* poison second part of ldimm64 to avoid confusing error from
		 * verifier about "unknown opcode 00"
		 */
		if (is_ldimm64_insn(insn))
			bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1);
		bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn);
		return 0;
	}

	orig_val = res->orig_val;
	new_val = res->new_val;

	switch (class) {
	case BPF_ALU:
	case BPF_ALU64:
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;
		if (res->validate && insn->imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
				prog_name, relo_idx,
				insn_idx, insn->imm, orig_val, new_val);
			return -EINVAL;
		}
		orig_val = insn->imm;
		insn->imm = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
			 prog_name, relo_idx, insn_idx,
			 orig_val, new_val);
		break;
	case BPF_LDX:
	case BPF_ST:
	case BPF_STX:
		if (res->validate && insn->off != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
				prog_name, relo_idx, insn_idx, insn->off, orig_val, new_val);
			return -EINVAL;
		}
		if (new_val > SHRT_MAX) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
				prog_name, relo_idx, insn_idx, new_val);
			return -ERANGE;
		}
		if (res->fail_memsz_adjust) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
				prog_name, relo_idx, insn_idx);
			goto poison;
		}

		orig_val = insn->off;
		insn->off = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
			 prog_name, relo_idx, insn_idx, orig_val, new_val);

		if (res->new_sz != res->orig_sz) {
			int insn_bytes_sz, insn_bpf_sz;

			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
			if (insn_bytes_sz != res->orig_sz) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
					prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
				return -EINVAL;
			}

			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
			if (insn_bpf_sz < 0) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
					prog_name, relo_idx, insn_idx, res->new_sz);
				return -EINVAL;
			}

			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
				 prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
		}
		break;
	case BPF_LD: {
		__u64 imm;

		if (!is_ldimm64_insn(insn) ||
		    insn[0].src_reg != 0 || insn[0].off != 0 ||
		    insn[1].code != 0 || insn[1].dst_reg != 0 ||
		    insn[1].src_reg != 0 || insn[1].off != 0) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
				prog_name, relo_idx, insn_idx);
			return -EINVAL;
		}

		imm = insn[0].imm + ((__u64)insn[1].imm << 32);
		if (res->validate && imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
				prog_name, relo_idx,
				insn_idx, (unsigned long long)imm,
				orig_val, new_val);
			return -EINVAL;
		}

		insn[0].imm = new_val;
		insn[1].imm = 0; /* currently only 32-bit values are supported */
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
			 prog_name, relo_idx, insn_idx,
			 (unsigned long long)imm, new_val);
		break;
	}
	default:
		pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
			prog_name, relo_idx, insn_idx, insn->code,
			insn->src_reg, insn->dst_reg, insn->off, insn->imm);
		return -EINVAL;
	}

	return 0;
}

/* Output spec definition in the format:
 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
 */
static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec)
{
	const struct btf_type *t;
	const struct btf_enum *e;
	const char *s;
	__u32 type_id;
	int i;

	type_id = spec->root_type_id;
	t = btf_type_by_id(spec->btf, type_id);
	s = btf__name_by_offset(spec->btf, t->name_off);

	libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);

	if (core_relo_is_type_based(spec->relo_kind))
		return;

	if (core_relo_is_enumval_based(spec->relo_kind)) {
		t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
		e = btf_enum(t) + spec->raw_spec[0];
		s = btf__name_by_offset(spec->btf, e->name_off);

		libbpf_print(level, "::%s = %u", s, e->val);
		return;
	}

	if (core_relo_is_field_based(spec->relo_kind)) {
		for (i = 0; i < spec->len; i++) {
			if (spec->spec[i].name)
				libbpf_print(level, ".%s", spec->spec[i].name);
			else if (i > 0 || spec->spec[i].idx > 0)
				libbpf_print(level, "[%u]", spec->spec[i].idx);
		}

		libbpf_print(level, " (");
		for (i = 0; i < spec->raw_len; i++)
			libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);

		if (spec->bit_offset % 8)
			libbpf_print(level, " @ offset %u.%u)",
				     spec->bit_offset / 8, spec->bit_offset % 8);
		else
			libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
		return;
	}
}

/*
 * CO-RE relocate single instruction.
 *
 * The outline and important points of the algorithm:
 * 1. For given local type, find corresponding candidate target types.
 *    Candidate type is a type with the same "essential" name, ignoring
 *    everything after last triple underscore (___). E.g., `sample`,
 *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
 *    for each other. Names with triple underscore are referred to as
 *    "flavors" and are useful, among other things, to allow specifying or
 *    supporting incompatible variations of the same kernel struct, which
 *    might differ between different kernel versions and/or build
 *    configurations.
 *
 *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
 *    converter, when deduplicated BTF of a kernel still contains more than
 *    one different types with the same name. In that case, ___2, ___3, etc
 *    are appended starting from second name conflict. But flavors are also
 *    useful when defined "locally", in a BPF program, to extract the same
 *    data from incompatible changes between different kernel
 *    versions/configurations. For instance, to handle field renames between
 *    kernel versions, one can use two flavors of the struct name with the
 *    same common name and use conditional relocations to extract that field,
 *    depending on target kernel version, as sketched at the end of this
 *    comment.
 * 2. For each candidate type, try to match local specification to this
 *    candidate target type. Matching involves finding corresponding
 *    high-level spec accessors, meaning that all named fields should match,
 *    as well as all array accesses should be within the actual bounds. Also,
 *    types should be compatible (see bpf_core_fields_are_compat for details).
 * 3. It is supported and expected that there might be multiple flavors
 *    matching the spec. As long as all the specs resolve to the same set of
 *    offsets across all candidates, there is no error. If there is any
 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 *    imperfection of BTF deduplication, which can cause slight duplication of
 *    the same BTF type, if some directly or indirectly referenced (by
 *    pointer) type gets resolved to different actual types in different
 *    object files. If such a situation occurs, deduplicated BTF will end up
 *    with two (or more) structurally identical types, which differ only in
 *    types they refer to through pointer. This should be OK in most cases and
 *    is not an error.
 * 4. Candidate types search is performed by linearly scanning through all
 *    types in target BTF. It is anticipated that this is overall more
 *    efficient memory-wise and not significantly worse (if not better)
 *    CPU-wise compared to prebuilding a map from all local type names to
 *    a list of candidate type names. It's also sped up by caching resolved
 *    list of matching candidates per each local "root" type ID, that has at
 *    least one bpf_core_relo associated with it. This list is shared
 *    between multiple relocations for the same type ID and is updated as some
 *    of the candidates are pruned due to structural incompatibility.
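 *
 *    As a hypothetical sketch of such local flavors (type names are made
 *    up), a program may define:
 *
 *      struct sample___old { int value; };
 *      struct sample___new { long value; };
 *
 *    Both share the essential name "sample" and are matched against the
 *    kernel's actual 'struct sample', whichever layout it has.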
 */
int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn,
			     int insn_idx,
			     const struct bpf_core_relo *relo,
			     int relo_idx,
			     const struct btf *local_btf,
			     struct bpf_core_cand_list *cands,
			     struct bpf_core_spec *specs_scratch)
{
	struct bpf_core_spec *local_spec = &specs_scratch[0];
	struct bpf_core_spec *cand_spec = &specs_scratch[1];
	struct bpf_core_spec *targ_spec = &specs_scratch[2];
	struct bpf_core_relo_res cand_res, targ_res;
	const struct btf_type *local_type;
	const char *local_name;
	__u32 local_id;
	const char *spec_str;
	int i, j, err;

	local_id = relo->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name)
		return -EINVAL;

	spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
	if (str_is_empty(spec_str))
		return -EINVAL;

	err = bpf_core_parse_spec(prog_name, local_btf, local_id, spec_str,
				  relo->kind, local_spec);
	if (err) {
		pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
			prog_name, relo_idx, local_id, btf_kind_str(local_type),
			str_is_empty(local_name) ? "<anon>" : local_name,
			spec_str, err);
		return -EINVAL;
	}

	pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name,
		 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
	bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, local_spec);
	libbpf_print(LIBBPF_DEBUG, "\n");

	/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
	if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
		/* bpf_insn's imm value could get out of sync during linking */
		memset(&targ_res, 0, sizeof(targ_res));
		targ_res.validate = false;
		targ_res.poison = false;
		targ_res.orig_val = local_spec->root_type_id;
		targ_res.new_val = local_spec->root_type_id;
		goto patch_insn;
	}

	/* libbpf doesn't support candidate search for anonymous types */
	if (str_is_empty(spec_str)) {
		pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
		return -EOPNOTSUPP;
	}

	for (i = 0, j = 0; i < cands->len; i++) {
		err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
					  cands->cands[i].id, cand_spec);
		if (err < 0) {
			pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
				prog_name, relo_idx, i);
			bpf_core_dump_spec(prog_name, LIBBPF_WARN, cand_spec);
			libbpf_print(LIBBPF_WARN, ": %d\n", err);
			return err;
		}

		pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name,
			 relo_idx, err == 0 ? "non-matching" : "matching", i);
		bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, cand_spec);
		libbpf_print(LIBBPF_DEBUG, "\n");

		if (err == 0)
			continue;

		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
		if (err)
			return err;

		if (j == 0) {
			targ_res = cand_res;
			*targ_spec = *cand_spec;
		} else if (cand_spec->bit_offset != targ_spec->bit_offset) {
			/* if there are many field relo candidates, they
			 * should all resolve to the same bit offset
			 */
			pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
				prog_name, relo_idx, cand_spec->bit_offset,
				targ_spec->bit_offset);
			return -EINVAL;
		} else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
			/* all candidates should result in the same relocation
			 * decision and value, otherwise it's dangerous to
			 * proceed due to ambiguity
			 */
			pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
				prog_name, relo_idx,
				cand_res.poison ? "failure" : "success", cand_res.new_val,
				targ_res.poison ? "failure" : "success", targ_res.new_val);
			return -EINVAL;
		}

		cands->cands[j++] = cands->cands[i];
	}

	/*
	 * For BPF_CORE_FIELD_EXISTS relos, or when the BPF program contains
	 * its own field existence or kernel version/config checks, it's
	 * expected that we might not find any candidates. In this case, if
	 * the field wasn't found in any candidate, the list of candidates
	 * shouldn't change at all, and we'll just handle relocating
	 * appropriately, depending on the relo's kind.
	 */
	if (j > 0)
		cands->len = j;

	/*
	 * If no candidates were found, it might be a programmer error as
	 * well as an expected case, depending on whether the instruction
	 * with the relocation is guarded in some way that makes it
	 * unreachable (dead code) when the relocation can't be resolved.
	 * This is handled in bpf_core_patch_insn() uniformly by replacing
	 * that instruction with a BPF helper call insn (using an invalid
	 * helper ID). If that instruction is indeed unreachable, then it
	 * will be ignored and eliminated by the verifier. If it was an
	 * error, then the verifier will complain and point to a specific
	 * instruction number in its log.
	 */
	if (j == 0) {
		pr_debug("prog '%s': relo #%d: no matching targets found\n",
			 prog_name, relo_idx);

		/* calculate single target relo result explicitly */
		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, &targ_res);
		if (err)
			return err;
	}

patch_insn:
	/* bpf_core_patch_insn() should know how to handle missing targ_spec */
	err = bpf_core_patch_insn(prog_name, insn, insn_idx, relo, relo_idx, &targ_res);
	if (err) {
		pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
			prog_name, relo_idx, relo->insn_off / 8, err);
		return -EINVAL;
	}

	return 0;
}