xref: /openbmc/linux/kernel/bpf/btf.c (revision 81de3bf3)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/skmsg.h>
22 #include <linux/perf_event.h>
23 #include <net/sock.h>
24 
25 /* BTF (BPF Type Format) is the metadata format which describes
26  * the data types of BPF programs/maps.  Hence, it basically focuses
27  * on the C programming language, which modern BPF primarily
28  * uses.
29  *
30  * ELF Section:
31  * ~~~~~~~~~~~
32  * The BTF data is stored under the ".BTF" ELF section
33  *
34  * struct btf_type:
35  * ~~~~~~~~~~~~~~~
36  * Each 'struct btf_type' object describes a C data type.
37  * Depending on the type it is describing, a 'struct btf_type'
38  * object may be followed by more data.  F.e.
39  * To describe an array, 'struct btf_type' is followed by
40  * 'struct btf_array'.
41  *
42  * 'struct btf_type' and any extra data following it are
43  * 4 bytes aligned.
44  *
45  * Type section:
46  * ~~~~~~~~~~~~~
47  * The BTF type section contains a list of 'struct btf_type' objects.
48  * Each one describes a C type.  Recall from the above section
49  * that a 'struct btf_type' object could be immediately followed by extra
50  * data in order to describe some particular C types.
51  *
52  * type_id:
53  * ~~~~~~~
54  * Each btf_type object is identified by a type_id.  The type_id
55  * is implied by the location of the btf_type object in
56  * the BTF type section.  The first one has type_id 1.  The second
57  * one has type_id 2...etc.  Hence, an earlier btf_type has
58  * a smaller type_id.
59  *
60  * A btf_type object may refer to another btf_type object by using
61  * type_id (i.e. the "type" in the "struct btf_type").
62  *
63  * NOTE that we cannot assume any reference-order.
64  * A btf_type object can refer to an earlier btf_type object
65  * but it can also refer to a later btf_type object.
66  *
67  * For example, to describe "const void *", a btf_type
68  * object describing "const" may refer to another btf_type
69  * object describing "void *".  This type-reference is done
70  * by specifying type_id:
71  *
72  * [1] CONST (anon) type_id=2
73  * [2] PTR (anon) type_id=0
74  *
75  * The above is the btf_verifier debug log:
76  *   - Each line starting with "[?]" is a btf_type object
77  *   - [?] is the type_id of the btf_type object.
78  *   - CONST/PTR is the BTF_KIND_XXX
79  *   - "(anon)" is the name of the type.  It just
80  *     happens that CONST and PTR have no name.
81  *   - type_id=XXX is the 'u32 type' in btf_type
82  *
83  * NOTE: "void" has type_id 0
84  *
85  * String section:
86  * ~~~~~~~~~~~~~~
87  * The BTF string section contains the names used by the type section.
88  * Each string is referred to by an "offset" from the beginning of the
89  * string section.
90  *
91  * Each string is '\0' terminated.
92  *
93  * The first character in the string section must be '\0'
94  * which is used to mean 'anonymous'. Some btf_type may not
95  * have a name.
96  */
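/* For illustration, a rough sketch of how a simple C declaration
 * could be laid out in BTF.  The type_ids, string offsets and log
 * formatting below are made up for this example:
 *
 *	const int *p;
 *
 * Type section:
 *	[1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 *	[2] CONST (anon) type_id=1
 *	[3] PTR (anon) type_id=2
 *
 * String section:
 *	offset 0: '\0'		(the mandatory 'anonymous' name)
 *	offset 1: "int\0"
 *
 * [1] stores name_off=1 (the offset of "int" in the string section),
 * while [2] and [3] are anonymous and store name_off=0.
 */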
97 
98 /* BTF verification:
99  *
100  * To verify BTF data, two passes are needed.
101  *
102  * Pass #1
103  * ~~~~~~~
104  * The first pass is to collect all btf_type objects into
105  * an array: "btf->types".
106  *
107  * Depending on the C type that a btf_type is describing,
108  * a btf_type may be followed by extra data.  We don't know
109  * how many btf_types there are, and more importantly we don't
110  * know where each btf_type is located in the type section.
111  *
112  * Without knowing the location of each type_id, most verifications
113  * cannot be done.  e.g. an earlier btf_type may refer to a later
114  * btf_type (recall the "const void *" above), so we cannot
115  * check this type-reference in the first pass.
116  *
117  * The first pass still does some verification (e.g.
118  * checking that the name is a valid offset into the string section).
119  *
120  * Pass #2
121  * ~~~~~~~
122  * The main focus is to resolve a btf_type that refers
123  * to another type.
124  *
125  * We have to ensure the referring type:
126  * 1) does exist in the BTF (i.e. in btf->types[])
127  * 2) does not cause a loop:
128  *	struct A {
129  *		struct B b;
130  *	};
131  *
132  *	struct B {
133  *		struct A a;
134  *	};
135  *
136  * btf_type_needs_resolve() decides if a btf_type needs
137  * to be resolved.
138  *
139  * The needs_resolve type implements the "resolve()" ops which
140  * essentially does a DFS and detects backedges.
141  *
142  * During resolve (or DFS), different C types have different
143  * "RESOLVED" conditions.
144  *
145  * When resolving a BTF_KIND_STRUCT, we need to resolve all its
146  * members because a member always refers to another
147  * type.  A struct's member can be treated as "RESOLVED" if
148  * it refers to a BTF_KIND_PTR.  Otherwise, the
149  * following valid C struct would be rejected:
150  *
151  *	struct A {
152  *		int m;
153  *		struct A *a;
154  *	};
155  *
156  * When resolving a BTF_KIND_PTR, it needs to keep resolving if
157  * it refers to another BTF_KIND_PTR.  Otherwise, we cannot
158  * detect a pointer loop, e.g.:
159  * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
160  *                        ^                                         |
161  *                        +-----------------------------------------+
162  *
163  */
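/* A sketch of how the DFS above catches the struct A <-> struct B
 * loop from the example (details simplified):
 *
 *	resolve(struct A)
 *	  env_stack_push(A)	A: NOT_VISITED -> VISITED
 *	  member "b" refers to B
 *	  env_stack_push(B)	B: NOT_VISITED -> VISITED
 *	  member "a" refers to A
 *	  env_stack_push(A)	A is already VISITED but not RESOLVED,
 *				so the push fails (-EEXIST), i.e. a
 *				backedge, and the BTF is rejected.
 *
 * A valid chain instead pops each vertex as RESOLVED once everything
 * it refers to is a sink or already RESOLVED.
 */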
164 
165 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
166 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
167 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
168 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
169 #define BITS_ROUNDUP_BYTES(bits) \
170 	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
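/* For example, with bits = 12:
 *	BITS_ROUNDDOWN_BYTES(12) == 1	(12 >> 3)
 *	BITS_PER_BYTE_MASKED(12) == 4	(12 & 7)
 *	BITS_ROUNDUP_BYTES(12)   == 2	(1 + !!4)
 * and with bits = 16 (already byte aligned):
 *	BITS_ROUNDDOWN_BYTES(16) == 2
 *	BITS_PER_BYTE_MASKED(16) == 0
 *	BITS_ROUNDUP_BYTES(16)   == 2
 */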
171 
172 #define BTF_INFO_MASK 0x8f00ffff
173 #define BTF_INT_MASK 0x0fffffff
174 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
175 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
176 
177 /* 16MB is enough for 64k structs, each with 16 members, and
178  * a few MB of space for the string section.
179  * The hard limit is S32_MAX.
180  */
181 #define BTF_MAX_SIZE (16 * 1024 * 1024)
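/* Rough arithmetic behind the 16MB figure (assuming the fixed 12-byte
 * 'struct btf_type' and 12-byte 'struct btf_member' encodings):
 * 64k structs with 16 members each need about
 * 64k * (12 + 16 * 12) bytes ~= 12.75MB of type data, leaving a few
 * MB of the 16MB budget for the string section.
 */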
182 
183 #define for_each_member(i, struct_type, member)			\
184 	for (i = 0, member = btf_type_member(struct_type);	\
185 	     i < btf_type_vlen(struct_type);			\
186 	     i++, member++)
187 
188 #define for_each_member_from(i, from, struct_type, member)		\
189 	for (i = from, member = btf_type_member(struct_type) + from;	\
190 	     i < btf_type_vlen(struct_type);				\
191 	     i++, member++)
192 
193 #define for_each_vsi(i, struct_type, member)			\
194 	for (i = 0, member = btf_type_var_secinfo(struct_type);	\
195 	     i < btf_type_vlen(struct_type);			\
196 	     i++, member++)
197 
198 #define for_each_vsi_from(i, from, struct_type, member)				\
199 	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
200 	     i < btf_type_vlen(struct_type);					\
201 	     i++, member++)
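/* A typical (hypothetical) use of the iterators above:
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member(i, struct_type, member)
 *		do_something_with(struct_type, member);
 *
 * 'member' walks the 'struct btf_member' array that immediately
 * follows the struct/union's 'struct btf_type' (see btf_type_member()
 * below), and 'i' walks the member index up to btf_type_vlen().
 */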
202 
203 DEFINE_IDR(btf_idr);
204 DEFINE_SPINLOCK(btf_idr_lock);
205 
206 struct btf {
207 	void *data;
208 	struct btf_type **types;
209 	u32 *resolved_ids;
210 	u32 *resolved_sizes;
211 	const char *strings;
212 	void *nohdr_data;
213 	struct btf_header hdr;
214 	u32 nr_types;
215 	u32 types_size;
216 	u32 data_size;
217 	refcount_t refcnt;
218 	u32 id;
219 	struct rcu_head rcu;
220 };
221 
222 enum verifier_phase {
223 	CHECK_META,
224 	CHECK_TYPE,
225 };
226 
227 struct resolve_vertex {
228 	const struct btf_type *t;
229 	u32 type_id;
230 	u16 next_member;
231 };
232 
233 enum visit_state {
234 	NOT_VISITED,
235 	VISITED,
236 	RESOLVED,
237 };
238 
239 enum resolve_mode {
240 	RESOLVE_TBD,	/* To Be Determined */
241 	RESOLVE_PTR,	/* Resolving for Pointer */
242 	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
243 					 * or array
244 					 */
245 };
246 
247 #define MAX_RESOLVE_DEPTH 32
248 
249 struct btf_sec_info {
250 	u32 off;
251 	u32 len;
252 };
253 
254 struct btf_verifier_env {
255 	struct btf *btf;
256 	u8 *visit_states;
257 	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
258 	struct bpf_verifier_log log;
259 	u32 log_type_id;
260 	u32 top_stack;
261 	enum verifier_phase phase;
262 	enum resolve_mode resolve_mode;
263 };
264 
265 static const char * const btf_kind_str[NR_BTF_KINDS] = {
266 	[BTF_KIND_UNKN]		= "UNKNOWN",
267 	[BTF_KIND_INT]		= "INT",
268 	[BTF_KIND_PTR]		= "PTR",
269 	[BTF_KIND_ARRAY]	= "ARRAY",
270 	[BTF_KIND_STRUCT]	= "STRUCT",
271 	[BTF_KIND_UNION]	= "UNION",
272 	[BTF_KIND_ENUM]		= "ENUM",
273 	[BTF_KIND_FWD]		= "FWD",
274 	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
275 	[BTF_KIND_VOLATILE]	= "VOLATILE",
276 	[BTF_KIND_CONST]	= "CONST",
277 	[BTF_KIND_RESTRICT]	= "RESTRICT",
278 	[BTF_KIND_FUNC]		= "FUNC",
279 	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
280 	[BTF_KIND_VAR]		= "VAR",
281 	[BTF_KIND_DATASEC]	= "DATASEC",
282 };
283 
284 struct btf_kind_operations {
285 	s32 (*check_meta)(struct btf_verifier_env *env,
286 			  const struct btf_type *t,
287 			  u32 meta_left);
288 	int (*resolve)(struct btf_verifier_env *env,
289 		       const struct resolve_vertex *v);
290 	int (*check_member)(struct btf_verifier_env *env,
291 			    const struct btf_type *struct_type,
292 			    const struct btf_member *member,
293 			    const struct btf_type *member_type);
294 	int (*check_kflag_member)(struct btf_verifier_env *env,
295 				  const struct btf_type *struct_type,
296 				  const struct btf_member *member,
297 				  const struct btf_type *member_type);
298 	void (*log_details)(struct btf_verifier_env *env,
299 			    const struct btf_type *t);
300 	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
301 			 u32 type_id, void *data, u8 bits_offsets,
302 			 struct seq_file *m);
303 };
304 
305 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
306 static struct btf_type btf_void;
307 
308 static int btf_resolve(struct btf_verifier_env *env,
309 		       const struct btf_type *t, u32 type_id);
310 
311 static bool btf_type_is_modifier(const struct btf_type *t)
312 {
313 	/* Some of them are not strictly C modifiers,
314 	 * but they are grouped into the same bucket
315 	 * as far as BTF is concerned:
316 	 *   A type (t) that refers to another
317 	 *   type through t->type AND its size cannot
318 	 *   be determined without following the t->type.
319 	 *
320 	 * ptr does not fall into this bucket
321 	 * because its size is always sizeof(void *).
322 	 */
323 	switch (BTF_INFO_KIND(t->info)) {
324 	case BTF_KIND_TYPEDEF:
325 	case BTF_KIND_VOLATILE:
326 	case BTF_KIND_CONST:
327 	case BTF_KIND_RESTRICT:
328 		return true;
329 	}
330 
331 	return false;
332 }
333 
334 bool btf_type_is_void(const struct btf_type *t)
335 {
336 	return t == &btf_void;
337 }
338 
339 static bool btf_type_is_fwd(const struct btf_type *t)
340 {
341 	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
342 }
343 
344 static bool btf_type_nosize(const struct btf_type *t)
345 {
346 	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
347 	       btf_type_is_func(t) || btf_type_is_func_proto(t);
348 }
349 
350 static bool btf_type_nosize_or_null(const struct btf_type *t)
351 {
352 	return !t || btf_type_nosize(t);
353 }
354 
355 /* A union is only a special case of struct:
356  * all of its members have offsetof(member) == 0
357  */
358 static bool btf_type_is_struct(const struct btf_type *t)
359 {
360 	u8 kind = BTF_INFO_KIND(t->info);
361 
362 	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
363 }
364 
365 static bool __btf_type_is_struct(const struct btf_type *t)
366 {
367 	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
368 }
369 
370 static bool btf_type_is_array(const struct btf_type *t)
371 {
372 	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
373 }
374 
375 static bool btf_type_is_var(const struct btf_type *t)
376 {
377 	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
378 }
379 
380 static bool btf_type_is_datasec(const struct btf_type *t)
381 {
382 	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
383 }
384 
385 /* Types that act only as a source, not as a sink or intermediate
386  * type when resolving.
387  */
388 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
389 {
390 	return btf_type_is_var(t) ||
391 	       btf_type_is_datasec(t);
392 }
393 
394 /* What types need to be resolved?
395  *
396  * btf_type_is_modifier() is an obvious one.
397  *
398  * btf_type_is_struct() because its member refers to
399  * another type (through member->type).
400  *
401  * btf_type_is_var() because the variable refers to
402  * another type. btf_type_is_datasec() holds multiple
403  * btf_type_is_var() types that need resolving.
404  *
405  * btf_type_is_array() because its element (array->type)
406  * refers to another type.  An array can be thought of as a
407  * special case of struct where the same member-type is
408  * repeated array->nelems times.
409  */
410 static bool btf_type_needs_resolve(const struct btf_type *t)
411 {
412 	return btf_type_is_modifier(t) ||
413 	       btf_type_is_ptr(t) ||
414 	       btf_type_is_struct(t) ||
415 	       btf_type_is_array(t) ||
416 	       btf_type_is_var(t) ||
417 	       btf_type_is_datasec(t);
418 }
419 
420 /* t->size can be used */
421 static bool btf_type_has_size(const struct btf_type *t)
422 {
423 	switch (BTF_INFO_KIND(t->info)) {
424 	case BTF_KIND_INT:
425 	case BTF_KIND_STRUCT:
426 	case BTF_KIND_UNION:
427 	case BTF_KIND_ENUM:
428 	case BTF_KIND_DATASEC:
429 		return true;
430 	}
431 
432 	return false;
433 }
434 
435 static const char *btf_int_encoding_str(u8 encoding)
436 {
437 	if (encoding == 0)
438 		return "(none)";
439 	else if (encoding == BTF_INT_SIGNED)
440 		return "SIGNED";
441 	else if (encoding == BTF_INT_CHAR)
442 		return "CHAR";
443 	else if (encoding == BTF_INT_BOOL)
444 		return "BOOL";
445 	else
446 		return "UNKN";
447 }
448 
449 static u16 btf_type_vlen(const struct btf_type *t)
450 {
451 	return BTF_INFO_VLEN(t->info);
452 }
453 
454 static bool btf_type_kflag(const struct btf_type *t)
455 {
456 	return BTF_INFO_KFLAG(t->info);
457 }
458 
459 static u32 btf_member_bit_offset(const struct btf_type *struct_type,
460 			     const struct btf_member *member)
461 {
462 	return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
463 					   : member->offset;
464 }
465 
466 static u32 btf_member_bitfield_size(const struct btf_type *struct_type,
467 				    const struct btf_member *member)
468 {
469 	return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
470 					   : 0;
471 }
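/* When the struct's kind_flag is set, member->offset packs two values
 * (see the BTF_MEMBER_BITFIELD_SIZE()/BTF_MEMBER_BIT_OFFSET() macros
 * in uapi/linux/btf.h): the bitfield size in the upper 8 bits and the
 * bit offset in the lower 24 bits.  For example, a (made-up) value of
 * member->offset == 0x03000020 means a 3-bit bitfield starting at bit
 * offset 32.
 */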
472 
473 static u32 btf_type_int(const struct btf_type *t)
474 {
475 	return *(u32 *)(t + 1);
476 }
477 
478 static const struct btf_array *btf_type_array(const struct btf_type *t)
479 {
480 	return (const struct btf_array *)(t + 1);
481 }
482 
483 static const struct btf_member *btf_type_member(const struct btf_type *t)
484 {
485 	return (const struct btf_member *)(t + 1);
486 }
487 
488 static const struct btf_enum *btf_type_enum(const struct btf_type *t)
489 {
490 	return (const struct btf_enum *)(t + 1);
491 }
492 
493 static const struct btf_var *btf_type_var(const struct btf_type *t)
494 {
495 	return (const struct btf_var *)(t + 1);
496 }
497 
498 static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
499 {
500 	return (const struct btf_var_secinfo *)(t + 1);
501 }
502 
503 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
504 {
505 	return kind_ops[BTF_INFO_KIND(t->info)];
506 }
507 
508 static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
509 {
510 	return BTF_STR_OFFSET_VALID(offset) &&
511 		offset < btf->hdr.str_len;
512 }
513 
514 static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
515 {
516 	if ((first ? !isalpha(c) :
517 		     !isalnum(c)) &&
518 	    c != '_' &&
519 	    ((c == '.' && !dot_ok) ||
520 	      c != '.'))
521 		return false;
522 	return true;
523 }
524 
525 static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
526 {
527 	/* offset must be valid */
528 	const char *src = &btf->strings[offset];
529 	const char *src_limit;
530 
531 	if (!__btf_name_char_ok(*src, true, dot_ok))
532 		return false;
533 
534 	/* set a limit on identifier length */
535 	src_limit = src + KSYM_NAME_LEN;
536 	src++;
537 	while (*src && src < src_limit) {
538 		if (!__btf_name_char_ok(*src, false, dot_ok))
539 			return false;
540 		src++;
541 	}
542 
543 	return !*src;
544 }
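/* For example, "task_struct" and "_x1" pass the check above, while
 * "1x" (leading digit) or an empty name do not.  Section names such
 * as ".bss" are only accepted when dot_ok is true (see
 * btf_name_valid_section() below).
 */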
545 
546 /* Only C-style identifiers are permitted. This can be relaxed if
547  * necessary.
548  */
549 static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
550 {
551 	return __btf_name_valid(btf, offset, false);
552 }
553 
554 static bool btf_name_valid_section(const struct btf *btf, u32 offset)
555 {
556 	return __btf_name_valid(btf, offset, true);
557 }
558 
559 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
560 {
561 	if (!offset)
562 		return "(anon)";
563 	else if (offset < btf->hdr.str_len)
564 		return &btf->strings[offset];
565 	else
566 		return "(invalid-name-offset)";
567 }
568 
569 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
570 {
571 	if (offset < btf->hdr.str_len)
572 		return &btf->strings[offset];
573 
574 	return NULL;
575 }
576 
577 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
578 {
579 	if (type_id > btf->nr_types)
580 		return NULL;
581 
582 	return btf->types[type_id];
583 }
584 
585 /*
586  * A regular int is not a bitfield and it must be either
587  * u8/u16/u32/u64 or __int128.
588  */
589 static bool btf_type_int_is_regular(const struct btf_type *t)
590 {
591 	u8 nr_bits, nr_bytes;
592 	u32 int_data;
593 
594 	int_data = btf_type_int(t);
595 	nr_bits = BTF_INT_BITS(int_data);
596 	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
597 	if (BITS_PER_BYTE_MASKED(nr_bits) ||
598 	    BTF_INT_OFFSET(int_data) ||
599 	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
600 	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
601 	     nr_bytes != (2 * sizeof(u64)))) {
602 		return false;
603 	}
604 
605 	return true;
606 }
607 
608 /*
609  * Check that given struct member is a regular int with expected
610  * offset and size.
611  */
612 bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
613 			   const struct btf_member *m,
614 			   u32 expected_offset, u32 expected_size)
615 {
616 	const struct btf_type *t;
617 	u32 id, int_data;
618 	u8 nr_bits;
619 
620 	id = m->type;
621 	t = btf_type_id_size(btf, &id, NULL);
622 	if (!t || !btf_type_is_int(t))
623 		return false;
624 
625 	int_data = btf_type_int(t);
626 	nr_bits = BTF_INT_BITS(int_data);
627 	if (btf_type_kflag(s)) {
628 		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
629 		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
630 
631 		/* if kflag is set, the int should be a regular int and
632 		 * the bit offset should be at a byte boundary.
633 		 */
634 		return !bitfield_size &&
635 		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
636 		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
637 	}
638 
639 	if (BTF_INT_OFFSET(int_data) ||
640 	    BITS_PER_BYTE_MASKED(m->offset) ||
641 	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
642 	    BITS_PER_BYTE_MASKED(nr_bits) ||
643 	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
644 		return false;
645 
646 	return true;
647 }
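/* A hypothetical caller checking that member 'm' of struct 's' is a
 * plain 4-byte int located 8 bytes into the struct:
 *
 *	if (!btf_member_is_reg_int(btf, s, m, 8, 4))
 *		return -EINVAL;
 *
 * Both expected_offset and expected_size are in bytes.
 */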
648 
649 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
650 					      const char *fmt, ...)
651 {
652 	va_list args;
653 
654 	va_start(args, fmt);
655 	bpf_verifier_vlog(log, fmt, args);
656 	va_end(args);
657 }
658 
659 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
660 					    const char *fmt, ...)
661 {
662 	struct bpf_verifier_log *log = &env->log;
663 	va_list args;
664 
665 	if (!bpf_verifier_log_needed(log))
666 		return;
667 
668 	va_start(args, fmt);
669 	bpf_verifier_vlog(log, fmt, args);
670 	va_end(args);
671 }
672 
673 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
674 						   const struct btf_type *t,
675 						   bool log_details,
676 						   const char *fmt, ...)
677 {
678 	struct bpf_verifier_log *log = &env->log;
679 	u8 kind = BTF_INFO_KIND(t->info);
680 	struct btf *btf = env->btf;
681 	va_list args;
682 
683 	if (!bpf_verifier_log_needed(log))
684 		return;
685 
686 	/* btf verifier prints all types it is processing via
687 	 * btf_verifier_log_type(..., fmt = NULL).
688 	 * Skip those prints for in-kernel BTF verification.
689 	 */
690 	if (log->level == BPF_LOG_KERNEL && !fmt)
691 		return;
692 
693 	__btf_verifier_log(log, "[%u] %s %s%s",
694 			   env->log_type_id,
695 			   btf_kind_str[kind],
696 			   __btf_name_by_offset(btf, t->name_off),
697 			   log_details ? " " : "");
698 
699 	if (log_details)
700 		btf_type_ops(t)->log_details(env, t);
701 
702 	if (fmt && *fmt) {
703 		__btf_verifier_log(log, " ");
704 		va_start(args, fmt);
705 		bpf_verifier_vlog(log, fmt, args);
706 		va_end(args);
707 	}
708 
709 	__btf_verifier_log(log, "\n");
710 }
711 
712 #define btf_verifier_log_type(env, t, ...) \
713 	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
714 #define btf_verifier_log_basic(env, t, ...) \
715 	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
716 
717 __printf(4, 5)
718 static void btf_verifier_log_member(struct btf_verifier_env *env,
719 				    const struct btf_type *struct_type,
720 				    const struct btf_member *member,
721 				    const char *fmt, ...)
722 {
723 	struct bpf_verifier_log *log = &env->log;
724 	struct btf *btf = env->btf;
725 	va_list args;
726 
727 	if (!bpf_verifier_log_needed(log))
728 		return;
729 
730 	if (log->level == BPF_LOG_KERNEL && !fmt)
731 		return;
732 	/* The CHECK_META phase already did a btf dump.
733 	 *
734 	 * If the member is logged again, it must have hit an error in
735 	 * parsing this member.  It is useful to print out which
736 	 * struct this member belongs to.
737 	 */
738 	if (env->phase != CHECK_META)
739 		btf_verifier_log_type(env, struct_type, NULL);
740 
741 	if (btf_type_kflag(struct_type))
742 		__btf_verifier_log(log,
743 				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
744 				   __btf_name_by_offset(btf, member->name_off),
745 				   member->type,
746 				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
747 				   BTF_MEMBER_BIT_OFFSET(member->offset));
748 	else
749 		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
750 				   __btf_name_by_offset(btf, member->name_off),
751 				   member->type, member->offset);
752 
753 	if (fmt && *fmt) {
754 		__btf_verifier_log(log, " ");
755 		va_start(args, fmt);
756 		bpf_verifier_vlog(log, fmt, args);
757 		va_end(args);
758 	}
759 
760 	__btf_verifier_log(log, "\n");
761 }
762 
763 __printf(4, 5)
764 static void btf_verifier_log_vsi(struct btf_verifier_env *env,
765 				 const struct btf_type *datasec_type,
766 				 const struct btf_var_secinfo *vsi,
767 				 const char *fmt, ...)
768 {
769 	struct bpf_verifier_log *log = &env->log;
770 	va_list args;
771 
772 	if (!bpf_verifier_log_needed(log))
773 		return;
774 	if (log->level == BPF_LOG_KERNEL && !fmt)
775 		return;
776 	if (env->phase != CHECK_META)
777 		btf_verifier_log_type(env, datasec_type, NULL);
778 
779 	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
780 			   vsi->type, vsi->offset, vsi->size);
781 	if (fmt && *fmt) {
782 		__btf_verifier_log(log, " ");
783 		va_start(args, fmt);
784 		bpf_verifier_vlog(log, fmt, args);
785 		va_end(args);
786 	}
787 
788 	__btf_verifier_log(log, "\n");
789 }
790 
791 static void btf_verifier_log_hdr(struct btf_verifier_env *env,
792 				 u32 btf_data_size)
793 {
794 	struct bpf_verifier_log *log = &env->log;
795 	const struct btf *btf = env->btf;
796 	const struct btf_header *hdr;
797 
798 	if (!bpf_verifier_log_needed(log))
799 		return;
800 
801 	if (log->level == BPF_LOG_KERNEL)
802 		return;
803 	hdr = &btf->hdr;
804 	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
805 	__btf_verifier_log(log, "version: %u\n", hdr->version);
806 	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
807 	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
808 	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
809 	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
810 	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
811 	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
812 	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
813 }
814 
815 static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
816 {
817 	struct btf *btf = env->btf;
818 
819 	/* < 2 because +1 for btf_void which is always in btf->types[0].
820 	 * btf_void is not accounted for in btf->nr_types because btf_void
821 	 * does not come from the BTF file.
822 	 */
823 	if (btf->types_size - btf->nr_types < 2) {
824 		/* Expand 'types' array */
825 
826 		struct btf_type **new_types;
827 		u32 expand_by, new_size;
828 
829 		if (btf->types_size == BTF_MAX_TYPE) {
830 			btf_verifier_log(env, "Exceeded max num of types");
831 			return -E2BIG;
832 		}
833 
834 		expand_by = max_t(u32, btf->types_size >> 2, 16);
835 		new_size = min_t(u32, BTF_MAX_TYPE,
836 				 btf->types_size + expand_by);
837 
838 		new_types = kvcalloc(new_size, sizeof(*new_types),
839 				     GFP_KERNEL | __GFP_NOWARN);
840 		if (!new_types)
841 			return -ENOMEM;
842 
843 		if (btf->nr_types == 0)
844 			new_types[0] = &btf_void;
845 		else
846 			memcpy(new_types, btf->types,
847 			       sizeof(*btf->types) * (btf->nr_types + 1));
848 
849 		kvfree(btf->types);
850 		btf->types = new_types;
851 		btf->types_size = new_size;
852 	}
853 
854 	btf->types[++(btf->nr_types)] = t;
855 
856 	return 0;
857 }
858 
859 static int btf_alloc_id(struct btf *btf)
860 {
861 	int id;
862 
863 	idr_preload(GFP_KERNEL);
864 	spin_lock_bh(&btf_idr_lock);
865 	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
866 	if (id > 0)
867 		btf->id = id;
868 	spin_unlock_bh(&btf_idr_lock);
869 	idr_preload_end();
870 
871 	if (WARN_ON_ONCE(!id))
872 		return -ENOSPC;
873 
874 	return id > 0 ? 0 : id;
875 }
876 
877 static void btf_free_id(struct btf *btf)
878 {
879 	unsigned long flags;
880 
881 	/*
882 	 * In map-in-map, calling map_delete_elem() on outer
883 	 * map will call bpf_map_put on the inner map.
884 	 * It will then eventually call btf_free_id()
885 	 * on the inner map.  Some of the map_delete_elem()
886 	 * implementations may have irqs disabled, so
887 	 * we need to use the _irqsave() version instead
888 	 * of the _bh() version.
889 	 */
890 	spin_lock_irqsave(&btf_idr_lock, flags);
891 	idr_remove(&btf_idr, btf->id);
892 	spin_unlock_irqrestore(&btf_idr_lock, flags);
893 }
894 
895 static void btf_free(struct btf *btf)
896 {
897 	kvfree(btf->types);
898 	kvfree(btf->resolved_sizes);
899 	kvfree(btf->resolved_ids);
900 	kvfree(btf->data);
901 	kfree(btf);
902 }
903 
904 static void btf_free_rcu(struct rcu_head *rcu)
905 {
906 	struct btf *btf = container_of(rcu, struct btf, rcu);
907 
908 	btf_free(btf);
909 }
910 
911 void btf_put(struct btf *btf)
912 {
913 	if (btf && refcount_dec_and_test(&btf->refcnt)) {
914 		btf_free_id(btf);
915 		call_rcu(&btf->rcu, btf_free_rcu);
916 	}
917 }
918 
919 static int env_resolve_init(struct btf_verifier_env *env)
920 {
921 	struct btf *btf = env->btf;
922 	u32 nr_types = btf->nr_types;
923 	u32 *resolved_sizes = NULL;
924 	u32 *resolved_ids = NULL;
925 	u8 *visit_states = NULL;
926 
927 	/* +1 for btf_void */
928 	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
929 				  GFP_KERNEL | __GFP_NOWARN);
930 	if (!resolved_sizes)
931 		goto nomem;
932 
933 	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
934 				GFP_KERNEL | __GFP_NOWARN);
935 	if (!resolved_ids)
936 		goto nomem;
937 
938 	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
939 				GFP_KERNEL | __GFP_NOWARN);
940 	if (!visit_states)
941 		goto nomem;
942 
943 	btf->resolved_sizes = resolved_sizes;
944 	btf->resolved_ids = resolved_ids;
945 	env->visit_states = visit_states;
946 
947 	return 0;
948 
949 nomem:
950 	kvfree(resolved_sizes);
951 	kvfree(resolved_ids);
952 	kvfree(visit_states);
953 	return -ENOMEM;
954 }
955 
956 static void btf_verifier_env_free(struct btf_verifier_env *env)
957 {
958 	kvfree(env->visit_states);
959 	kfree(env);
960 }
961 
962 static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
963 				     const struct btf_type *next_type)
964 {
965 	switch (env->resolve_mode) {
966 	case RESOLVE_TBD:
967 		/* int, enum or void is a sink */
968 		return !btf_type_needs_resolve(next_type);
969 	case RESOLVE_PTR:
970 		/* int, enum, void, struct, array, func or func_proto is a sink
971 		 * for ptr
972 		 */
973 		return !btf_type_is_modifier(next_type) &&
974 			!btf_type_is_ptr(next_type);
975 	case RESOLVE_STRUCT_OR_ARRAY:
976 		/* int, enum, void, ptr, func or func_proto is a sink
977 		 * for struct and array
978 		 */
979 		return !btf_type_is_modifier(next_type) &&
980 			!btf_type_is_array(next_type) &&
981 			!btf_type_is_struct(next_type);
982 	default:
983 		BUG();
984 	}
985 }
986 
987 static bool env_type_is_resolved(const struct btf_verifier_env *env,
988 				 u32 type_id)
989 {
990 	return env->visit_states[type_id] == RESOLVED;
991 }
992 
993 static int env_stack_push(struct btf_verifier_env *env,
994 			  const struct btf_type *t, u32 type_id)
995 {
996 	struct resolve_vertex *v;
997 
998 	if (env->top_stack == MAX_RESOLVE_DEPTH)
999 		return -E2BIG;
1000 
1001 	if (env->visit_states[type_id] != NOT_VISITED)
1002 		return -EEXIST;
1003 
1004 	env->visit_states[type_id] = VISITED;
1005 
1006 	v = &env->stack[env->top_stack++];
1007 	v->t = t;
1008 	v->type_id = type_id;
1009 	v->next_member = 0;
1010 
1011 	if (env->resolve_mode == RESOLVE_TBD) {
1012 		if (btf_type_is_ptr(t))
1013 			env->resolve_mode = RESOLVE_PTR;
1014 		else if (btf_type_is_struct(t) || btf_type_is_array(t))
1015 			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
1016 	}
1017 
1018 	return 0;
1019 }
1020 
1021 static void env_stack_set_next_member(struct btf_verifier_env *env,
1022 				      u16 next_member)
1023 {
1024 	env->stack[env->top_stack - 1].next_member = next_member;
1025 }
1026 
1027 static void env_stack_pop_resolved(struct btf_verifier_env *env,
1028 				   u32 resolved_type_id,
1029 				   u32 resolved_size)
1030 {
1031 	u32 type_id = env->stack[--(env->top_stack)].type_id;
1032 	struct btf *btf = env->btf;
1033 
1034 	btf->resolved_sizes[type_id] = resolved_size;
1035 	btf->resolved_ids[type_id] = resolved_type_id;
1036 	env->visit_states[type_id] = RESOLVED;
1037 }
1038 
1039 static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
1040 {
1041 	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
1042 }
1043 
1044 /* Resolve the size of a passed-in "type"
1045  *
1046  * type: is an array (e.g. u32 array[x][y])
1047  * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1048  * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
1049  *             corresponds to the return type.
1050  * *elem_type: u32
1051  * *total_nelems: (x * y).  Hence, individual elem size is
1052  *                (*type_size / *total_nelems)
1053  *
1054  * type: is not an array (e.g. const struct X)
1055  * return type: type "struct X"
1056  * *type_size: sizeof(struct X)
1057  * *elem_type: same as return type ("struct X")
1058  * *total_nelems: 1
1059  */
1060 static const struct btf_type *
1061 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1062 		 u32 *type_size, const struct btf_type **elem_type,
1063 		 u32 *total_nelems)
1064 {
1065 	const struct btf_type *array_type = NULL;
1066 	const struct btf_array *array;
1067 	u32 i, size, nelems = 1;
1068 
1069 	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
1070 		switch (BTF_INFO_KIND(type->info)) {
1071 		/* type->size can be used */
1072 		case BTF_KIND_INT:
1073 		case BTF_KIND_STRUCT:
1074 		case BTF_KIND_UNION:
1075 		case BTF_KIND_ENUM:
1076 			size = type->size;
1077 			goto resolved;
1078 
1079 		case BTF_KIND_PTR:
1080 			size = sizeof(void *);
1081 			goto resolved;
1082 
1083 		/* Modifiers */
1084 		case BTF_KIND_TYPEDEF:
1085 		case BTF_KIND_VOLATILE:
1086 		case BTF_KIND_CONST:
1087 		case BTF_KIND_RESTRICT:
1088 			type = btf_type_by_id(btf, type->type);
1089 			break;
1090 
1091 		case BTF_KIND_ARRAY:
1092 			if (!array_type)
1093 				array_type = type;
1094 			array = btf_type_array(type);
1095 			if (nelems && array->nelems > U32_MAX / nelems)
1096 				return ERR_PTR(-EINVAL);
1097 			nelems *= array->nelems;
1098 			type = btf_type_by_id(btf, array->type);
1099 			break;
1100 
1101 		/* type without size */
1102 		default:
1103 			return ERR_PTR(-EINVAL);
1104 		}
1105 	}
1106 
1107 	return ERR_PTR(-EINVAL);
1108 
1109 resolved:
1110 	if (nelems && size > U32_MAX / nelems)
1111 		return ERR_PTR(-EINVAL);
1112 
1113 	*type_size = nelems * size;
1114 	*total_nelems = nelems;
1115 	*elem_type = type;
1116 
1117 	return array_type ? : type;
1118 }
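/* For instance (with hypothetical type_ids), resolving the size of
 * "const u32 arr[2][3]" via btf_resolve_size() would return the
 * BTF_KIND_ARRAY type describing arr, with
 *	*type_size    == 24	(2 * 3 * sizeof(u32))
 *	*total_nelems == 6
 *	*elem_type    == the btf_type describing u32
 */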
1119 
1120 /* The input param "type_id" must point to a needs_resolve type */
1121 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1122 						  u32 *type_id)
1123 {
1124 	*type_id = btf->resolved_ids[*type_id];
1125 	return btf_type_by_id(btf, *type_id);
1126 }
1127 
1128 const struct btf_type *btf_type_id_size(const struct btf *btf,
1129 					u32 *type_id, u32 *ret_size)
1130 {
1131 	const struct btf_type *size_type;
1132 	u32 size_type_id = *type_id;
1133 	u32 size = 0;
1134 
1135 	size_type = btf_type_by_id(btf, size_type_id);
1136 	if (btf_type_nosize_or_null(size_type))
1137 		return NULL;
1138 
1139 	if (btf_type_has_size(size_type)) {
1140 		size = size_type->size;
1141 	} else if (btf_type_is_array(size_type)) {
1142 		size = btf->resolved_sizes[size_type_id];
1143 	} else if (btf_type_is_ptr(size_type)) {
1144 		size = sizeof(void *);
1145 	} else {
1146 		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1147 				 !btf_type_is_var(size_type)))
1148 			return NULL;
1149 
1150 		size_type_id = btf->resolved_ids[size_type_id];
1151 		size_type = btf_type_by_id(btf, size_type_id);
1152 		if (btf_type_nosize_or_null(size_type))
1153 			return NULL;
1154 		else if (btf_type_has_size(size_type))
1155 			size = size_type->size;
1156 		else if (btf_type_is_array(size_type))
1157 			size = btf->resolved_sizes[size_type_id];
1158 		else if (btf_type_is_ptr(size_type))
1159 			size = sizeof(void *);
1160 		else
1161 			return NULL;
1162 	}
1163 
1164 	*type_id = size_type_id;
1165 	if (ret_size)
1166 		*ret_size = size;
1167 
1168 	return size_type;
1169 }
1170 
1171 static int btf_df_check_member(struct btf_verifier_env *env,
1172 			       const struct btf_type *struct_type,
1173 			       const struct btf_member *member,
1174 			       const struct btf_type *member_type)
1175 {
1176 	btf_verifier_log_basic(env, struct_type,
1177 			       "Unsupported check_member");
1178 	return -EINVAL;
1179 }
1180 
1181 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1182 				     const struct btf_type *struct_type,
1183 				     const struct btf_member *member,
1184 				     const struct btf_type *member_type)
1185 {
1186 	btf_verifier_log_basic(env, struct_type,
1187 			       "Unsupported check_kflag_member");
1188 	return -EINVAL;
1189 }
1190 
1191 /* Used for ptr, array and struct/union type members.
1192  * int, enum and modifier types have their specific callback functions.
1193  */
1194 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1195 					  const struct btf_type *struct_type,
1196 					  const struct btf_member *member,
1197 					  const struct btf_type *member_type)
1198 {
1199 	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1200 		btf_verifier_log_member(env, struct_type, member,
1201 					"Invalid member bitfield_size");
1202 		return -EINVAL;
1203 	}
1204 
1205 	/* bitfield size is 0, so member->offset represents bit offset only.
1206 	 * It is safe to call non kflag check_member variants.
1207 	 */
1208 	return btf_type_ops(member_type)->check_member(env, struct_type,
1209 						       member,
1210 						       member_type);
1211 }
1212 
1213 static int btf_df_resolve(struct btf_verifier_env *env,
1214 			  const struct resolve_vertex *v)
1215 {
1216 	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1217 	return -EINVAL;
1218 }
1219 
1220 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
1221 			    u32 type_id, void *data, u8 bits_offsets,
1222 			    struct seq_file *m)
1223 {
1224 	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1225 }
1226 
1227 static int btf_int_check_member(struct btf_verifier_env *env,
1228 				const struct btf_type *struct_type,
1229 				const struct btf_member *member,
1230 				const struct btf_type *member_type)
1231 {
1232 	u32 int_data = btf_type_int(member_type);
1233 	u32 struct_bits_off = member->offset;
1234 	u32 struct_size = struct_type->size;
1235 	u32 nr_copy_bits;
1236 	u32 bytes_offset;
1237 
1238 	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
1239 		btf_verifier_log_member(env, struct_type, member,
1240 					"bits_offset exceeds U32_MAX");
1241 		return -EINVAL;
1242 	}
1243 
1244 	struct_bits_off += BTF_INT_OFFSET(int_data);
1245 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1246 	nr_copy_bits = BTF_INT_BITS(int_data) +
1247 		BITS_PER_BYTE_MASKED(struct_bits_off);
1248 
1249 	if (nr_copy_bits > BITS_PER_U128) {
1250 		btf_verifier_log_member(env, struct_type, member,
1251 					"nr_copy_bits exceeds 128");
1252 		return -EINVAL;
1253 	}
1254 
1255 	if (struct_size < bytes_offset ||
1256 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1257 		btf_verifier_log_member(env, struct_type, member,
1258 					"Member exceeds struct_size");
1259 		return -EINVAL;
1260 	}
1261 
1262 	return 0;
1263 }
1264 
1265 static int btf_int_check_kflag_member(struct btf_verifier_env *env,
1266 				      const struct btf_type *struct_type,
1267 				      const struct btf_member *member,
1268 				      const struct btf_type *member_type)
1269 {
1270 	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
1271 	u32 int_data = btf_type_int(member_type);
1272 	u32 struct_size = struct_type->size;
1273 	u32 nr_copy_bits;
1274 
1275 	/* a regular int type is required for the kflag int member */
1276 	if (!btf_type_int_is_regular(member_type)) {
1277 		btf_verifier_log_member(env, struct_type, member,
1278 					"Invalid member base type");
1279 		return -EINVAL;
1280 	}
1281 
1282 	/* check sanity of bitfield size */
1283 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
1284 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
1285 	nr_int_data_bits = BTF_INT_BITS(int_data);
1286 	if (!nr_bits) {
1287 		/* Not a bitfield member, member offset must be at byte
1288 		 * boundary.
1289 		 */
1290 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1291 			btf_verifier_log_member(env, struct_type, member,
1292 						"Invalid member offset");
1293 			return -EINVAL;
1294 		}
1295 
1296 		nr_bits = nr_int_data_bits;
1297 	} else if (nr_bits > nr_int_data_bits) {
1298 		btf_verifier_log_member(env, struct_type, member,
1299 					"Invalid member bitfield_size");
1300 		return -EINVAL;
1301 	}
1302 
1303 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1304 	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
1305 	if (nr_copy_bits > BITS_PER_U128) {
1306 		btf_verifier_log_member(env, struct_type, member,
1307 					"nr_copy_bits exceeds 128");
1308 		return -EINVAL;
1309 	}
1310 
1311 	if (struct_size < bytes_offset ||
1312 	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1313 		btf_verifier_log_member(env, struct_type, member,
1314 					"Member exceeds struct_size");
1315 		return -EINVAL;
1316 	}
1317 
1318 	return 0;
1319 }
1320 
1321 static s32 btf_int_check_meta(struct btf_verifier_env *env,
1322 			      const struct btf_type *t,
1323 			      u32 meta_left)
1324 {
1325 	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
1326 	u16 encoding;
1327 
1328 	if (meta_left < meta_needed) {
1329 		btf_verifier_log_basic(env, t,
1330 				       "meta_left:%u meta_needed:%u",
1331 				       meta_left, meta_needed);
1332 		return -EINVAL;
1333 	}
1334 
1335 	if (btf_type_vlen(t)) {
1336 		btf_verifier_log_type(env, t, "vlen != 0");
1337 		return -EINVAL;
1338 	}
1339 
1340 	if (btf_type_kflag(t)) {
1341 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1342 		return -EINVAL;
1343 	}
1344 
1345 	int_data = btf_type_int(t);
1346 	if (int_data & ~BTF_INT_MASK) {
1347 		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
1348 				       int_data);
1349 		return -EINVAL;
1350 	}
1351 
1352 	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
1353 
1354 	if (nr_bits > BITS_PER_U128) {
1355 		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
1356 				      BITS_PER_U128);
1357 		return -EINVAL;
1358 	}
1359 
1360 	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
1361 		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
1362 		return -EINVAL;
1363 	}
1364 
1365 	/*
1366 	 * Only one of the encoding bits is allowed and it
1367 	 * should be sufficient for the pretty print purpose (i.e. decoding).
1368 	 * Multiple bits can be allowed later if it is found
1369 	 * to be insufficient.
1370 	 */
1371 	encoding = BTF_INT_ENCODING(int_data);
1372 	if (encoding &&
1373 	    encoding != BTF_INT_SIGNED &&
1374 	    encoding != BTF_INT_CHAR &&
1375 	    encoding != BTF_INT_BOOL) {
1376 		btf_verifier_log_type(env, t, "Unsupported encoding");
1377 		return -ENOTSUPP;
1378 	}
1379 
1380 	btf_verifier_log_type(env, t, NULL);
1381 
1382 	return meta_needed;
1383 }
1384 
1385 static void btf_int_log(struct btf_verifier_env *env,
1386 			const struct btf_type *t)
1387 {
1388 	int int_data = btf_type_int(t);
1389 
1390 	btf_verifier_log(env,
1391 			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1392 			 t->size, BTF_INT_OFFSET(int_data),
1393 			 BTF_INT_BITS(int_data),
1394 			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1395 }
1396 
1397 static void btf_int128_print(struct seq_file *m, void *data)
1398 {
1399 	/* data points to a __int128 number.
1400 	 * Suppose
1401 	 *     int128_num = *(__int128 *)data;
1402 	 * The below formulas shows what upper_num and lower_num represents:
1403 	 * The formulas below show what upper_num and lower_num represent:
1404 	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
1405 	 */
1406 	u64 upper_num, lower_num;
1407 
1408 #ifdef __BIG_ENDIAN_BITFIELD
1409 	upper_num = *(u64 *)data;
1410 	lower_num = *(u64 *)(data + 8);
1411 #else
1412 	upper_num = *(u64 *)(data + 8);
1413 	lower_num = *(u64 *)data;
1414 #endif
1415 	if (upper_num == 0)
1416 		seq_printf(m, "0x%llx", lower_num);
1417 	else
1418 		seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
1419 }
1420 
1421 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
1422 			     u16 right_shift_bits)
1423 {
1424 	u64 upper_num, lower_num;
1425 
1426 #ifdef __BIG_ENDIAN_BITFIELD
1427 	upper_num = print_num[0];
1428 	lower_num = print_num[1];
1429 #else
1430 	upper_num = print_num[1];
1431 	lower_num = print_num[0];
1432 #endif
1433 
1434 	/* shake out un-needed bits by shift/or operations */
1435 	if (left_shift_bits >= 64) {
1436 		upper_num = lower_num << (left_shift_bits - 64);
1437 		lower_num = 0;
1438 	} else {
1439 		upper_num = (upper_num << left_shift_bits) |
1440 			    (lower_num >> (64 - left_shift_bits));
1441 		lower_num = lower_num << left_shift_bits;
1442 	}
1443 
1444 	if (right_shift_bits >= 64) {
1445 		lower_num = upper_num >> (right_shift_bits - 64);
1446 		upper_num = 0;
1447 	} else {
1448 		lower_num = (lower_num >> right_shift_bits) |
1449 			    (upper_num << (64 - right_shift_bits));
1450 		upper_num = upper_num >> right_shift_bits;
1451 	}
1452 
1453 #ifdef __BIG_ENDIAN_BITFIELD
1454 	print_num[0] = upper_num;
1455 	print_num[1] = lower_num;
1456 #else
1457 	print_num[0] = lower_num;
1458 	print_num[1] = upper_num;
1459 #endif
1460 }
1461 
1462 static void btf_bitfield_seq_show(void *data, u8 bits_offset,
1463 				  u8 nr_bits, struct seq_file *m)
1464 {
1465 	u16 left_shift_bits, right_shift_bits;
1466 	u8 nr_copy_bytes;
1467 	u8 nr_copy_bits;
1468 	u64 print_num[2] = {};
1469 
1470 	nr_copy_bits = nr_bits + bits_offset;
1471 	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1472 
1473 	memcpy(print_num, data, nr_copy_bytes);
1474 
1475 #ifdef __BIG_ENDIAN_BITFIELD
1476 	left_shift_bits = bits_offset;
1477 #else
1478 	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
1479 #endif
1480 	right_shift_bits = BITS_PER_U128 - nr_bits;
1481 
1482 	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
1483 	btf_int128_print(m, print_num);
1484 }
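/* A worked little-endian example: with *(u8 *)data == 0x1c
 * (binary 00011100), bits_offset == 2 and nr_bits == 3, the three
 * bits starting at bit 2 are 0b111, so the function above prints
 * "0x7".
 */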
1485 
1486 
1487 static void btf_int_bits_seq_show(const struct btf *btf,
1488 				  const struct btf_type *t,
1489 				  void *data, u8 bits_offset,
1490 				  struct seq_file *m)
1491 {
1492 	u32 int_data = btf_type_int(t);
1493 	u8 nr_bits = BTF_INT_BITS(int_data);
1494 	u8 total_bits_offset;
1495 
1496 	/*
1497 	 * bits_offset is at most 7.
1498 	 * BTF_INT_OFFSET() cannot exceed 128 bits.
1499 	 */
1500 	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1501 	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1502 	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1503 	btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
1504 }
1505 
1506 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1507 			     u32 type_id, void *data, u8 bits_offset,
1508 			     struct seq_file *m)
1509 {
1510 	u32 int_data = btf_type_int(t);
1511 	u8 encoding = BTF_INT_ENCODING(int_data);
1512 	bool sign = encoding & BTF_INT_SIGNED;
1513 	u8 nr_bits = BTF_INT_BITS(int_data);
1514 
1515 	if (bits_offset || BTF_INT_OFFSET(int_data) ||
1516 	    BITS_PER_BYTE_MASKED(nr_bits)) {
1517 		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1518 		return;
1519 	}
1520 
1521 	switch (nr_bits) {
1522 	case 128:
1523 		btf_int128_print(m, data);
1524 		break;
1525 	case 64:
1526 		if (sign)
1527 			seq_printf(m, "%lld", *(s64 *)data);
1528 		else
1529 			seq_printf(m, "%llu", *(u64 *)data);
1530 		break;
1531 	case 32:
1532 		if (sign)
1533 			seq_printf(m, "%d", *(s32 *)data);
1534 		else
1535 			seq_printf(m, "%u", *(u32 *)data);
1536 		break;
1537 	case 16:
1538 		if (sign)
1539 			seq_printf(m, "%d", *(s16 *)data);
1540 		else
1541 			seq_printf(m, "%u", *(u16 *)data);
1542 		break;
1543 	case 8:
1544 		if (sign)
1545 			seq_printf(m, "%d", *(s8 *)data);
1546 		else
1547 			seq_printf(m, "%u", *(u8 *)data);
1548 		break;
1549 	default:
1550 		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1551 	}
1552 }
1553 
1554 static const struct btf_kind_operations int_ops = {
1555 	.check_meta = btf_int_check_meta,
1556 	.resolve = btf_df_resolve,
1557 	.check_member = btf_int_check_member,
1558 	.check_kflag_member = btf_int_check_kflag_member,
1559 	.log_details = btf_int_log,
1560 	.seq_show = btf_int_seq_show,
1561 };
1562 
1563 static int btf_modifier_check_member(struct btf_verifier_env *env,
1564 				     const struct btf_type *struct_type,
1565 				     const struct btf_member *member,
1566 				     const struct btf_type *member_type)
1567 {
1568 	const struct btf_type *resolved_type;
1569 	u32 resolved_type_id = member->type;
1570 	struct btf_member resolved_member;
1571 	struct btf *btf = env->btf;
1572 
1573 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1574 	if (!resolved_type) {
1575 		btf_verifier_log_member(env, struct_type, member,
1576 					"Invalid member");
1577 		return -EINVAL;
1578 	}
1579 
1580 	resolved_member = *member;
1581 	resolved_member.type = resolved_type_id;
1582 
1583 	return btf_type_ops(resolved_type)->check_member(env, struct_type,
1584 							 &resolved_member,
1585 							 resolved_type);
1586 }
1587 
1588 static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
1589 					   const struct btf_type *struct_type,
1590 					   const struct btf_member *member,
1591 					   const struct btf_type *member_type)
1592 {
1593 	const struct btf_type *resolved_type;
1594 	u32 resolved_type_id = member->type;
1595 	struct btf_member resolved_member;
1596 	struct btf *btf = env->btf;
1597 
1598 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1599 	if (!resolved_type) {
1600 		btf_verifier_log_member(env, struct_type, member,
1601 					"Invalid member");
1602 		return -EINVAL;
1603 	}
1604 
1605 	resolved_member = *member;
1606 	resolved_member.type = resolved_type_id;
1607 
1608 	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
1609 							       &resolved_member,
1610 							       resolved_type);
1611 }
1612 
1613 static int btf_ptr_check_member(struct btf_verifier_env *env,
1614 				const struct btf_type *struct_type,
1615 				const struct btf_member *member,
1616 				const struct btf_type *member_type)
1617 {
1618 	u32 struct_size, struct_bits_off, bytes_offset;
1619 
1620 	struct_size = struct_type->size;
1621 	struct_bits_off = member->offset;
1622 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1623 
1624 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1625 		btf_verifier_log_member(env, struct_type, member,
1626 					"Member is not byte aligned");
1627 		return -EINVAL;
1628 	}
1629 
1630 	if (struct_size - bytes_offset < sizeof(void *)) {
1631 		btf_verifier_log_member(env, struct_type, member,
1632 					"Member exceeds struct_size");
1633 		return -EINVAL;
1634 	}
1635 
1636 	return 0;
1637 }
1638 
1639 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1640 				   const struct btf_type *t,
1641 				   u32 meta_left)
1642 {
1643 	if (btf_type_vlen(t)) {
1644 		btf_verifier_log_type(env, t, "vlen != 0");
1645 		return -EINVAL;
1646 	}
1647 
1648 	if (btf_type_kflag(t)) {
1649 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1650 		return -EINVAL;
1651 	}
1652 
1653 	if (!BTF_TYPE_ID_VALID(t->type)) {
1654 		btf_verifier_log_type(env, t, "Invalid type_id");
1655 		return -EINVAL;
1656 	}
1657 
1658 	/* typedef type must have a valid name, and other ref types,
1659 	 * volatile, const, restrict, should have a null name.
1660 	 */
1661 	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
1662 		if (!t->name_off ||
1663 		    !btf_name_valid_identifier(env->btf, t->name_off)) {
1664 			btf_verifier_log_type(env, t, "Invalid name");
1665 			return -EINVAL;
1666 		}
1667 	} else {
1668 		if (t->name_off) {
1669 			btf_verifier_log_type(env, t, "Invalid name");
1670 			return -EINVAL;
1671 		}
1672 	}
1673 
1674 	btf_verifier_log_type(env, t, NULL);
1675 
1676 	return 0;
1677 }
1678 
1679 static int btf_modifier_resolve(struct btf_verifier_env *env,
1680 				const struct resolve_vertex *v)
1681 {
1682 	const struct btf_type *t = v->t;
1683 	const struct btf_type *next_type;
1684 	u32 next_type_id = t->type;
1685 	struct btf *btf = env->btf;
1686 
1687 	next_type = btf_type_by_id(btf, next_type_id);
1688 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1689 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1690 		return -EINVAL;
1691 	}
1692 
1693 	if (!env_type_is_resolve_sink(env, next_type) &&
1694 	    !env_type_is_resolved(env, next_type_id))
1695 		return env_stack_push(env, next_type, next_type_id);
1696 
1697 	/* Figure out the resolved next_type_id with size.
1698 	 * They will be stored in the current modifier's
1699 	 * resolved_ids and resolved_sizes so that they can
1700 	 * save us a few type-following hops when we use them later (e.g. in
1701 	 * pretty print).
1702 	 */
1703 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1704 		if (env_type_is_resolved(env, next_type_id))
1705 			next_type = btf_type_id_resolve(btf, &next_type_id);
1706 
1707 		/* "typedef void new_void", "const void"...etc */
1708 		if (!btf_type_is_void(next_type) &&
1709 		    !btf_type_is_fwd(next_type) &&
1710 		    !btf_type_is_func_proto(next_type)) {
1711 			btf_verifier_log_type(env, v->t, "Invalid type_id");
1712 			return -EINVAL;
1713 		}
1714 	}
1715 
1716 	env_stack_pop_resolved(env, next_type_id, 0);
1717 
1718 	return 0;
1719 }
1720 
1721 static int btf_var_resolve(struct btf_verifier_env *env,
1722 			   const struct resolve_vertex *v)
1723 {
1724 	const struct btf_type *next_type;
1725 	const struct btf_type *t = v->t;
1726 	u32 next_type_id = t->type;
1727 	struct btf *btf = env->btf;
1728 
1729 	next_type = btf_type_by_id(btf, next_type_id);
1730 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1731 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1732 		return -EINVAL;
1733 	}
1734 
1735 	if (!env_type_is_resolve_sink(env, next_type) &&
1736 	    !env_type_is_resolved(env, next_type_id))
1737 		return env_stack_push(env, next_type, next_type_id);
1738 
1739 	if (btf_type_is_modifier(next_type)) {
1740 		const struct btf_type *resolved_type;
1741 		u32 resolved_type_id;
1742 
1743 		resolved_type_id = next_type_id;
1744 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1745 
1746 		if (btf_type_is_ptr(resolved_type) &&
1747 		    !env_type_is_resolve_sink(env, resolved_type) &&
1748 		    !env_type_is_resolved(env, resolved_type_id))
1749 			return env_stack_push(env, resolved_type,
1750 					      resolved_type_id);
1751 	}
1752 
1753 	/* We must resolve to something concrete at this point, no
1754 	 * forward types or similar that would resolve to size of
1755 	 * zero are allowed.
1756 	 */
1757 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1758 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1759 		return -EINVAL;
1760 	}
1761 
1762 	env_stack_pop_resolved(env, next_type_id, 0);
1763 
1764 	return 0;
1765 }
1766 
1767 static int btf_ptr_resolve(struct btf_verifier_env *env,
1768 			   const struct resolve_vertex *v)
1769 {
1770 	const struct btf_type *next_type;
1771 	const struct btf_type *t = v->t;
1772 	u32 next_type_id = t->type;
1773 	struct btf *btf = env->btf;
1774 
1775 	next_type = btf_type_by_id(btf, next_type_id);
1776 	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
1777 		btf_verifier_log_type(env, v->t, "Invalid type_id");
1778 		return -EINVAL;
1779 	}
1780 
1781 	if (!env_type_is_resolve_sink(env, next_type) &&
1782 	    !env_type_is_resolved(env, next_type_id))
1783 		return env_stack_push(env, next_type, next_type_id);
1784 
1785 	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
1786 	 * the modifier may have stopped resolving when it was resolved
1787 	 * to a ptr (last-resolved-ptr).
1788 	 *
1789 	 * We now need to continue from the last-resolved-ptr to
1790 	 * ensure the last-resolved-ptr will not refer back to
1791 	 * the current ptr (t).
1792 	 */
1793 	if (btf_type_is_modifier(next_type)) {
1794 		const struct btf_type *resolved_type;
1795 		u32 resolved_type_id;
1796 
1797 		resolved_type_id = next_type_id;
1798 		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1799 
1800 		if (btf_type_is_ptr(resolved_type) &&
1801 		    !env_type_is_resolve_sink(env, resolved_type) &&
1802 		    !env_type_is_resolved(env, resolved_type_id))
1803 			return env_stack_push(env, resolved_type,
1804 					      resolved_type_id);
1805 	}
1806 
1807 	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1808 		if (env_type_is_resolved(env, next_type_id))
1809 			next_type = btf_type_id_resolve(btf, &next_type_id);
1810 
1811 		if (!btf_type_is_void(next_type) &&
1812 		    !btf_type_is_fwd(next_type) &&
1813 		    !btf_type_is_func_proto(next_type)) {
1814 			btf_verifier_log_type(env, v->t, "Invalid type_id");
1815 			return -EINVAL;
1816 		}
1817 	}
1818 
1819 	env_stack_pop_resolved(env, next_type_id, 0);
1820 
1821 	return 0;
1822 }
1823 
1824 static void btf_modifier_seq_show(const struct btf *btf,
1825 				  const struct btf_type *t,
1826 				  u32 type_id, void *data,
1827 				  u8 bits_offset, struct seq_file *m)
1828 {
1829 	t = btf_type_id_resolve(btf, &type_id);
1830 
1831 	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1832 }
1833 
1834 static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
1835 			     u32 type_id, void *data, u8 bits_offset,
1836 			     struct seq_file *m)
1837 {
1838 	t = btf_type_id_resolve(btf, &type_id);
1839 
1840 	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1841 }
1842 
1843 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1844 			     u32 type_id, void *data, u8 bits_offset,
1845 			     struct seq_file *m)
1846 {
1847 	/* It is a hashed value */
1848 	seq_printf(m, "%p", *(void **)data);
1849 }
1850 
1851 static void btf_ref_type_log(struct btf_verifier_env *env,
1852 			     const struct btf_type *t)
1853 {
1854 	btf_verifier_log(env, "type_id=%u", t->type);
1855 }
1856 
1857 static struct btf_kind_operations modifier_ops = {
1858 	.check_meta = btf_ref_type_check_meta,
1859 	.resolve = btf_modifier_resolve,
1860 	.check_member = btf_modifier_check_member,
1861 	.check_kflag_member = btf_modifier_check_kflag_member,
1862 	.log_details = btf_ref_type_log,
1863 	.seq_show = btf_modifier_seq_show,
1864 };
1865 
1866 static struct btf_kind_operations ptr_ops = {
1867 	.check_meta = btf_ref_type_check_meta,
1868 	.resolve = btf_ptr_resolve,
1869 	.check_member = btf_ptr_check_member,
1870 	.check_kflag_member = btf_generic_check_kflag_member,
1871 	.log_details = btf_ref_type_log,
1872 	.seq_show = btf_ptr_seq_show,
1873 };
1874 
1875 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1876 			      const struct btf_type *t,
1877 			      u32 meta_left)
1878 {
1879 	if (btf_type_vlen(t)) {
1880 		btf_verifier_log_type(env, t, "vlen != 0");
1881 		return -EINVAL;
1882 	}
1883 
1884 	if (t->type) {
1885 		btf_verifier_log_type(env, t, "type != 0");
1886 		return -EINVAL;
1887 	}
1888 
1889 	/* fwd type must have a valid name */
1890 	if (!t->name_off ||
1891 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
1892 		btf_verifier_log_type(env, t, "Invalid name");
1893 		return -EINVAL;
1894 	}
1895 
1896 	btf_verifier_log_type(env, t, NULL);
1897 
1898 	return 0;
1899 }
1900 
1901 static void btf_fwd_type_log(struct btf_verifier_env *env,
1902 			     const struct btf_type *t)
1903 {
1904 	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
1905 }
1906 
1907 static struct btf_kind_operations fwd_ops = {
1908 	.check_meta = btf_fwd_check_meta,
1909 	.resolve = btf_df_resolve,
1910 	.check_member = btf_df_check_member,
1911 	.check_kflag_member = btf_df_check_kflag_member,
1912 	.log_details = btf_fwd_type_log,
1913 	.seq_show = btf_df_seq_show,
1914 };
1915 
1916 static int btf_array_check_member(struct btf_verifier_env *env,
1917 				  const struct btf_type *struct_type,
1918 				  const struct btf_member *member,
1919 				  const struct btf_type *member_type)
1920 {
1921 	u32 struct_bits_off = member->offset;
1922 	u32 struct_size, bytes_offset;
1923 	u32 array_type_id, array_size;
1924 	struct btf *btf = env->btf;
1925 
1926 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1927 		btf_verifier_log_member(env, struct_type, member,
1928 					"Member is not byte aligned");
1929 		return -EINVAL;
1930 	}
1931 
1932 	array_type_id = member->type;
1933 	btf_type_id_size(btf, &array_type_id, &array_size);
1934 	struct_size = struct_type->size;
1935 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1936 	if (struct_size - bytes_offset < array_size) {
1937 		btf_verifier_log_member(env, struct_type, member,
1938 					"Member exceeds struct_size");
1939 		return -EINVAL;
1940 	}
1941 
1942 	return 0;
1943 }
1944 
1945 static s32 btf_array_check_meta(struct btf_verifier_env *env,
1946 				const struct btf_type *t,
1947 				u32 meta_left)
1948 {
1949 	const struct btf_array *array = btf_type_array(t);
1950 	u32 meta_needed = sizeof(*array);
1951 
1952 	if (meta_left < meta_needed) {
1953 		btf_verifier_log_basic(env, t,
1954 				       "meta_left:%u meta_needed:%u",
1955 				       meta_left, meta_needed);
1956 		return -EINVAL;
1957 	}
1958 
1959 	/* array type should not have a name */
1960 	if (t->name_off) {
1961 		btf_verifier_log_type(env, t, "Invalid name");
1962 		return -EINVAL;
1963 	}
1964 
1965 	if (btf_type_vlen(t)) {
1966 		btf_verifier_log_type(env, t, "vlen != 0");
1967 		return -EINVAL;
1968 	}
1969 
1970 	if (btf_type_kflag(t)) {
1971 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1972 		return -EINVAL;
1973 	}
1974 
1975 	if (t->size) {
1976 		btf_verifier_log_type(env, t, "size != 0");
1977 		return -EINVAL;
1978 	}
1979 
1980 	/* Array elem type and index type cannot be in type void,
1981 	 * so !array->type and !array->index_type are not allowed.
1982 	 */
1983 	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
1984 		btf_verifier_log_type(env, t, "Invalid elem");
1985 		return -EINVAL;
1986 	}
1987 
1988 	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
1989 		btf_verifier_log_type(env, t, "Invalid index");
1990 		return -EINVAL;
1991 	}
1992 
1993 	btf_verifier_log_type(env, t, NULL);
1994 
1995 	return meta_needed;
1996 }
1997 
1998 static int btf_array_resolve(struct btf_verifier_env *env,
1999 			     const struct resolve_vertex *v)
2000 {
2001 	const struct btf_array *array = btf_type_array(v->t);
2002 	const struct btf_type *elem_type, *index_type;
2003 	u32 elem_type_id, index_type_id;
2004 	struct btf *btf = env->btf;
2005 	u32 elem_size;
2006 
2007 	/* Check array->index_type */
2008 	index_type_id = array->index_type;
2009 	index_type = btf_type_by_id(btf, index_type_id);
2010 	if (btf_type_nosize_or_null(index_type) ||
2011 	    btf_type_is_resolve_source_only(index_type)) {
2012 		btf_verifier_log_type(env, v->t, "Invalid index");
2013 		return -EINVAL;
2014 	}
2015 
2016 	if (!env_type_is_resolve_sink(env, index_type) &&
2017 	    !env_type_is_resolved(env, index_type_id))
2018 		return env_stack_push(env, index_type, index_type_id);
2019 
2020 	index_type = btf_type_id_size(btf, &index_type_id, NULL);
2021 	if (!index_type || !btf_type_is_int(index_type) ||
2022 	    !btf_type_int_is_regular(index_type)) {
2023 		btf_verifier_log_type(env, v->t, "Invalid index");
2024 		return -EINVAL;
2025 	}
2026 
2027 	/* Check array->type */
2028 	elem_type_id = array->type;
2029 	elem_type = btf_type_by_id(btf, elem_type_id);
2030 	if (btf_type_nosize_or_null(elem_type) ||
2031 	    btf_type_is_resolve_source_only(elem_type)) {
2032 		btf_verifier_log_type(env, v->t,
2033 				      "Invalid elem");
2034 		return -EINVAL;
2035 	}
2036 
2037 	if (!env_type_is_resolve_sink(env, elem_type) &&
2038 	    !env_type_is_resolved(env, elem_type_id))
2039 		return env_stack_push(env, elem_type, elem_type_id);
2040 
2041 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2042 	if (!elem_type) {
2043 		btf_verifier_log_type(env, v->t, "Invalid elem");
2044 		return -EINVAL;
2045 	}
2046 
2047 	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2048 		btf_verifier_log_type(env, v->t, "Invalid array of int");
2049 		return -EINVAL;
2050 	}
2051 
2052 	if (array->nelems && elem_size > U32_MAX / array->nelems) {
2053 		btf_verifier_log_type(env, v->t,
2054 				      "Array size overflows U32_MAX");
2055 		return -EINVAL;
2056 	}
2057 
2058 	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2059 
2060 	return 0;
2061 }
2062 
2063 static void btf_array_log(struct btf_verifier_env *env,
2064 			  const struct btf_type *t)
2065 {
2066 	const struct btf_array *array = btf_type_array(t);
2067 
2068 	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2069 			 array->type, array->index_type, array->nelems);
2070 }
2071 
2072 static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
2073 			       u32 type_id, void *data, u8 bits_offset,
2074 			       struct seq_file *m)
2075 {
2076 	const struct btf_array *array = btf_type_array(t);
2077 	const struct btf_kind_operations *elem_ops;
2078 	const struct btf_type *elem_type;
2079 	u32 i, elem_size, elem_type_id;
2080 
2081 	elem_type_id = array->type;
2082 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2083 	elem_ops = btf_type_ops(elem_type);
2084 	seq_puts(m, "[");
2085 	for (i = 0; i < array->nelems; i++) {
2086 		if (i)
2087 			seq_puts(m, ",");
2088 
2089 		elem_ops->seq_show(btf, elem_type, elem_type_id, data,
2090 				   bits_offset, m);
2091 		data += elem_size;
2092 	}
2093 	seq_puts(m, "]");
2094 }
2095 
2096 static struct btf_kind_operations array_ops = {
2097 	.check_meta = btf_array_check_meta,
2098 	.resolve = btf_array_resolve,
2099 	.check_member = btf_array_check_member,
2100 	.check_kflag_member = btf_generic_check_kflag_member,
2101 	.log_details = btf_array_log,
2102 	.seq_show = btf_array_seq_show,
2103 };
2104 
2105 static int btf_struct_check_member(struct btf_verifier_env *env,
2106 				   const struct btf_type *struct_type,
2107 				   const struct btf_member *member,
2108 				   const struct btf_type *member_type)
2109 {
2110 	u32 struct_bits_off = member->offset;
2111 	u32 struct_size, bytes_offset;
2112 
2113 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2114 		btf_verifier_log_member(env, struct_type, member,
2115 					"Member is not byte aligned");
2116 		return -EINVAL;
2117 	}
2118 
2119 	struct_size = struct_type->size;
2120 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2121 	if (struct_size - bytes_offset < member_type->size) {
2122 		btf_verifier_log_member(env, struct_type, member,
2123 					"Member exceeds struct_size");
2124 		return -EINVAL;
2125 	}
2126 
2127 	return 0;
2128 }
2129 
2130 static s32 btf_struct_check_meta(struct btf_verifier_env *env,
2131 				 const struct btf_type *t,
2132 				 u32 meta_left)
2133 {
2134 	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
2135 	const struct btf_member *member;
2136 	u32 meta_needed, last_offset;
2137 	struct btf *btf = env->btf;
2138 	u32 struct_size = t->size;
2139 	u32 offset;
2140 	u16 i;
2141 
2142 	meta_needed = btf_type_vlen(t) * sizeof(*member);
2143 	if (meta_left < meta_needed) {
2144 		btf_verifier_log_basic(env, t,
2145 				       "meta_left:%u meta_needed:%u",
2146 				       meta_left, meta_needed);
2147 		return -EINVAL;
2148 	}
2149 
2150 	/* struct type either has no name or a valid one */
2151 	if (t->name_off &&
2152 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2153 		btf_verifier_log_type(env, t, "Invalid name");
2154 		return -EINVAL;
2155 	}
2156 
2157 	btf_verifier_log_type(env, t, NULL);
2158 
2159 	last_offset = 0;
2160 	for_each_member(i, t, member) {
2161 		if (!btf_name_offset_valid(btf, member->name_off)) {
2162 			btf_verifier_log_member(env, t, member,
2163 						"Invalid member name_offset:%u",
2164 						member->name_off);
2165 			return -EINVAL;
2166 		}
2167 
2168 		/* struct member either has no name or a valid one */
2169 		if (member->name_off &&
2170 		    !btf_name_valid_identifier(btf, member->name_off)) {
2171 			btf_verifier_log_member(env, t, member, "Invalid name");
2172 			return -EINVAL;
2173 		}
2174 		/* A member cannot be in type void */
2175 		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
2176 			btf_verifier_log_member(env, t, member,
2177 						"Invalid type_id");
2178 			return -EINVAL;
2179 		}
2180 
2181 		offset = btf_member_bit_offset(t, member);
2182 		if (is_union && offset) {
2183 			btf_verifier_log_member(env, t, member,
2184 						"Invalid member bits_offset");
2185 			return -EINVAL;
2186 		}
2187 
2188 		/*
2189 		 * ">" instead of ">=" because the last member could be
2190 		 * "char a[0];"
2191 		 */
2192 		if (last_offset > offset) {
2193 			btf_verifier_log_member(env, t, member,
2194 						"Invalid member bits_offset");
2195 			return -EINVAL;
2196 		}
2197 
2198 		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
2199 			btf_verifier_log_member(env, t, member,
2200 						"Member bits_offset exceeds its struct size");
2201 			return -EINVAL;
2202 		}
2203 
2204 		btf_verifier_log_member(env, t, member, NULL);
2205 		last_offset = offset;
2206 	}
2207 
2208 	return meta_needed;
2209 }
2210 
2211 static int btf_struct_resolve(struct btf_verifier_env *env,
2212 			      const struct resolve_vertex *v)
2213 {
2214 	const struct btf_member *member;
2215 	int err;
2216 	u16 i;
2217 
2218 	/* Before continuing to resolve the next_member,
2219 	 * ensure the last member is indeed resolved to a
2220 	 * type with size info.
2221 	 */
2222 	if (v->next_member) {
2223 		const struct btf_type *last_member_type;
2224 		const struct btf_member *last_member;
2225 		u16 last_member_type_id;
2226 
2227 		last_member = btf_type_member(v->t) + v->next_member - 1;
2228 		last_member_type_id = last_member->type;
2229 		if (WARN_ON_ONCE(!env_type_is_resolved(env,
2230 						       last_member_type_id)))
2231 			return -EINVAL;
2232 
2233 		last_member_type = btf_type_by_id(env->btf,
2234 						  last_member_type_id);
2235 		if (btf_type_kflag(v->t))
2236 			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
2237 								last_member,
2238 								last_member_type);
2239 		else
2240 			err = btf_type_ops(last_member_type)->check_member(env, v->t,
2241 								last_member,
2242 								last_member_type);
2243 		if (err)
2244 			return err;
2245 	}
2246 
2247 	for_each_member_from(i, v->next_member, v->t, member) {
2248 		u32 member_type_id = member->type;
2249 		const struct btf_type *member_type = btf_type_by_id(env->btf,
2250 								member_type_id);
2251 
2252 		if (btf_type_nosize_or_null(member_type) ||
2253 		    btf_type_is_resolve_source_only(member_type)) {
2254 			btf_verifier_log_member(env, v->t, member,
2255 						"Invalid member");
2256 			return -EINVAL;
2257 		}
2258 
2259 		if (!env_type_is_resolve_sink(env, member_type) &&
2260 		    !env_type_is_resolved(env, member_type_id)) {
2261 			env_stack_set_next_member(env, i + 1);
2262 			return env_stack_push(env, member_type, member_type_id);
2263 		}
2264 
2265 		if (btf_type_kflag(v->t))
2266 			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
2267 									    member,
2268 									    member_type);
2269 		else
2270 			err = btf_type_ops(member_type)->check_member(env, v->t,
2271 								      member,
2272 								      member_type);
2273 		if (err)
2274 			return err;
2275 	}
2276 
2277 	env_stack_pop_resolved(env, 0, 0);
2278 
2279 	return 0;
2280 }
2281 
2282 static void btf_struct_log(struct btf_verifier_env *env,
2283 			   const struct btf_type *t)
2284 {
2285 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2286 }
2287 
2288 /* find 'struct bpf_spin_lock' in map value.
2289  * return >= 0 offset if found
2290  * and < 0 in case of error
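 *
 * For example (a sketch; the field names are illustrative only):
 *
 *	struct map_value {
 *		int counter;
 *		struct bpf_spin_lock lock;
 *	};
 *
 * here the function returns 4, the byte offset of 'lock' within the
 * map value.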
2291  */
2292 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
2293 {
2294 	const struct btf_member *member;
2295 	u32 i, off = -ENOENT;
2296 
2297 	if (!__btf_type_is_struct(t))
2298 		return -EINVAL;
2299 
2300 	for_each_member(i, t, member) {
2301 		const struct btf_type *member_type = btf_type_by_id(btf,
2302 								    member->type);
2303 		if (!__btf_type_is_struct(member_type))
2304 			continue;
2305 		if (member_type->size != sizeof(struct bpf_spin_lock))
2306 			continue;
2307 		if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
2308 			   "bpf_spin_lock"))
2309 			continue;
2310 		if (off != -ENOENT)
2311 			/* only one 'struct bpf_spin_lock' is allowed */
2312 			return -E2BIG;
2313 		off = btf_member_bit_offset(t, member);
2314 		if (off % 8)
2315 			/* valid C code cannot generate such BTF */
2316 			return -EINVAL;
2317 		off /= 8;
2318 		if (off % __alignof__(struct bpf_spin_lock))
2319 			/* valid struct bpf_spin_lock will be 4 byte aligned */
2320 			return -EINVAL;
2321 	}
2322 	return off;
2323 }
2324 
2325 static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2326 				u32 type_id, void *data, u8 bits_offset,
2327 				struct seq_file *m)
2328 {
2329 	const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
2330 	const struct btf_member *member;
2331 	u32 i;
2332 
2333 	seq_puts(m, "{");
2334 	for_each_member(i, t, member) {
2335 		const struct btf_type *member_type = btf_type_by_id(btf,
2336 								member->type);
2337 		const struct btf_kind_operations *ops;
2338 		u32 member_offset, bitfield_size;
2339 		u32 bytes_offset;
2340 		u8 bits8_offset;
2341 
2342 		if (i)
2343 			seq_puts(m, seq);
2344 
2345 		member_offset = btf_member_bit_offset(t, member);
2346 		bitfield_size = btf_member_bitfield_size(t, member);
2347 		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2348 		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2349 		if (bitfield_size) {
2350 			btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
2351 					      bitfield_size, m);
2352 		} else {
2353 			ops = btf_type_ops(member_type);
2354 			ops->seq_show(btf, member_type, member->type,
2355 				      data + bytes_offset, bits8_offset, m);
2356 		}
2357 	}
2358 	seq_puts(m, "}");
2359 }
2360 
2361 static struct btf_kind_operations struct_ops = {
2362 	.check_meta = btf_struct_check_meta,
2363 	.resolve = btf_struct_resolve,
2364 	.check_member = btf_struct_check_member,
2365 	.check_kflag_member = btf_generic_check_kflag_member,
2366 	.log_details = btf_struct_log,
2367 	.seq_show = btf_struct_seq_show,
2368 };
2369 
2370 static int btf_enum_check_member(struct btf_verifier_env *env,
2371 				 const struct btf_type *struct_type,
2372 				 const struct btf_member *member,
2373 				 const struct btf_type *member_type)
2374 {
2375 	u32 struct_bits_off = member->offset;
2376 	u32 struct_size, bytes_offset;
2377 
2378 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2379 		btf_verifier_log_member(env, struct_type, member,
2380 					"Member is not byte aligned");
2381 		return -EINVAL;
2382 	}
2383 
2384 	struct_size = struct_type->size;
2385 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2386 	if (struct_size - bytes_offset < sizeof(int)) {
2387 		btf_verifier_log_member(env, struct_type, member,
2388 					"Member exceeds struct_size");
2389 		return -EINVAL;
2390 	}
2391 
2392 	return 0;
2393 }
2394 
2395 static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
2396 				       const struct btf_type *struct_type,
2397 				       const struct btf_member *member,
2398 				       const struct btf_type *member_type)
2399 {
2400 	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
2401 	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
2402 
2403 	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2404 	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2405 	if (!nr_bits) {
2406 		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2407 			btf_verifier_log_member(env, struct_type, member,
2408 						"Member is not byte aligned");
2409 			return -EINVAL;
2410 		}
2411 
2412 		nr_bits = int_bitsize;
2413 	} else if (nr_bits > int_bitsize) {
2414 		btf_verifier_log_member(env, struct_type, member,
2415 					"Invalid member bitfield_size");
2416 		return -EINVAL;
2417 	}
2418 
2419 	struct_size = struct_type->size;
2420 	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
2421 	if (struct_size < bytes_end) {
2422 		btf_verifier_log_member(env, struct_type, member,
2423 					"Member exceeds struct_size");
2424 		return -EINVAL;
2425 	}
2426 
2427 	return 0;
2428 }
2429 
2430 static s32 btf_enum_check_meta(struct btf_verifier_env *env,
2431 			       const struct btf_type *t,
2432 			       u32 meta_left)
2433 {
2434 	const struct btf_enum *enums = btf_type_enum(t);
2435 	struct btf *btf = env->btf;
2436 	u16 i, nr_enums;
2437 	u32 meta_needed;
2438 
2439 	nr_enums = btf_type_vlen(t);
2440 	meta_needed = nr_enums * sizeof(*enums);
2441 
2442 	if (meta_left < meta_needed) {
2443 		btf_verifier_log_basic(env, t,
2444 				       "meta_left:%u meta_needed:%u",
2445 				       meta_left, meta_needed);
2446 		return -EINVAL;
2447 	}
2448 
2449 	if (btf_type_kflag(t)) {
2450 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2451 		return -EINVAL;
2452 	}
2453 
2454 	if (t->size > 8 || !is_power_of_2(t->size)) {
2455 		btf_verifier_log_type(env, t, "Unexpected size");
2456 		return -EINVAL;
2457 	}
2458 
2459 	/* enum type either has no name or a valid one */
2460 	if (t->name_off &&
2461 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2462 		btf_verifier_log_type(env, t, "Invalid name");
2463 		return -EINVAL;
2464 	}
2465 
2466 	btf_verifier_log_type(env, t, NULL);
2467 
2468 	for (i = 0; i < nr_enums; i++) {
2469 		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
2470 			btf_verifier_log(env, "\tInvalid name_offset:%u",
2471 					 enums[i].name_off);
2472 			return -EINVAL;
2473 		}
2474 
2475 		/* enum member must have a valid name */
2476 		if (!enums[i].name_off ||
2477 		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
2478 			btf_verifier_log_type(env, t, "Invalid name");
2479 			return -EINVAL;
2480 		}
2481 
2482 		if (env->log.level == BPF_LOG_KERNEL)
2483 			continue;
2484 		btf_verifier_log(env, "\t%s val=%d\n",
2485 				 __btf_name_by_offset(btf, enums[i].name_off),
2486 				 enums[i].val);
2487 	}
2488 
2489 	return meta_needed;
2490 }
2491 
2492 static void btf_enum_log(struct btf_verifier_env *env,
2493 			 const struct btf_type *t)
2494 {
2495 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2496 }
2497 
2498 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
2499 			      u32 type_id, void *data, u8 bits_offset,
2500 			      struct seq_file *m)
2501 {
2502 	const struct btf_enum *enums = btf_type_enum(t);
2503 	u32 i, nr_enums = btf_type_vlen(t);
2504 	int v = *(int *)data;
2505 
2506 	for (i = 0; i < nr_enums; i++) {
2507 		if (v == enums[i].val) {
2508 			seq_printf(m, "%s",
2509 				   __btf_name_by_offset(btf,
2510 							enums[i].name_off));
2511 			return;
2512 		}
2513 	}
2514 
2515 	seq_printf(m, "%d", v);
2516 }
2517 
2518 static struct btf_kind_operations enum_ops = {
2519 	.check_meta = btf_enum_check_meta,
2520 	.resolve = btf_df_resolve,
2521 	.check_member = btf_enum_check_member,
2522 	.check_kflag_member = btf_enum_check_kflag_member,
2523 	.log_details = btf_enum_log,
2524 	.seq_show = btf_enum_seq_show,
2525 };
2526 
2527 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
2528 				     const struct btf_type *t,
2529 				     u32 meta_left)
2530 {
2531 	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
2532 
2533 	if (meta_left < meta_needed) {
2534 		btf_verifier_log_basic(env, t,
2535 				       "meta_left:%u meta_needed:%u",
2536 				       meta_left, meta_needed);
2537 		return -EINVAL;
2538 	}
2539 
2540 	if (t->name_off) {
2541 		btf_verifier_log_type(env, t, "Invalid name");
2542 		return -EINVAL;
2543 	}
2544 
2545 	if (btf_type_kflag(t)) {
2546 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2547 		return -EINVAL;
2548 	}
2549 
2550 	btf_verifier_log_type(env, t, NULL);
2551 
2552 	return meta_needed;
2553 }
2554 
2555 static void btf_func_proto_log(struct btf_verifier_env *env,
2556 			       const struct btf_type *t)
2557 {
2558 	const struct btf_param *args = (const struct btf_param *)(t + 1);
2559 	u16 nr_args = btf_type_vlen(t), i;
2560 
2561 	btf_verifier_log(env, "return=%u args=(", t->type);
2562 	if (!nr_args) {
2563 		btf_verifier_log(env, "void");
2564 		goto done;
2565 	}
2566 
2567 	if (nr_args == 1 && !args[0].type) {
2568 		/* Only one vararg */
2569 		btf_verifier_log(env, "vararg");
2570 		goto done;
2571 	}
2572 
2573 	btf_verifier_log(env, "%u %s", args[0].type,
2574 			 __btf_name_by_offset(env->btf,
2575 					      args[0].name_off));
2576 	for (i = 1; i < nr_args - 1; i++)
2577 		btf_verifier_log(env, ", %u %s", args[i].type,
2578 				 __btf_name_by_offset(env->btf,
2579 						      args[i].name_off));
2580 
2581 	if (nr_args > 1) {
2582 		const struct btf_param *last_arg = &args[nr_args - 1];
2583 
2584 		if (last_arg->type)
2585 			btf_verifier_log(env, ", %u %s", last_arg->type,
2586 					 __btf_name_by_offset(env->btf,
2587 							      last_arg->name_off));
2588 		else
2589 			btf_verifier_log(env, ", vararg");
2590 	}
2591 
2592 done:
2593 	btf_verifier_log(env, ")");
2594 }
2595 
2596 static struct btf_kind_operations func_proto_ops = {
2597 	.check_meta = btf_func_proto_check_meta,
2598 	.resolve = btf_df_resolve,
2599 	/*
2600 	 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
2601 	 * a struct's member.
2602 	 *
2603 	 * It should be a function pointer instead.
2604 	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
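	 *
	 * For example (a sketch), a member declared in C as
	 *
	 *	struct s {
	 *		int (*f)(int);
	 *	};
	 *
	 * is encoded as member 'f' -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO
	 * of 'int (int)'.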
2605 	 *
2606 	 * Hence, there is no btf_func_check_member().
2607 	 */
2608 	.check_member = btf_df_check_member,
2609 	.check_kflag_member = btf_df_check_kflag_member,
2610 	.log_details = btf_func_proto_log,
2611 	.seq_show = btf_df_seq_show,
2612 };
2613 
2614 static s32 btf_func_check_meta(struct btf_verifier_env *env,
2615 			       const struct btf_type *t,
2616 			       u32 meta_left)
2617 {
2618 	if (!t->name_off ||
2619 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2620 		btf_verifier_log_type(env, t, "Invalid name");
2621 		return -EINVAL;
2622 	}
2623 
2624 	if (btf_type_vlen(t)) {
2625 		btf_verifier_log_type(env, t, "vlen != 0");
2626 		return -EINVAL;
2627 	}
2628 
2629 	if (btf_type_kflag(t)) {
2630 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2631 		return -EINVAL;
2632 	}
2633 
2634 	btf_verifier_log_type(env, t, NULL);
2635 
2636 	return 0;
2637 }
2638 
2639 static struct btf_kind_operations func_ops = {
2640 	.check_meta = btf_func_check_meta,
2641 	.resolve = btf_df_resolve,
2642 	.check_member = btf_df_check_member,
2643 	.check_kflag_member = btf_df_check_kflag_member,
2644 	.log_details = btf_ref_type_log,
2645 	.seq_show = btf_df_seq_show,
2646 };
2647 
2648 static s32 btf_var_check_meta(struct btf_verifier_env *env,
2649 			      const struct btf_type *t,
2650 			      u32 meta_left)
2651 {
2652 	const struct btf_var *var;
2653 	u32 meta_needed = sizeof(*var);
2654 
2655 	if (meta_left < meta_needed) {
2656 		btf_verifier_log_basic(env, t,
2657 				       "meta_left:%u meta_needed:%u",
2658 				       meta_left, meta_needed);
2659 		return -EINVAL;
2660 	}
2661 
2662 	if (btf_type_vlen(t)) {
2663 		btf_verifier_log_type(env, t, "vlen != 0");
2664 		return -EINVAL;
2665 	}
2666 
2667 	if (btf_type_kflag(t)) {
2668 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2669 		return -EINVAL;
2670 	}
2671 
2672 	if (!t->name_off ||
2673 	    !__btf_name_valid(env->btf, t->name_off, true)) {
2674 		btf_verifier_log_type(env, t, "Invalid name");
2675 		return -EINVAL;
2676 	}
2677 
2678 	/* A var cannot be in type void */
2679 	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
2680 		btf_verifier_log_type(env, t, "Invalid type_id");
2681 		return -EINVAL;
2682 	}
2683 
2684 	var = btf_type_var(t);
2685 	if (var->linkage != BTF_VAR_STATIC &&
2686 	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2687 		btf_verifier_log_type(env, t, "Linkage not supported");
2688 		return -EINVAL;
2689 	}
2690 
2691 	btf_verifier_log_type(env, t, NULL);
2692 
2693 	return meta_needed;
2694 }
2695 
2696 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
2697 {
2698 	const struct btf_var *var = btf_type_var(t);
2699 
2700 	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
2701 }
2702 
2703 static const struct btf_kind_operations var_ops = {
2704 	.check_meta		= btf_var_check_meta,
2705 	.resolve		= btf_var_resolve,
2706 	.check_member		= btf_df_check_member,
2707 	.check_kflag_member	= btf_df_check_kflag_member,
2708 	.log_details		= btf_var_log,
2709 	.seq_show		= btf_var_seq_show,
2710 };
2711 
2712 static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
2713 				  const struct btf_type *t,
2714 				  u32 meta_left)
2715 {
2716 	const struct btf_var_secinfo *vsi;
2717 	u64 last_vsi_end_off = 0, sum = 0;
2718 	u32 i, meta_needed;
2719 
2720 	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
2721 	if (meta_left < meta_needed) {
2722 		btf_verifier_log_basic(env, t,
2723 				       "meta_left:%u meta_needed:%u",
2724 				       meta_left, meta_needed);
2725 		return -EINVAL;
2726 	}
2727 
2728 	if (!btf_type_vlen(t)) {
2729 		btf_verifier_log_type(env, t, "vlen == 0");
2730 		return -EINVAL;
2731 	}
2732 
2733 	if (!t->size) {
2734 		btf_verifier_log_type(env, t, "size == 0");
2735 		return -EINVAL;
2736 	}
2737 
2738 	if (btf_type_kflag(t)) {
2739 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2740 		return -EINVAL;
2741 	}
2742 
2743 	if (!t->name_off ||
2744 	    !btf_name_valid_section(env->btf, t->name_off)) {
2745 		btf_verifier_log_type(env, t, "Invalid name");
2746 		return -EINVAL;
2747 	}
2748 
2749 	btf_verifier_log_type(env, t, NULL);
2750 
2751 	for_each_vsi(i, t, vsi) {
2752 		/* A var cannot be in type void */
2753 		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
2754 			btf_verifier_log_vsi(env, t, vsi,
2755 					     "Invalid type_id");
2756 			return -EINVAL;
2757 		}
2758 
2759 		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
2760 			btf_verifier_log_vsi(env, t, vsi,
2761 					     "Invalid offset");
2762 			return -EINVAL;
2763 		}
2764 
2765 		if (!vsi->size || vsi->size > t->size) {
2766 			btf_verifier_log_vsi(env, t, vsi,
2767 					     "Invalid size");
2768 			return -EINVAL;
2769 		}
2770 
2771 		last_vsi_end_off = vsi->offset + vsi->size;
2772 		if (last_vsi_end_off > t->size) {
2773 			btf_verifier_log_vsi(env, t, vsi,
2774 					     "Invalid offset+size");
2775 			return -EINVAL;
2776 		}
2777 
2778 		btf_verifier_log_vsi(env, t, vsi, NULL);
2779 		sum += vsi->size;
2780 	}
2781 
2782 	if (t->size < sum) {
2783 		btf_verifier_log_type(env, t, "Invalid btf_info size");
2784 		return -EINVAL;
2785 	}
2786 
2787 	return meta_needed;
2788 }
2789 
2790 static int btf_datasec_resolve(struct btf_verifier_env *env,
2791 			       const struct resolve_vertex *v)
2792 {
2793 	const struct btf_var_secinfo *vsi;
2794 	struct btf *btf = env->btf;
2795 	u16 i;
2796 
2797 	for_each_vsi_from(i, v->next_member, v->t, vsi) {
2798 		u32 var_type_id = vsi->type, type_id, type_size = 0;
2799 		const struct btf_type *var_type = btf_type_by_id(env->btf,
2800 								 var_type_id);
2801 		if (!var_type || !btf_type_is_var(var_type)) {
2802 			btf_verifier_log_vsi(env, v->t, vsi,
2803 					     "Not a VAR kind member");
2804 			return -EINVAL;
2805 		}
2806 
2807 		if (!env_type_is_resolve_sink(env, var_type) &&
2808 		    !env_type_is_resolved(env, var_type_id)) {
2809 			env_stack_set_next_member(env, i + 1);
2810 			return env_stack_push(env, var_type, var_type_id);
2811 		}
2812 
2813 		type_id = var_type->type;
2814 		if (!btf_type_id_size(btf, &type_id, &type_size)) {
2815 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
2816 			return -EINVAL;
2817 		}
2818 
2819 		if (vsi->size < type_size) {
2820 			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
2821 			return -EINVAL;
2822 		}
2823 	}
2824 
2825 	env_stack_pop_resolved(env, 0, 0);
2826 	return 0;
2827 }
2828 
2829 static void btf_datasec_log(struct btf_verifier_env *env,
2830 			    const struct btf_type *t)
2831 {
2832 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2833 }
2834 
2835 static void btf_datasec_seq_show(const struct btf *btf,
2836 				 const struct btf_type *t, u32 type_id,
2837 				 void *data, u8 bits_offset,
2838 				 struct seq_file *m)
2839 {
2840 	const struct btf_var_secinfo *vsi;
2841 	const struct btf_type *var;
2842 	u32 i;
2843 
2844 	seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
2845 	for_each_vsi(i, t, vsi) {
2846 		var = btf_type_by_id(btf, vsi->type);
2847 		if (i)
2848 			seq_puts(m, ",");
2849 		btf_type_ops(var)->seq_show(btf, var, vsi->type,
2850 					    data + vsi->offset, bits_offset, m);
2851 	}
2852 	seq_puts(m, "}");
2853 }
2854 
2855 static const struct btf_kind_operations datasec_ops = {
2856 	.check_meta		= btf_datasec_check_meta,
2857 	.resolve		= btf_datasec_resolve,
2858 	.check_member		= btf_df_check_member,
2859 	.check_kflag_member	= btf_df_check_kflag_member,
2860 	.log_details		= btf_datasec_log,
2861 	.seq_show		= btf_datasec_seq_show,
2862 };
2863 
2864 static int btf_func_proto_check(struct btf_verifier_env *env,
2865 				const struct btf_type *t)
2866 {
2867 	const struct btf_type *ret_type;
2868 	const struct btf_param *args;
2869 	const struct btf *btf;
2870 	u16 nr_args, i;
2871 	int err;
2872 
2873 	btf = env->btf;
2874 	args = (const struct btf_param *)(t + 1);
2875 	nr_args = btf_type_vlen(t);
2876 
2877 	/* Check func return type which could be "void" (t->type == 0) */
2878 	if (t->type) {
2879 		u32 ret_type_id = t->type;
2880 
2881 		ret_type = btf_type_by_id(btf, ret_type_id);
2882 		if (!ret_type) {
2883 			btf_verifier_log_type(env, t, "Invalid return type");
2884 			return -EINVAL;
2885 		}
2886 
2887 		if (btf_type_needs_resolve(ret_type) &&
2888 		    !env_type_is_resolved(env, ret_type_id)) {
2889 			err = btf_resolve(env, ret_type, ret_type_id);
2890 			if (err)
2891 				return err;
2892 		}
2893 
2894 		/* Ensure the return type is a type that has a size */
2895 		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
2896 			btf_verifier_log_type(env, t, "Invalid return type");
2897 			return -EINVAL;
2898 		}
2899 	}
2900 
2901 	if (!nr_args)
2902 		return 0;
2903 
2904 	/* Last func arg type_id could be 0 if it is a vararg */
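	/* For example (a sketch), a prototype like
	 *
	 *	int (*)(const char *fmt, ...)
	 *
	 * is encoded with a trailing btf_param whose type and name_off are
	 * both 0; nr_args is decremented below so that the vararg slot is
	 * skipped by the per-argument checks.
	 */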
2905 	if (!args[nr_args - 1].type) {
2906 		if (args[nr_args - 1].name_off) {
2907 			btf_verifier_log_type(env, t, "Invalid arg#%u",
2908 					      nr_args);
2909 			return -EINVAL;
2910 		}
2911 		nr_args--;
2912 	}
2913 
2914 	err = 0;
2915 	for (i = 0; i < nr_args; i++) {
2916 		const struct btf_type *arg_type;
2917 		u32 arg_type_id;
2918 
2919 		arg_type_id = args[i].type;
2920 		arg_type = btf_type_by_id(btf, arg_type_id);
2921 		if (!arg_type) {
2922 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2923 			err = -EINVAL;
2924 			break;
2925 		}
2926 
2927 		if (args[i].name_off &&
2928 		    (!btf_name_offset_valid(btf, args[i].name_off) ||
2929 		     !btf_name_valid_identifier(btf, args[i].name_off))) {
2930 			btf_verifier_log_type(env, t,
2931 					      "Invalid arg#%u", i + 1);
2932 			err = -EINVAL;
2933 			break;
2934 		}
2935 
2936 		if (btf_type_needs_resolve(arg_type) &&
2937 		    !env_type_is_resolved(env, arg_type_id)) {
2938 			err = btf_resolve(env, arg_type, arg_type_id);
2939 			if (err)
2940 				break;
2941 		}
2942 
2943 		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
2944 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2945 			err = -EINVAL;
2946 			break;
2947 		}
2948 	}
2949 
2950 	return err;
2951 }
2952 
2953 static int btf_func_check(struct btf_verifier_env *env,
2954 			  const struct btf_type *t)
2955 {
2956 	const struct btf_type *proto_type;
2957 	const struct btf_param *args;
2958 	const struct btf *btf;
2959 	u16 nr_args, i;
2960 
2961 	btf = env->btf;
2962 	proto_type = btf_type_by_id(btf, t->type);
2963 
2964 	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
2965 		btf_verifier_log_type(env, t, "Invalid type_id");
2966 		return -EINVAL;
2967 	}
2968 
2969 	args = (const struct btf_param *)(proto_type + 1);
2970 	nr_args = btf_type_vlen(proto_type);
2971 	for (i = 0; i < nr_args; i++) {
2972 		if (!args[i].name_off && args[i].type) {
2973 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2974 			return -EINVAL;
2975 		}
2976 	}
2977 
2978 	return 0;
2979 }
2980 
2981 static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
2982 	[BTF_KIND_INT] = &int_ops,
2983 	[BTF_KIND_PTR] = &ptr_ops,
2984 	[BTF_KIND_ARRAY] = &array_ops,
2985 	[BTF_KIND_STRUCT] = &struct_ops,
2986 	[BTF_KIND_UNION] = &struct_ops,
2987 	[BTF_KIND_ENUM] = &enum_ops,
2988 	[BTF_KIND_FWD] = &fwd_ops,
2989 	[BTF_KIND_TYPEDEF] = &modifier_ops,
2990 	[BTF_KIND_VOLATILE] = &modifier_ops,
2991 	[BTF_KIND_CONST] = &modifier_ops,
2992 	[BTF_KIND_RESTRICT] = &modifier_ops,
2993 	[BTF_KIND_FUNC] = &func_ops,
2994 	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
2995 	[BTF_KIND_VAR] = &var_ops,
2996 	[BTF_KIND_DATASEC] = &datasec_ops,
2997 };
2998 
2999 static s32 btf_check_meta(struct btf_verifier_env *env,
3000 			  const struct btf_type *t,
3001 			  u32 meta_left)
3002 {
3003 	u32 saved_meta_left = meta_left;
3004 	s32 var_meta_size;
3005 
3006 	if (meta_left < sizeof(*t)) {
3007 		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
3008 				 env->log_type_id, meta_left, sizeof(*t));
3009 		return -EINVAL;
3010 	}
3011 	meta_left -= sizeof(*t);
3012 
3013 	if (t->info & ~BTF_INFO_MASK) {
3014 		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
3015 				 env->log_type_id, t->info);
3016 		return -EINVAL;
3017 	}
3018 
3019 	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
3020 	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
3021 		btf_verifier_log(env, "[%u] Invalid kind:%u",
3022 				 env->log_type_id, BTF_INFO_KIND(t->info));
3023 		return -EINVAL;
3024 	}
3025 
3026 	if (!btf_name_offset_valid(env->btf, t->name_off)) {
3027 		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
3028 				 env->log_type_id, t->name_off);
3029 		return -EINVAL;
3030 	}
3031 
3032 	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
3033 	if (var_meta_size < 0)
3034 		return var_meta_size;
3035 
3036 	meta_left -= var_meta_size;
3037 
3038 	return saved_meta_left - meta_left;
3039 }
3040 
3041 static int btf_check_all_metas(struct btf_verifier_env *env)
3042 {
3043 	struct btf *btf = env->btf;
3044 	struct btf_header *hdr;
3045 	void *cur, *end;
3046 
3047 	hdr = &btf->hdr;
3048 	cur = btf->nohdr_data + hdr->type_off;
3049 	end = cur + hdr->type_len;
3050 
3051 	env->log_type_id = 1;
3052 	while (cur < end) {
3053 		struct btf_type *t = cur;
3054 		s32 meta_size;
3055 
3056 		meta_size = btf_check_meta(env, t, end - cur);
3057 		if (meta_size < 0)
3058 			return meta_size;
3059 
3060 		btf_add_type(env, t);
3061 		cur += meta_size;
3062 		env->log_type_id++;
3063 	}
3064 
3065 	return 0;
3066 }
3067 
3068 static bool btf_resolve_valid(struct btf_verifier_env *env,
3069 			      const struct btf_type *t,
3070 			      u32 type_id)
3071 {
3072 	struct btf *btf = env->btf;
3073 
3074 	if (!env_type_is_resolved(env, type_id))
3075 		return false;
3076 
3077 	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
3078 		return !btf->resolved_ids[type_id] &&
3079 		       !btf->resolved_sizes[type_id];
3080 
3081 	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
3082 	    btf_type_is_var(t)) {
3083 		t = btf_type_id_resolve(btf, &type_id);
3084 		return t &&
3085 		       !btf_type_is_modifier(t) &&
3086 		       !btf_type_is_var(t) &&
3087 		       !btf_type_is_datasec(t);
3088 	}
3089 
3090 	if (btf_type_is_array(t)) {
3091 		const struct btf_array *array = btf_type_array(t);
3092 		const struct btf_type *elem_type;
3093 		u32 elem_type_id = array->type;
3094 		u32 elem_size;
3095 
3096 		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
3097 		return elem_type && !btf_type_is_modifier(elem_type) &&
3098 			(array->nelems * elem_size ==
3099 			 btf->resolved_sizes[type_id]);
3100 	}
3101 
3102 	return false;
3103 }
3104 
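/* Iteratively resolve a type and everything it refers to, using the
 * env's explicit stack instead of recursion.  A reference cycle such as
 * (a rough sketch, hypothetical type ids)
 *
 *	[1] TYPEDEF type_id=2
 *	[2] CONST   type_id=1
 *
 * can never resolve to a concrete size; it surfaces as the -EEXIST
 * case below and is logged as "Loop detected".
 */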
3105 static int btf_resolve(struct btf_verifier_env *env,
3106 		       const struct btf_type *t, u32 type_id)
3107 {
3108 	u32 save_log_type_id = env->log_type_id;
3109 	const struct resolve_vertex *v;
3110 	int err = 0;
3111 
3112 	env->resolve_mode = RESOLVE_TBD;
3113 	env_stack_push(env, t, type_id);
3114 	while (!err && (v = env_stack_peak(env))) {
3115 		env->log_type_id = v->type_id;
3116 		err = btf_type_ops(v->t)->resolve(env, v);
3117 	}
3118 
3119 	env->log_type_id = type_id;
3120 	if (err == -E2BIG) {
3121 		btf_verifier_log_type(env, t,
3122 				      "Exceeded max resolving depth:%u",
3123 				      MAX_RESOLVE_DEPTH);
3124 	} else if (err == -EEXIST) {
3125 		btf_verifier_log_type(env, t, "Loop detected");
3126 	}
3127 
3128 	/* Final sanity check */
3129 	if (!err && !btf_resolve_valid(env, t, type_id)) {
3130 		btf_verifier_log_type(env, t, "Invalid resolve state");
3131 		err = -EINVAL;
3132 	}
3133 
3134 	env->log_type_id = save_log_type_id;
3135 	return err;
3136 }
3137 
3138 static int btf_check_all_types(struct btf_verifier_env *env)
3139 {
3140 	struct btf *btf = env->btf;
3141 	u32 type_id;
3142 	int err;
3143 
3144 	err = env_resolve_init(env);
3145 	if (err)
3146 		return err;
3147 
3148 	env->phase++;
3149 	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
3150 		const struct btf_type *t = btf_type_by_id(btf, type_id);
3151 
3152 		env->log_type_id = type_id;
3153 		if (btf_type_needs_resolve(t) &&
3154 		    !env_type_is_resolved(env, type_id)) {
3155 			err = btf_resolve(env, t, type_id);
3156 			if (err)
3157 				return err;
3158 		}
3159 
3160 		if (btf_type_is_func_proto(t)) {
3161 			err = btf_func_proto_check(env, t);
3162 			if (err)
3163 				return err;
3164 		}
3165 
3166 		if (btf_type_is_func(t)) {
3167 			err = btf_func_check(env, t);
3168 			if (err)
3169 				return err;
3170 		}
3171 	}
3172 
3173 	return 0;
3174 }
3175 
3176 static int btf_parse_type_sec(struct btf_verifier_env *env)
3177 {
3178 	const struct btf_header *hdr = &env->btf->hdr;
3179 	int err;
3180 
3181 	/* Type section must be aligned to 4 bytes */
3182 	if (hdr->type_off & (sizeof(u32) - 1)) {
3183 		btf_verifier_log(env, "Unaligned type_off");
3184 		return -EINVAL;
3185 	}
3186 
3187 	if (!hdr->type_len) {
3188 		btf_verifier_log(env, "No type found");
3189 		return -EINVAL;
3190 	}
3191 
3192 	err = btf_check_all_metas(env);
3193 	if (err)
3194 		return err;
3195 
3196 	return btf_check_all_types(env);
3197 }
3198 
3199 static int btf_parse_str_sec(struct btf_verifier_env *env)
3200 {
3201 	const struct btf_header *hdr;
3202 	struct btf *btf = env->btf;
3203 	const char *start, *end;
3204 
3205 	hdr = &btf->hdr;
3206 	start = btf->nohdr_data + hdr->str_off;
3207 	end = start + hdr->str_len;
3208 
3209 	if (end != btf->data + btf->data_size) {
3210 		btf_verifier_log(env, "String section is not at the end");
3211 		return -EINVAL;
3212 	}
3213 
3214 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
3215 	    start[0] || end[-1]) {
3216 		btf_verifier_log(env, "Invalid string section");
3217 		return -EINVAL;
3218 	}
3219 
3220 	btf->strings = start;
3221 
3222 	return 0;
3223 }
3224 
3225 static const size_t btf_sec_info_offset[] = {
3226 	offsetof(struct btf_header, type_off),
3227 	offsetof(struct btf_header, str_off),
3228 };
3229 
3230 static int btf_sec_info_cmp(const void *a, const void *b)
3231 {
3232 	const struct btf_sec_info *x = a;
3233 	const struct btf_sec_info *y = b;
3234 
3235 	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3236 }
3237 
3238 static int btf_check_sec_info(struct btf_verifier_env *env,
3239 			      u32 btf_data_size)
3240 {
3241 	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
3242 	u32 total, expected_total, i;
3243 	const struct btf_header *hdr;
3244 	const struct btf *btf;
3245 
3246 	btf = env->btf;
3247 	hdr = &btf->hdr;
3248 
3249 	/* Populate the secs from hdr */
3250 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
3251 		secs[i] = *(struct btf_sec_info *)((void *)hdr +
3252 						   btf_sec_info_offset[i]);
3253 
3254 	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
3255 	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
3256 
3257 	/* Check for gaps and overlap among sections */
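	/* For example (a sketch), a valid layout looks like
	 *
	 *	| btf_header | type section | string section |
	 *
	 * i.e. the two sections, in whatever order the header declares,
	 * must exactly tile the area after the header: no gap, no overlap
	 * and no trailing data.
	 */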
3258 	total = 0;
3259 	expected_total = btf_data_size - hdr->hdr_len;
3260 	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
3261 		if (expected_total < secs[i].off) {
3262 			btf_verifier_log(env, "Invalid section offset");
3263 			return -EINVAL;
3264 		}
3265 		if (total < secs[i].off) {
3266 			/* gap */
3267 			btf_verifier_log(env, "Unsupported section found");
3268 			return -EINVAL;
3269 		}
3270 		if (total > secs[i].off) {
3271 			btf_verifier_log(env, "Section overlap found");
3272 			return -EINVAL;
3273 		}
3274 		if (expected_total - total < secs[i].len) {
3275 			btf_verifier_log(env,
3276 					 "Total section length too long");
3277 			return -EINVAL;
3278 		}
3279 		total += secs[i].len;
3280 	}
3281 
3282 	/* There is data other than hdr and known sections */
3283 	if (expected_total != total) {
3284 		btf_verifier_log(env, "Unsupported section found");
3285 		return -EINVAL;
3286 	}
3287 
3288 	return 0;
3289 }
3290 
3291 static int btf_parse_hdr(struct btf_verifier_env *env)
3292 {
3293 	u32 hdr_len, hdr_copy, btf_data_size;
3294 	const struct btf_header *hdr;
3295 	struct btf *btf;
3296 	int err;
3297 
3298 	btf = env->btf;
3299 	btf_data_size = btf->data_size;
3300 
3301 	if (btf_data_size <
3302 	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
3303 		btf_verifier_log(env, "hdr_len not found");
3304 		return -EINVAL;
3305 	}
3306 
3307 	hdr = btf->data;
3308 	hdr_len = hdr->hdr_len;
3309 	if (btf_data_size < hdr_len) {
3310 		btf_verifier_log(env, "btf_header not found");
3311 		return -EINVAL;
3312 	}
3313 
3314 	/* Ensure the unsupported header fields are zero */
3315 	if (hdr_len > sizeof(btf->hdr)) {
3316 		u8 *expected_zero = btf->data + sizeof(btf->hdr);
3317 		u8 *end = btf->data + hdr_len;
3318 
3319 		for (; expected_zero < end; expected_zero++) {
3320 			if (*expected_zero) {
3321 				btf_verifier_log(env, "Unsupported btf_header");
3322 				return -E2BIG;
3323 			}
3324 		}
3325 	}
3326 
3327 	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
3328 	memcpy(&btf->hdr, btf->data, hdr_copy);
3329 
3330 	hdr = &btf->hdr;
3331 
3332 	btf_verifier_log_hdr(env, btf_data_size);
3333 
3334 	if (hdr->magic != BTF_MAGIC) {
3335 		btf_verifier_log(env, "Invalid magic");
3336 		return -EINVAL;
3337 	}
3338 
3339 	if (hdr->version != BTF_VERSION) {
3340 		btf_verifier_log(env, "Unsupported version");
3341 		return -ENOTSUPP;
3342 	}
3343 
3344 	if (hdr->flags) {
3345 		btf_verifier_log(env, "Unsupported flags");
3346 		return -ENOTSUPP;
3347 	}
3348 
3349 	if (btf_data_size == hdr->hdr_len) {
3350 		btf_verifier_log(env, "No data");
3351 		return -EINVAL;
3352 	}
3353 
3354 	err = btf_check_sec_info(env, btf_data_size);
3355 	if (err)
3356 		return err;
3357 
3358 	return 0;
3359 }
3360 
3361 static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
3362 			     u32 log_level, char __user *log_ubuf, u32 log_size)
3363 {
3364 	struct btf_verifier_env *env = NULL;
3365 	struct bpf_verifier_log *log;
3366 	struct btf *btf = NULL;
3367 	u8 *data;
3368 	int err;
3369 
3370 	if (btf_data_size > BTF_MAX_SIZE)
3371 		return ERR_PTR(-E2BIG);
3372 
3373 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3374 	if (!env)
3375 		return ERR_PTR(-ENOMEM);
3376 
3377 	log = &env->log;
3378 	if (log_level || log_ubuf || log_size) {
3379 		/* user requested verbose verifier output
3380 		 * and supplied buffer to store the verification trace
3381 		 */
3382 		log->level = log_level;
3383 		log->ubuf = log_ubuf;
3384 		log->len_total = log_size;
3385 
3386 		/* log attributes have to be sane */
3387 		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
3388 		    !log->level || !log->ubuf) {
3389 			err = -EINVAL;
3390 			goto errout;
3391 		}
3392 	}
3393 
3394 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3395 	if (!btf) {
3396 		err = -ENOMEM;
3397 		goto errout;
3398 	}
3399 	env->btf = btf;
3400 
3401 	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
3402 	if (!data) {
3403 		err = -ENOMEM;
3404 		goto errout;
3405 	}
3406 
3407 	btf->data = data;
3408 	btf->data_size = btf_data_size;
3409 
3410 	if (copy_from_user(data, btf_data, btf_data_size)) {
3411 		err = -EFAULT;
3412 		goto errout;
3413 	}
3414 
3415 	err = btf_parse_hdr(env);
3416 	if (err)
3417 		goto errout;
3418 
3419 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3420 
3421 	err = btf_parse_str_sec(env);
3422 	if (err)
3423 		goto errout;
3424 
3425 	err = btf_parse_type_sec(env);
3426 	if (err)
3427 		goto errout;
3428 
3429 	if (log->level && bpf_verifier_log_full(log)) {
3430 		err = -ENOSPC;
3431 		goto errout;
3432 	}
3433 
3434 	btf_verifier_env_free(env);
3435 	refcount_set(&btf->refcnt, 1);
3436 	return btf;
3437 
3438 errout:
3439 	btf_verifier_env_free(env);
3440 	if (btf)
3441 		btf_free(btf);
3442 	return ERR_PTR(err);
3443 }
3444 
3445 extern char __weak _binary__btf_vmlinux_bin_start[];
3446 extern char __weak _binary__btf_vmlinux_bin_end[];
3447 extern struct btf *btf_vmlinux;
3448 
3449 #define BPF_MAP_TYPE(_id, _ops)
3450 static union {
3451 	struct bpf_ctx_convert {
3452 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3453 	prog_ctx_type _id##_prog; \
3454 	kern_ctx_type _id##_kern;
3455 #include <linux/bpf_types.h>
3456 #undef BPF_PROG_TYPE
3457 	} *__t;
3458 	/* 't' is written once under lock. Read many times. */
3459 	const struct btf_type *t;
3460 } bpf_ctx_convert;
3461 enum {
3462 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3463 	__ctx_convert##_id,
3464 #include <linux/bpf_types.h>
3465 #undef BPF_PROG_TYPE
3466 	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
3467 };
3468 static u8 bpf_ctx_convert_map[] = {
3469 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3470 	[_id] = __ctx_convert##_id,
3471 #include <linux/bpf_types.h>
3472 #undef BPF_PROG_TYPE
3473 	0, /* avoid empty array */
3474 };
3475 #undef BPF_MAP_TYPE
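/* A rough sketch of what the expansion above produces (illustrative,
 * using BPF_PROG_TYPE_SOCKET_FILTER as an example):
 *
 *	struct bpf_ctx_convert {
 *		struct __sk_buff BPF_PROG_TYPE_SOCKET_FILTER_prog;
 *		struct sk_buff	 BPF_PROG_TYPE_SOCKET_FILTER_kern;
 *		...
 *	};
 *
 * btf_get_prog_ctx_type() below walks the BTF members of this struct in
 * pairs: member 2 * bpf_ctx_convert_map[prog_type] is the user-visible
 * ctx type and the member right after it is the in-kernel ctx type.
 */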
3476 
3477 static const struct btf_member *
3478 btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
3479 		      const struct btf_type *t, enum bpf_prog_type prog_type)
3480 {
3481 	const struct btf_type *conv_struct;
3482 	const struct btf_type *ctx_struct;
3483 	const struct btf_member *ctx_type;
3484 	const char *tname, *ctx_tname;
3485 
3486 	conv_struct = bpf_ctx_convert.t;
3487 	if (!conv_struct) {
3488 		bpf_log(log, "btf_vmlinux is malformed\n");
3489 		return NULL;
3490 	}
3491 	t = btf_type_by_id(btf, t->type);
3492 	while (btf_type_is_modifier(t))
3493 		t = btf_type_by_id(btf, t->type);
3494 	if (!btf_type_is_struct(t)) {
3495 		/* Only pointer to struct is supported for now.
3496 		 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
3497 		 * is not supported yet.
3498 		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
3499 		 */
3500 		bpf_log(log, "BPF program ctx type is not a struct\n");
3501 		return NULL;
3502 	}
3503 	tname = btf_name_by_offset(btf, t->name_off);
3504 	if (!tname) {
3505 		bpf_log(log, "BPF program ctx struct doesn't have a name\n");
3506 		return NULL;
3507 	}
3508 	/* prog_type is a valid bpf program type. No need for bounds check. */
3509 	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
3510 	/* ctx_struct is a pointer to prog_ctx_type in vmlinux.
3511 	 * Like 'struct __sk_buff'
3512 	 */
3513 	ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
3514 	if (!ctx_struct)
3515 		/* should not happen */
3516 		return NULL;
3517 	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
3518 	if (!ctx_tname) {
3519 		/* should not happen */
3520 		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
3521 		return NULL;
3522 	}
3523 	/* only check that the prog's ctx type name is the same as the
3524 	 * kernel expects. No need to compare field by field.
3525 	 * It's ok for bpf prog to do:
3526 	 * struct __sk_buff {};
3527 	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
3528 	 * { // no fields of skb are ever used }
3529 	 */
3530 	if (strcmp(ctx_tname, tname))
3531 		return NULL;
3532 	return ctx_type;
3533 }
3534 
3535 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
3536 				     struct btf *btf,
3537 				     const struct btf_type *t,
3538 				     enum bpf_prog_type prog_type)
3539 {
3540 	const struct btf_member *prog_ctx_type, *kern_ctx_type;
3541 
3542 	prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type);
3543 	if (!prog_ctx_type)
3544 		return -ENOENT;
3545 	kern_ctx_type = prog_ctx_type + 1;
3546 	return kern_ctx_type->type;
3547 }
3548 
3549 struct btf *btf_parse_vmlinux(void)
3550 {
3551 	struct btf_verifier_env *env = NULL;
3552 	struct bpf_verifier_log *log;
3553 	struct btf *btf = NULL;
3554 	int err, i;
3555 
3556 	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3557 	if (!env)
3558 		return ERR_PTR(-ENOMEM);
3559 
3560 	log = &env->log;
3561 	log->level = BPF_LOG_KERNEL;
3562 
3563 	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3564 	if (!btf) {
3565 		err = -ENOMEM;
3566 		goto errout;
3567 	}
3568 	env->btf = btf;
3569 
3570 	btf->data = _binary__btf_vmlinux_bin_start;
3571 	btf->data_size = _binary__btf_vmlinux_bin_end -
3572 		_binary__btf_vmlinux_bin_start;
3573 
3574 	err = btf_parse_hdr(env);
3575 	if (err)
3576 		goto errout;
3577 
3578 	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3579 
3580 	err = btf_parse_str_sec(env);
3581 	if (err)
3582 		goto errout;
3583 
3584 	err = btf_check_all_metas(env);
3585 	if (err)
3586 		goto errout;
3587 
3588 	/* find struct bpf_ctx_convert for type checking later */
3589 	for (i = 1; i <= btf->nr_types; i++) {
3590 		const struct btf_type *t;
3591 		const char *tname;
3592 
3593 		t = btf_type_by_id(btf, i);
3594 		if (!__btf_type_is_struct(t))
3595 			continue;
3596 		tname = __btf_name_by_offset(btf, t->name_off);
3597 		if (!strcmp(tname, "bpf_ctx_convert")) {
3598 			/* btf_parse_vmlinux() runs under bpf_verifier_lock */
3599 			bpf_ctx_convert.t = t;
3600 			break;
3601 		}
3602 	}
3603 	if (i > btf->nr_types) {
3604 		err = -ENOENT;
3605 		goto errout;
3606 	}
3607 
3608 	btf_verifier_env_free(env);
3609 	refcount_set(&btf->refcnt, 1);
3610 	return btf;
3611 
3612 errout:
3613 	btf_verifier_env_free(env);
3614 	if (btf) {
3615 		kvfree(btf->types);
3616 		kfree(btf);
3617 	}
3618 	return ERR_PTR(err);
3619 }
3620 
3621 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
3622 {
3623 	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3624 
3625 	if (tgt_prog) {
3626 		return tgt_prog->aux->btf;
3627 	} else {
3628 		return btf_vmlinux;
3629 	}
3630 }
3631 
3632 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
3633 		    const struct bpf_prog *prog,
3634 		    struct bpf_insn_access_aux *info)
3635 {
3636 	const struct btf_type *t = prog->aux->attach_func_proto;
3637 	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3638 	struct btf *btf = bpf_prog_get_target_btf(prog);
3639 	const char *tname = prog->aux->attach_func_name;
3640 	struct bpf_verifier_log *log = info->log;
3641 	const struct btf_param *args;
3642 	u32 nr_args, arg;
3643 	int ret;
3644 
3645 	if (off % 8) {
3646 		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
3647 			tname, off);
3648 		return false;
3649 	}
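	/* The ctx of a BTF-typed tracing prog is laid out as an array of
	 * u64 slots, one per function argument.  For example (a sketch),
	 * with fentry attached to 'int foo(struct sk_buff *skb, u32 len)',
	 * offset 0 accesses 'skb' and offset 8 accesses 'len'.
	 */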
3650 	arg = off / 8;
3651 	args = (const struct btf_param *)(t + 1);
3652 	/* if t == NULL, fall back to the default BPF prog with 5 u64 arguments */
3653 	nr_args = t ? btf_type_vlen(t) : 5;
3654 	if (prog->aux->attach_btf_trace) {
3655 		/* skip first 'void *__data' argument in btf_trace_##name typedef */
3656 		args++;
3657 		nr_args--;
3658 	}
3659 
3660 	if (prog->expected_attach_type == BPF_TRACE_FEXIT &&
3661 	    arg == nr_args) {
3662 		if (!t)
3663 			/* Default prog with 5 args. 6th arg is retval. */
3664 			return true;
3665 		/* function return type */
3666 		t = btf_type_by_id(btf, t->type);
3667 	} else if (arg >= nr_args) {
3668 		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
3669 			tname, arg + 1);
3670 		return false;
3671 	} else {
3672 		if (!t)
3673 			/* Default prog with 5 args */
3674 			return true;
3675 		t = btf_type_by_id(btf, args[arg].type);
3676 	}
3677 	/* skip modifiers */
3678 	while (btf_type_is_modifier(t))
3679 		t = btf_type_by_id(btf, t->type);
3680 	if (btf_type_is_int(t))
3681 		/* accessing a scalar */
3682 		return true;
3683 	if (!btf_type_is_ptr(t)) {
3684 		bpf_log(log,
3685 			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
3686 			tname, arg,
3687 			__btf_name_by_offset(btf, t->name_off),
3688 			btf_kind_str[BTF_INFO_KIND(t->info)]);
3689 		return false;
3690 	}
3691 	if (t->type == 0)
3692 		/* This is a pointer to void.
3693 		 * It is the same as scalar from the verifier safety pov.
3694 		 * No further pointer walking is allowed.
3695 		 */
3696 		return true;
3697 
3698 	/* this is a pointer to another type */
3699 	info->reg_type = PTR_TO_BTF_ID;
3700 	info->btf_id = t->type;
3701 
3702 	if (tgt_prog) {
3703 		ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type);
3704 		if (ret > 0) {
3705 			info->btf_id = ret;
3706 			return true;
3707 		} else {
3708 			return false;
3709 		}
3710 	}
3711 	t = btf_type_by_id(btf, t->type);
3712 	/* skip modifiers */
3713 	while (btf_type_is_modifier(t))
3714 		t = btf_type_by_id(btf, t->type);
3715 	if (!btf_type_is_struct(t)) {
3716 		bpf_log(log,
3717 			"func '%s' arg%d type %s is not a struct\n",
3718 			tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
3719 		return false;
3720 	}
3721 	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
3722 		tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
3723 		__btf_name_by_offset(btf, t->name_off));
3724 	return true;
3725 }
3726 
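/* Check whether a load of 'size' bytes at offset 'off' into struct 't' is
 * allowed.  The members are walked; nested structs are handled by adjusting
 * 'off' and restarting the walk, and arrays are linearized via
 * btf_resolve_size().  Returns SCALAR_VALUE for scalar accesses,
 * PTR_TO_BTF_ID (with *next_btf_id set) when a pointer to another struct
 * is read, or a negative error.
 */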
3727 int btf_struct_access(struct bpf_verifier_log *log,
3728 		      const struct btf_type *t, int off, int size,
3729 		      enum bpf_access_type atype,
3730 		      u32 *next_btf_id)
3731 {
3732 	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
3733 	const struct btf_type *mtype, *elem_type = NULL;
3734 	const struct btf_member *member;
3735 	const char *tname, *mname;
3736 
3737 again:
3738 	tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3739 	if (!btf_type_is_struct(t)) {
3740 		bpf_log(log, "Type '%s' is not a struct", tname);
3741 		return -EINVAL;
3742 	}
3743 
3744 	for_each_member(i, t, member) {
3745 		if (btf_member_bitfield_size(t, member))
3746 			/* bitfields are not supported yet */
3747 			continue;
3748 
3749 		/* offset of the field in bytes */
3750 		moff = btf_member_bit_offset(t, member) / 8;
3751 		if (off + size <= moff)
3752 			/* won't find anything, field is already too far */
3753 			break;
3754 		/* If "off" points into a hole of the struct */
3755 		if (off < moff)
3756 			continue;
3757 
3758 		/* type of the field */
3759 		mtype = btf_type_by_id(btf_vmlinux, member->type);
3760 		mname = __btf_name_by_offset(btf_vmlinux, member->name_off);
3761 
3762 		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
3763 					 &elem_type, &total_nelems);
3764 		if (IS_ERR(mtype)) {
3765 			bpf_log(log, "field %s doesn't have size\n", mname);
3766 			return -EFAULT;
3767 		}
3768 
3769 		mtrue_end = moff + msize;
3770 		if (off >= mtrue_end)
3771 			/* no overlap with member, keep iterating */
3772 			continue;
3773 
3774 		if (btf_type_is_array(mtype)) {
3775 			u32 elem_idx;
3776 
3777 			/* btf_resolve_size() above helps to
3778 			 * linearize a multi-dimensional array.
3779 			 *
3780 			 * The logic here treats an array
3781 			 * in a struct in the following way:
3782 			 *
3783 			 * struct outer {
3784 			 *	struct inner array[2][2];
3785 			 * };
3786 			 *
3787 			 * looks like:
3788 			 *
3789 			 * struct outer {
3790 			 *	struct inner array_elem0;
3791 			 *	struct inner array_elem1;
3792 			 *	struct inner array_elem2;
3793 			 *	struct inner array_elem3;
3794 			 * };
3795 			 *
3796 			 * When accessing outer->array[1][0], it moves
3797 			 * moff to "array_elem2", sets mtype to
3798 			 * "struct inner", and msize becomes
3799 			 * sizeof(struct inner).  Then most of the
3800 			 * remaining logic falls through without
3801 			 * caring whether the current member is an
3802 			 * array or not.
3803 			 *
3804 			 * Unlike mtype/msize/moff, mtrue_end does not
3805 			 * change.  The naming difference ("_true")
3806 			 * indicates that it does not always correspond
3807 			 * to the current mtype/msize/moff.
3808 			 * It is the true end of the current
3809 			 * member (i.e. the array in this case).  That
3810 			 * allows an int array to be used like
3811 			 * a scratch space,
3812 			 * i.e. access beyond the size of
3813 			 *      the array's element is allowed as long
3814 			 *      as it stays within the mtrue_end boundary.
3815 			 */
3816 
3817 			/* skip empty array */
3818 			if (moff == mtrue_end)
3819 				continue;
3820 
3821 			msize /= total_nelems;
3822 			elem_idx = (off - moff) / msize;
3823 			moff += elem_idx * msize;
3824 			mtype = elem_type;
3825 		}
3826 
3827 		/* the 'off' we're looking for is either equal to the start
3828 		 * of this field or inside of this struct
3829 		 */
3830 		if (btf_type_is_struct(mtype)) {
3831 			/* our field must be inside that union or struct */
3832 			t = mtype;
3833 
3834 			/* adjust offset we're looking for */
3835 			off -= moff;
3836 			goto again;
3837 		}
3838 
3839 		if (btf_type_is_ptr(mtype)) {
3840 			const struct btf_type *stype;
3841 
3842 			if (msize != size || off != moff) {
3843 				bpf_log(log,
3844 					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
3845 					mname, moff, tname, off, size);
3846 				return -EACCES;
3847 			}
3848 
3849 			stype = btf_type_by_id(btf_vmlinux, mtype->type);
3850 			/* skip modifiers */
3851 			while (btf_type_is_modifier(stype))
3852 				stype = btf_type_by_id(btf_vmlinux, stype->type);
3853 			if (btf_type_is_struct(stype)) {
3854 				*next_btf_id = mtype->type;
3855 				return PTR_TO_BTF_ID;
3856 			}
3857 		}
3858 
3859 		/* Allow more flexible access within an int as long as
3860 		 * it is within mtrue_end.
3861 		 * Since mtrue_end could be the end of an array,
3862 		 * that also allows using an array of int as a scratch
3863 		 * space, e.g. skb->cb[].
3864 		 */
3865 		if (off + size > mtrue_end) {
3866 			bpf_log(log,
3867 				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
3868 				mname, mtrue_end, tname, off, size);
3869 			return -EACCES;
3870 		}
3871 
3872 		return SCALAR_VALUE;
3873 	}
3874 	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
3875 	return -EINVAL;
3876 }
3877 
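/* Resolve the vmlinux BTF ID of the struct expected by the arg-th parameter
 * of a helper.  The helper's symbol name is obtained via kallsyms and
 * prefixed with "btf_"; vmlinux BTF is then searched for a typedef of that
 * name whose underlying FUNC_PROTO describes the helper's arguments.
 */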
3878 static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
3879 				   int arg)
3880 {
3881 	char fnname[KSYM_SYMBOL_LEN + 4] = "btf_";
3882 	const struct btf_param *args;
3883 	const struct btf_type *t;
3884 	const char *tname, *sym;
3885 	u32 btf_id, i;
3886 
3887 	if (IS_ERR(btf_vmlinux)) {
3888 		bpf_log(log, "btf_vmlinux is malformed\n");
3889 		return -EINVAL;
3890 	}
3891 
3892 	sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4);
3893 	if (!sym) {
3894 		bpf_log(log, "kernel doesn't have kallsyms\n");
3895 		return -EFAULT;
3896 	}
3897 
3898 	for (i = 1; i <= btf_vmlinux->nr_types; i++) {
3899 		t = btf_type_by_id(btf_vmlinux, i);
3900 		if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF)
3901 			continue;
3902 		tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3903 		if (!strcmp(tname, fnname))
3904 			break;
3905 	}
3906 	if (i > btf_vmlinux->nr_types) {
3907 		bpf_log(log, "helper %s type is not found\n", fnname);
3908 		return -ENOENT;
3909 	}
3910 
3911 	t = btf_type_by_id(btf_vmlinux, t->type);
3912 	if (!btf_type_is_ptr(t))
3913 		return -EFAULT;
3914 	t = btf_type_by_id(btf_vmlinux, t->type);
3915 	if (!btf_type_is_func_proto(t))
3916 		return -EFAULT;
3917 
3918 	args = (const struct btf_param *)(t + 1);
3919 	if (arg >= btf_type_vlen(t)) {
3920 		bpf_log(log, "bpf helper %s doesn't have %d-th argument\n",
3921 			fnname, arg);
3922 		return -EINVAL;
3923 	}
3924 
3925 	t = btf_type_by_id(btf_vmlinux, args[arg].type);
3926 	if (!btf_type_is_ptr(t) || !t->type) {
3927 		/* anything but a pointer to struct is a helper config bug */
3928 		bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n");
3929 		return -EFAULT;
3930 	}
3931 	btf_id = t->type;
3932 	t = btf_type_by_id(btf_vmlinux, t->type);
3933 	/* skip modifiers */
3934 	while (btf_type_is_modifier(t)) {
3935 		btf_id = t->type;
3936 		t = btf_type_by_id(btf_vmlinux, t->type);
3937 	}
3938 	if (!btf_type_is_struct(t)) {
3939 		bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n");
3940 		return -EFAULT;
3941 	}
3942 	bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4,
3943 		arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off));
3944 	return btf_id;
3945 }
3946 
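/* Cache the resolved BTF ID in fn->btf_id[arg].  Concurrent callers may
 * race the lookup, but they all arrive at the same value.
 */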
3947 int btf_resolve_helper_id(struct bpf_verifier_log *log,
3948 			  const struct bpf_func_proto *fn, int arg)
3949 {
3950 	int *btf_id = &fn->btf_id[arg];
3951 	int ret;
3952 
3953 	if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID)
3954 		return -EINVAL;
3955 
3956 	ret = READ_ONCE(*btf_id);
3957 	if (ret)
3958 		return ret;
3959 	/* ok to race the search. The result is the same */
3960 	ret = __btf_resolve_helper_id(log, fn->func, arg);
3961 	if (!ret) {
3962 		/* Function argument cannot be type 'void' */
3963 		bpf_log(log, "BTF resolution bug\n");
3964 		return -EFAULT;
3965 	}
3966 	WRITE_ONCE(*btf_id, ret);
3967 	return ret;
3968 }
3969 
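/* Return the kernel size in bytes of the type 'btf_id' after skipping
 * modifiers.  Only void (size 0), pointers, ints and enums are supported;
 * anything else is reported back through *bad_type.
 */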
3970 static int __get_type_size(struct btf *btf, u32 btf_id,
3971 			   const struct btf_type **bad_type)
3972 {
3973 	const struct btf_type *t;
3974 
3975 	if (!btf_id)
3976 		/* void */
3977 		return 0;
3978 	t = btf_type_by_id(btf, btf_id);
3979 	while (t && btf_type_is_modifier(t))
3980 		t = btf_type_by_id(btf, t->type);
3981 	if (!t) {
3982 		*bad_type = btf->types[0];
3983 		return -EINVAL;
3984 	}
3985 	if (btf_type_is_ptr(t))
3986 		/* kernel size of a pointer, not BPF's size of a pointer */
3987 		return sizeof(void *);
3988 	if (btf_type_is_int(t) || btf_type_is_enum(t))
3989 		return t->size;
3990 	*bad_type = t;
3991 	return -EINVAL;
3992 }
3993 
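/* Distill the FUNC_PROTO 'func' into a simple calling convention model:
 * the number of arguments plus the kernel size of each argument and of the
 * return value.  Without a BTF prototype, 5 u64 arguments are assumed.
 *
 * Illustrative example, with a hypothetical prototype: for
 *
 *	long foo(struct sk_buff *skb, u32 len);
 *
 * the resulting model on a 64-bit kernel would be nr_args = 2,
 * arg_size = { 8, 4 } and ret_size = 8.
 */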
3994 int btf_distill_func_proto(struct bpf_verifier_log *log,
3995 			   struct btf *btf,
3996 			   const struct btf_type *func,
3997 			   const char *tname,
3998 			   struct btf_func_model *m)
3999 {
4000 	const struct btf_param *args;
4001 	const struct btf_type *t;
4002 	u32 i, nargs;
4003 	int ret;
4004 
4005 	if (!func) {
4006 		/* BTF function prototype doesn't match the verifier types.
4007 		 * Fall back to 5 u64 args.
4008 		 */
4009 		for (i = 0; i < 5; i++)
4010 			m->arg_size[i] = 8;
4011 		m->ret_size = 8;
4012 		m->nr_args = 5;
4013 		return 0;
4014 	}
4015 	args = (const struct btf_param *)(func + 1);
4016 	nargs = btf_type_vlen(func);
4017 	if (nargs >= MAX_BPF_FUNC_ARGS) {
4018 		bpf_log(log,
4019 			"The function %s has %d arguments. Too many.\n",
4020 			tname, nargs);
4021 		return -EINVAL;
4022 	}
4023 	ret = __get_type_size(btf, func->type, &t);
4024 	if (ret < 0) {
4025 		bpf_log(log,
4026 			"The function %s return type %s is unsupported.\n",
4027 			tname, btf_kind_str[BTF_INFO_KIND(t->info)]);
4028 		return -EINVAL;
4029 	}
4030 	m->ret_size = ret;
4031 
4032 	for (i = 0; i < nargs; i++) {
4033 		ret = __get_type_size(btf, args[i].type, &t);
4034 		if (ret < 0) {
4035 			bpf_log(log,
4036 				"The function %s arg%d type %s is unsupported.\n",
4037 				tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4038 			return -EINVAL;
4039 		}
4040 		m->arg_size[i] = ret;
4041 	}
4042 	m->nr_args = nargs;
4043 	return 0;
4044 }
4045 
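/* Compare the BTF argument types of a subprogram against the register types
 * the verifier sees at the call site.  A mismatch does not fail the load;
 * the subprog's func_info is marked unreliable instead, since compiler
 * optimizations can remove arguments from static functions.
 */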
4046 int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog)
4047 {
4048 	struct bpf_verifier_state *st = env->cur_state;
4049 	struct bpf_func_state *func = st->frame[st->curframe];
4050 	struct bpf_reg_state *reg = func->regs;
4051 	struct bpf_verifier_log *log = &env->log;
4052 	struct bpf_prog *prog = env->prog;
4053 	struct btf *btf = prog->aux->btf;
4054 	const struct btf_param *args;
4055 	const struct btf_type *t;
4056 	u32 i, nargs, btf_id;
4057 	const char *tname;
4058 
4059 	if (!prog->aux->func_info)
4060 		return 0;
4061 
4062 	btf_id = prog->aux->func_info[subprog].type_id;
4063 	if (!btf_id)
4064 		return 0;
4065 
4066 	if (prog->aux->func_info_aux[subprog].unreliable)
4067 		return 0;
4068 
4069 	t = btf_type_by_id(btf, btf_id);
4070 	if (!t || !btf_type_is_func(t)) {
4071 		bpf_log(log, "BTF of subprog %d doesn't point to KIND_FUNC\n",
4072 			subprog);
4073 		return -EINVAL;
4074 	}
4075 	tname = btf_name_by_offset(btf, t->name_off);
4076 
4077 	t = btf_type_by_id(btf, t->type);
4078 	if (!t || !btf_type_is_func_proto(t)) {
4079 		bpf_log(log, "Invalid type of func %s\n", tname);
4080 		return -EINVAL;
4081 	}
4082 	args = (const struct btf_param *)(t + 1);
4083 	nargs = btf_type_vlen(t);
4084 	if (nargs > 5) {
4085 		bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs);
4086 		goto out;
4087 	}
4088 	/* check that BTF function arguments match actual types that the
4089 	 * verifier sees.
4090 	 */
4091 	for (i = 0; i < nargs; i++) {
4092 		t = btf_type_by_id(btf, args[i].type);
4093 		while (btf_type_is_modifier(t))
4094 			t = btf_type_by_id(btf, t->type);
4095 		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
4096 			if (reg[i + 1].type == SCALAR_VALUE)
4097 				continue;
4098 			bpf_log(log, "R%d is not a scalar\n", i + 1);
4099 			goto out;
4100 		}
4101 		if (btf_type_is_ptr(t)) {
4102 			if (reg[i + 1].type == SCALAR_VALUE) {
4103 				bpf_log(log, "R%d is not a pointer\n", i + 1);
4104 				goto out;
4105 			}
4106 			/* If program is passing PTR_TO_CTX into subprogram
4107 			 * check that BTF type matches.
4108 			 */
4109 			if (reg[i + 1].type == PTR_TO_CTX &&
4110 			    !btf_get_prog_ctx_type(log, btf, t, prog->type))
4111 				goto out;
4112 			/* All other pointers are ok */
4113 			continue;
4114 		}
4115 		bpf_log(log, "Unrecognized argument type %s\n",
4116 			btf_kind_str[BTF_INFO_KIND(t->info)]);
4117 		goto out;
4118 	}
4119 	return 0;
4120 out:
4121 	/* LLVM optimizations can remove arguments from static functions. */
4122 	bpf_log(log,
4123 		"Type info disagrees with actual arguments due to compiler optimizations\n");
4124 	prog->aux->func_info_aux[subprog].unreliable = true;
4125 	return 0;
4126 }
4127 
4128 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
4129 		       struct seq_file *m)
4130 {
4131 	const struct btf_type *t = btf_type_by_id(btf, type_id);
4132 
4133 	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
4134 }
4135 
4136 #ifdef CONFIG_PROC_FS
4137 static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
4138 {
4139 	const struct btf *btf = filp->private_data;
4140 
4141 	seq_printf(m, "btf_id:\t%u\n", btf->id);
4142 }
4143 #endif
4144 
4145 static int btf_release(struct inode *inode, struct file *filp)
4146 {
4147 	btf_put(filp->private_data);
4148 	return 0;
4149 }
4150 
4151 const struct file_operations btf_fops = {
4152 #ifdef CONFIG_PROC_FS
4153 	.show_fdinfo	= bpf_btf_show_fdinfo,
4154 #endif
4155 	.release	= btf_release,
4156 };
4157 
4158 static int __btf_new_fd(struct btf *btf)
4159 {
4160 	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
4161 }
4162 
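/* Load a BTF blob supplied by userspace: parse and verify it, publish an ID
 * for it and return an anonymous read-only fd referring to the new object.
 */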
4163 int btf_new_fd(const union bpf_attr *attr)
4164 {
4165 	struct btf *btf;
4166 	int ret;
4167 
4168 	btf = btf_parse(u64_to_user_ptr(attr->btf),
4169 			attr->btf_size, attr->btf_log_level,
4170 			u64_to_user_ptr(attr->btf_log_buf),
4171 			attr->btf_log_size);
4172 	if (IS_ERR(btf))
4173 		return PTR_ERR(btf);
4174 
4175 	ret = btf_alloc_id(btf);
4176 	if (ret) {
4177 		btf_free(btf);
4178 		return ret;
4179 	}
4180 
4181 	/*
4182 	 * The BTF ID is published to userspace.
4183 	 * All BTF frees must go through call_rcu() from
4184 	 * now on (i.e. free by calling btf_put()).
4185 	 */
4186 
4187 	ret = __btf_new_fd(btf);
4188 	if (ret < 0)
4189 		btf_put(btf);
4190 
4191 	return ret;
4192 }
4193 
4194 struct btf *btf_get_by_fd(int fd)
4195 {
4196 	struct btf *btf;
4197 	struct fd f;
4198 
4199 	f = fdget(fd);
4200 
4201 	if (!f.file)
4202 		return ERR_PTR(-EBADF);
4203 
4204 	if (f.file->f_op != &btf_fops) {
4205 		fdput(f);
4206 		return ERR_PTR(-EINVAL);
4207 	}
4208 
4209 	btf = f.file->private_data;
4210 	refcount_inc(&btf->refcnt);
4211 	fdput(f);
4212 
4213 	return btf;
4214 }
4215 
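/* Fill a bpf_btf_info for the object: copy the raw BTF data into the
 * user-provided buffer (truncated to info.btf_size) and report the object's
 * id and true data size back to userspace.
 */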
4216 int btf_get_info_by_fd(const struct btf *btf,
4217 		       const union bpf_attr *attr,
4218 		       union bpf_attr __user *uattr)
4219 {
4220 	struct bpf_btf_info __user *uinfo;
4221 	struct bpf_btf_info info = {};
4222 	u32 info_copy, btf_copy;
4223 	void __user *ubtf;
4224 	u32 uinfo_len;
4225 
4226 	uinfo = u64_to_user_ptr(attr->info.info);
4227 	uinfo_len = attr->info.info_len;
4228 
4229 	info_copy = min_t(u32, uinfo_len, sizeof(info));
4230 	if (copy_from_user(&info, uinfo, info_copy))
4231 		return -EFAULT;
4232 
4233 	info.id = btf->id;
4234 	ubtf = u64_to_user_ptr(info.btf);
4235 	btf_copy = min_t(u32, btf->data_size, info.btf_size);
4236 	if (copy_to_user(ubtf, btf->data, btf_copy))
4237 		return -EFAULT;
4238 	info.btf_size = btf->data_size;
4239 
4240 	if (copy_to_user(uinfo, &info, info_copy) ||
4241 	    put_user(info_copy, &uattr->info.info_len))
4242 		return -EFAULT;
4243 
4244 	return 0;
4245 }
4246 
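/* Look up a BTF object by id in the IDR under RCU, take a reference if it
 * is still live and install a new fd for it.
 */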
4247 int btf_get_fd_by_id(u32 id)
4248 {
4249 	struct btf *btf;
4250 	int fd;
4251 
4252 	rcu_read_lock();
4253 	btf = idr_find(&btf_idr, id);
4254 	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
4255 		btf = ERR_PTR(-ENOENT);
4256 	rcu_read_unlock();
4257 
4258 	if (IS_ERR(btf))
4259 		return PTR_ERR(btf);
4260 
4261 	fd = __btf_new_fd(btf);
4262 	if (fd < 0)
4263 		btf_put(btf);
4264 
4265 	return fd;
4266 }
4267 
4268 u32 btf_id(const struct btf *btf)
4269 {
4270 	return btf->id;
4271 }
4272