xref: /openbmc/linux/kernel/bpf/btf.c (revision 0760aad038b5a032c31ea124feed63d88627d2f1)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/btf_ids.h>
22 #include <linux/skmsg.h>
23 #include <linux/perf_event.h>
24 #include <linux/bsearch.h>
25 #include <linux/btf_ids.h>
26 #include <net/sock.h>
27 
28 /* BTF (BPF Type Format) is the meta data format which describes
29  * the data types of BPF program/map.  Hence, it basically focuses
30  * on the C programming language, which modern BPF programs
31  * primarily use.
32  *
33  * ELF Section:
34  * ~~~~~~~~~~~
35  * The BTF data is stored under the ".BTF" ELF section
36  *
37  * struct btf_type:
38  * ~~~~~~~~~~~~~~~
39  * Each 'struct btf_type' object describes a C data type.
40  * Depending on the type it is describing, a 'struct btf_type'
41  * object may be followed by more data.  F.e.
42  * To describe an array, 'struct btf_type' is followed by
43  * 'struct btf_array'.
44  *
45  * 'struct btf_type' and any extra data following it are
46  * 4 bytes aligned.
47  *
48  * Type section:
49  * ~~~~~~~~~~~~~
50  * The BTF type section contains a list of 'struct btf_type' objects.
51  * Each one describes a C type.  Recall from the above section
52  * that a 'struct btf_type' object could be immediately followed by extra
53  * data in order to describe some particular C types.
54  *
55  * type_id:
56  * ~~~~~~~
57  * Each btf_type object is identified by a type_id.  The type_id
58  * is implicitly implied by the location of the btf_type object in
59  * the BTF type section.  The first one has type_id 1.  The second
60  * one has type_id 2...etc.  Hence, an earlier btf_type has
61  * a smaller type_id.
62  *
63  * A btf_type object may refer to another btf_type object by using
64  * type_id (i.e. the "type" in the "struct btf_type").
65  *
66  * NOTE that we cannot assume any reference-order.
67  * A btf_type object can refer to an earlier btf_type object
68  * but it can also refer to a later btf_type object.
69  *
70  * For example, to describe "const void *".  A btf_type
71  * object describing "const" may refer to another btf_type
72  * object describing "void *".  This type-reference is done
73  * by specifying type_id:
74  *
75  * [1] CONST (anon) type_id=2
76  * [2] PTR (anon) type_id=0
77  *
78  * The above is the btf_verifier debug log:
79  *   - Each line started with "[?]" is a btf_type object
80  *   - [?] is the type_id of the btf_type object.
81  *   - CONST/PTR is the BTF_KIND_XXX
82  *   - "(anon)" is the name of the type.  It just
83  *     happens that CONST and PTR has no name.
84  *   - type_id=XXX is the 'u32 type' in btf_type
85  *
86  * NOTE: "void" has type_id 0
87  *
88  * String section:
89  * ~~~~~~~~~~~~~~
90  * The BTF string section contains the names used by the type section.
91  * Each string is referred by an "offset" from the beginning of the
92  * string section.
93  *
94  * Each string is '\0' terminated.
95  *
96  * The first character in the string section must be '\0'
97  * which is used to mean 'anonymous'. Some btf_type may not
98  * have a name.
99  */
100 
101 /* BTF verification:
102  *
103  * To verify BTF data, two passes are needed.
104  *
105  * Pass #1
106  * ~~~~~~~
107  * The first pass is to collect all btf_type objects to
108  * an array: "btf->types".
109  *
110  * Depending on the C type that a btf_type is describing,
111  * a btf_type may be followed by extra data.  We don't know
112  * how many btf_type is there, and more importantly we don't
113  * know where each btf_type is located in the type section.
114  *
115  * Without knowing the location of each type_id, most verifications
116  * cannot be done.  e.g. an earlier btf_type may refer to a later
117  * btf_type (recall the "const void *" above), so we cannot
118  * check this type-reference in the first pass.
119  *
120  * In the first pass, it still does some verifications (e.g.
121  * checking the name is a valid offset to the string section).
122  *
123  * Pass #2
124  * ~~~~~~~
125  * The main focus is to resolve a btf_type that is referring
126  * to another type.
127  *
128  * We have to ensure the referring type:
129  * 1) does exist in the BTF (i.e. in btf->types[])
130  * 2) does not cause a loop:
131  *	struct A {
132  *		struct B b;
133  *	};
134  *
135  *	struct B {
136  *		struct A a;
137  *	};
138  *
139  * btf_type_needs_resolve() decides if a btf_type needs
140  * to be resolved.
141  *
142  * The needs_resolve type implements the "resolve()" ops which
143  * essentially does a DFS and detects backedge.
144  *
145  * During resolve (or DFS), different C types have different
146  * "RESOLVED" conditions.
147  *
148  * When resolving a BTF_KIND_STRUCT, we need to resolve all its
149  * members because a member is always referring to another
150  * type.  A struct's member can be treated as "RESOLVED" if
151  * it is referring to a BTF_KIND_PTR.  Otherwise, the
152  * following valid C struct would be rejected:
153  *
154  *	struct A {
155  *		int m;
156  *		struct A *a;
157  *	};
158  *
159  * When resolving a BTF_KIND_PTR, it needs to keep resolving if
160  * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
161  * detect a pointer loop, e.g.:
162  * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
163  *                        ^                                         |
164  *                        +-----------------------------------------+
165  *
166  */
167 
/* Number of bits in a __int128, the widest BTF_KIND_INT. */
#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
/* Non-zero iff "bits" is not a whole number of bytes. */
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
/* Convert a bit count/offset to whole bytes, rounding down. */
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
/* Convert a bit count to bytes, rounding up to cover a partial byte. */
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))

/* Bits of btf_type.info that are currently meaningful (kflag/kind/vlen). */
#define BTF_INFO_MASK 0x8f00ffff
/* Bits of BTF_KIND_INT type data that are currently meaningful. */
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs and each has 16 members and
 * a few MB spaces for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

/* Iterate struct/union members starting at member index "from". */
#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

/* Iterate the btf_var_secinfo entries of a BTF_KIND_DATASEC type. */
#define for_each_vsi(i, struct_type, member)			\
	for (i = 0, member = btf_type_var_secinfo(struct_type);	\
	     i < btf_type_vlen(struct_type);			\
	     i++, member++)

/* Same as for_each_vsi() but starting at entry index "from". */
#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)
200 
/* Map from user-visible BTF object id to struct btf; protected by
 * btf_idr_lock (see btf_alloc_id()/btf_free_id()).
 */
DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);

/* In-kernel representation of one loaded BTF object. */
struct btf {
	void *data;		/* raw BTF blob */
	struct btf_type **types;	/* indexed by type_id; [0] is btf_void */
	u32 *resolved_ids;	/* per type_id, set by env_stack_pop_resolved() */
	u32 *resolved_sizes;	/* per type_id, set by env_stack_pop_resolved() */
	const char *strings;	/* start of the string section */
	void *nohdr_data;	/* data past the btf_header */
	struct btf_header hdr;
	u32 nr_types;		/* number of types, excluding btf_void */
	u32 types_size;		/* capacity of the types[] array */
	u32 data_size;
	refcount_t refcnt;
	u32 id;			/* idr-allocated id, see btf_alloc_id() */
	struct rcu_head rcu;	/* used by btf_free_rcu() */
};
219 
/* Which verification pass is running (see "BTF verification" above):
 * CHECK_META collects and validates each btf_type's meta data,
 * CHECK_TYPE resolves references between types.
 */
enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

/* One frame of the DFS stack used while resolving type references. */
struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;	/* resume point when a struct is re-entered */
};

/* Per-type DFS state; env_stack_push() marks a type VISITED when it
 * goes on the stack, env_stack_pop_resolved() marks it RESOLVED.
 */
enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

/* Depth limit of the resolve DFS stack. */
#define MAX_RESOLVE_DEPTH 32

/* Byte offset/length of one section within the BTF blob. */
struct btf_sec_info {
	u32 off;
	u32 len;
};

/* Scratch state used while verifying one BTF blob. */
struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;	/* enum visit_state, one per type_id */
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;	/* type_id currently being logged */
	u32 top_stack;		/* number of used entries in stack[] */
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};
262 
/* Printable name for each BTF_KIND_*, used in verifier log output. */
static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
};
281 
282 static const char *btf_type_str(const struct btf_type *t)
283 {
284 	return btf_kind_str[BTF_INFO_KIND(t->info)];
285 }
286 
/* Per-kind verification and printing callbacks; one entry per
 * BTF_KIND_* in kind_ops[] below.
 */
struct btf_kind_operations {
	/* pass #1: validate the type's meta data */
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	/* pass #2: resolve references to other types */
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offsets,
			 struct seq_file *m);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
/* Singleton representing "void"; always installed as btf->types[0]
 * (type_id 0) by btf_add_type().
 */
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);
313 
314 static bool btf_type_is_modifier(const struct btf_type *t)
315 {
316 	/* Some of them is not strictly a C modifier
317 	 * but they are grouped into the same bucket
318 	 * for BTF concern:
319 	 *   A type (t) that refers to another
320 	 *   type through t->type AND its size cannot
321 	 *   be determined without following the t->type.
322 	 *
323 	 * ptr does not fall into this bucket
324 	 * because its size is always sizeof(void *).
325 	 */
326 	switch (BTF_INFO_KIND(t->info)) {
327 	case BTF_KIND_TYPEDEF:
328 	case BTF_KIND_VOLATILE:
329 	case BTF_KIND_CONST:
330 	case BTF_KIND_RESTRICT:
331 		return true;
332 	}
333 
334 	return false;
335 }
336 
/* True iff @t is the internal btf_void singleton (type_id 0). */
bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

/* True iff @t is a forward declaration. */
static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

/* Kinds that have no byte size: void, forward decls, funcs and
 * func protos.
 */
static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}
357 
358 /* union is only a special case of struct:
359  * all its offsetof(member) == 0
360  */
361 static bool btf_type_is_struct(const struct btf_type *t)
362 {
363 	u8 kind = BTF_INFO_KIND(t->info);
364 
365 	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
366 }
367 
/* Strictly a struct, excluding union (contrast btf_type_is_struct()). */
static bool __btf_type_is_struct(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_var(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}
387 
388 s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
389 {
390 	const struct btf_type *t;
391 	const char *tname;
392 	u32 i;
393 
394 	for (i = 1; i <= btf->nr_types; i++) {
395 		t = btf->types[i];
396 		if (BTF_INFO_KIND(t->info) != kind)
397 			continue;
398 
399 		tname = btf_name_by_offset(btf, t->name_off);
400 		if (!strcmp(tname, name))
401 			return i;
402 	}
403 
404 	return -ENOENT;
405 }
406 
407 const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
408 					       u32 id, u32 *res_id)
409 {
410 	const struct btf_type *t = btf_type_by_id(btf, id);
411 
412 	while (btf_type_is_modifier(t)) {
413 		id = t->type;
414 		t = btf_type_by_id(btf, t->type);
415 	}
416 
417 	if (res_id)
418 		*res_id = id;
419 
420 	return t;
421 }
422 
423 const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
424 					    u32 id, u32 *res_id)
425 {
426 	const struct btf_type *t;
427 
428 	t = btf_type_skip_modifiers(btf, id, NULL);
429 	if (!btf_type_is_ptr(t))
430 		return NULL;
431 
432 	return btf_type_skip_modifiers(btf, t->type, res_id);
433 }
434 
435 const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
436 						 u32 id, u32 *res_id)
437 {
438 	const struct btf_type *ptype;
439 
440 	ptype = btf_type_resolve_ptr(btf, id, res_id);
441 	if (ptype && btf_type_is_func_proto(ptype))
442 		return ptype;
443 
444 	return NULL;
445 }
446 
447 /* Types that act only as a source, not sink or intermediate
448  * type when resolving.
449  */
450 static bool btf_type_is_resolve_source_only(const struct btf_type *t)
451 {
452 	return btf_type_is_var(t) ||
453 	       btf_type_is_datasec(t);
454 }
455 
456 /* What types need to be resolved?
457  *
458  * btf_type_is_modifier() is an obvious one.
459  *
460  * btf_type_is_struct() because its member refers to
461  * another type (through member->type).
462  *
463  * btf_type_is_var() because the variable refers to
464  * another type. btf_type_is_datasec() holds multiple
465  * btf_type_is_var() types that need resolving.
466  *
467  * btf_type_is_array() because its element (array->type)
468  * refers to another type.  Array can be thought of a
469  * special case of struct while array just has the same
470  * member-type repeated by array->nelems of times.
471  */
472 static bool btf_type_needs_resolve(const struct btf_type *t)
473 {
474 	return btf_type_is_modifier(t) ||
475 	       btf_type_is_ptr(t) ||
476 	       btf_type_is_struct(t) ||
477 	       btf_type_is_array(t) ||
478 	       btf_type_is_var(t) ||
479 	       btf_type_is_datasec(t);
480 }
481 
482 /* t->size can be used */
483 static bool btf_type_has_size(const struct btf_type *t)
484 {
485 	switch (BTF_INFO_KIND(t->info)) {
486 	case BTF_KIND_INT:
487 	case BTF_KIND_STRUCT:
488 	case BTF_KIND_UNION:
489 	case BTF_KIND_ENUM:
490 	case BTF_KIND_DATASEC:
491 		return true;
492 	}
493 
494 	return false;
495 }
496 
497 static const char *btf_int_encoding_str(u8 encoding)
498 {
499 	if (encoding == 0)
500 		return "(none)";
501 	else if (encoding == BTF_INT_SIGNED)
502 		return "SIGNED";
503 	else if (encoding == BTF_INT_CHAR)
504 		return "CHAR";
505 	else if (encoding == BTF_INT_BOOL)
506 		return "BOOL";
507 	else
508 		return "UNKN";
509 }
510 
/* Accessors for the kind-specific data that immediately follows a
 * struct btf_type (see the file-top comment on the type section).
 */
static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}

static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
{
	return (const struct btf_var_secinfo *)(t + 1);
}

/* Look up the per-kind callbacks for @t. */
static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

/* A name offset must obey both the BTF spec limit and this blob's
 * actual string section length.
 */
static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	return BTF_STR_OFFSET_VALID(offset) &&
		offset < btf->hdr.str_len;
}
546 
/* A valid name character is: alphabetic ('_' also allowed) in the
 * first position, alphanumeric (or '_') afterwards; '.' is accepted
 * anywhere only when @dot_ok (section names may contain dots).
 */
static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
{
	if (c == '_')
		return true;
	if (c == '.')
		return dot_ok;

	return first ? isalpha(c) : isalnum(c);
}
557 
558 static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
559 {
560 	/* offset must be valid */
561 	const char *src = &btf->strings[offset];
562 	const char *src_limit;
563 
564 	if (!__btf_name_char_ok(*src, true, dot_ok))
565 		return false;
566 
567 	/* set a limit on identifier length */
568 	src_limit = src + KSYM_NAME_LEN;
569 	src++;
570 	while (*src && src < src_limit) {
571 		if (!__btf_name_char_ok(*src, false, dot_ok))
572 			return false;
573 		src++;
574 	}
575 
576 	return !*src;
577 }
578 
/* Only C-style identifier is permitted. This can be relaxed if
 * necessary.
 */
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, false);
}

/* Section names may additionally contain '.' (e.g. ".data"). */
static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, true);
}
591 
592 static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
593 {
594 	if (!offset)
595 		return "(anon)";
596 	else if (offset < btf->hdr.str_len)
597 		return &btf->strings[offset];
598 	else
599 		return "(invalid-name-offset)";
600 }
601 
602 const char *btf_name_by_offset(const struct btf *btf, u32 offset)
603 {
604 	if (offset < btf->hdr.str_len)
605 		return &btf->strings[offset];
606 
607 	return NULL;
608 }
609 
610 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
611 {
612 	if (type_id > btf->nr_types)
613 		return NULL;
614 
615 	return btf->types[type_id];
616 }
617 
618 /*
619  * Regular int is not a bit field and it must be either
620  * u8/u16/u32/u64 or __int128.
621  */
622 static bool btf_type_int_is_regular(const struct btf_type *t)
623 {
624 	u8 nr_bits, nr_bytes;
625 	u32 int_data;
626 
627 	int_data = btf_type_int(t);
628 	nr_bits = BTF_INT_BITS(int_data);
629 	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
630 	if (BITS_PER_BYTE_MASKED(nr_bits) ||
631 	    BTF_INT_OFFSET(int_data) ||
632 	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
633 	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
634 	     nr_bytes != (2 * sizeof(u64)))) {
635 		return false;
636 	}
637 
638 	return true;
639 }
640 
/*
 * Check that given struct member is a regular int with expected
 * offset and size.
 */
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	/* resolve the member's type down to its sized form */
	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		/* kflag layout: m->offset packs bitfield size and bit offset */
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		/* if kflag set, int should be a regular int and
		 * bit offset should be at byte boundary.
		 */
		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	/* non-kflag layout: m->offset is a plain bit offset */
	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}
681 
/* Print one formatted message to the verifier log.  Callers are
 * expected to have checked bpf_verifier_log_needed() already.
 */
__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

/* Like __btf_verifier_log() but returns early when nobody is
 * consuming the log.
 */
__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

/* Log one btf_type as "[type_id] KIND name <details> <message>".
 * @log_details selects whether the kind-specific ->log_details()
 * callback is invoked.
 */
__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* btf verifier prints all types it is processing via
	 * btf_verifier_log_type(..., fmt = NULL).
	 * Skip those prints for in-kernel BTF verification.
	 */
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

/* Convenience wrappers with and without the kind-specific details. */
#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)
749 
/* Log one struct/union member; re-logs the enclosing struct first
 * when called outside CHECK_META (i.e. on a resolve-phase error).
 */
__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	/* The CHECK_META phase already did a btf dump.
	 *
	 * If member is logged again, it must hit an error in
	 * parsing this member.  It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	/* with kflag, member->offset packs bitfield size + bit offset */
	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

/* Log one btf_var_secinfo entry of a DATASEC, plus an optional
 * trailing message.
 */
__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}
823 
/* Dump the parsed btf_header fields and the total blob size to the
 * log.  Skipped entirely for in-kernel (BPF_LOG_KERNEL) verification.
 */
static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}
847 
/* Append @t to btf->types[], growing the array by ~25% (min 16
 * slots, capped at BTF_MAX_TYPE) when needed.
 * Returns 0 on success, -E2BIG when the type limit is hit or
 * -ENOMEM on allocation failure.
 */
static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	/* < 2 because +1 for btf_void which is always in btf->types[0].
	 * btf_void is not accounted in btf->nr_types because btf_void
	 * does not come from the BTF file.
	 */
	if (btf->types_size - btf->nr_types < 2) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		/* nr_types + 1 entries: the extra one is btf_void at [0] */
		if (btf->nr_types == 0)
			new_types[0] = &btf_void;
		else
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * (btf->nr_types + 1));

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	/* type_ids start at 1; pre-increment keeps [0] for btf_void */
	btf->types[++(btf->nr_types)] = t;

	return 0;
}
891 
/* Assign a user-visible id to @btf from btf_idr.  Cyclic allocation
 * avoids immediately reusing recently freed ids.
 * Returns 0 on success or a negative errno.
 */
static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	/* the range starts at 1, so 0 should be impossible here */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

/* Remove @btf's id from btf_idr (the reverse of btf_alloc_id()). */
static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map.  Some of the map_delete_elem()
	 * implementation may have irq disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}
927 
/* Release everything owned by @btf, including @btf itself. */
static void btf_free(struct btf *btf)
{
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}

/* RCU callback scheduled by btf_put(). */
static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}

/* Drop a reference; on the last one, retire the id immediately and
 * free the object after an RCU grace period.
 */
void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}
951 
/* Allocate the per-type_id arrays needed by the resolve pass: the
 * resolved id/size tables (owned by btf, freed in btf_free()) and
 * the DFS visit states (owned by env).  The "+1" accounts for
 * btf_void at type_id 0.  Returns 0 or -ENOMEM.
 */
static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	/* +1 for btf_void */
	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	/* kvfree(NULL) is a no-op, so partial failures are fine */
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}

/* Free the verifier scratch state; btf->resolved_* stay with btf. */
static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}
994 
/* A "sink" terminates the resolve DFS: reaching one of these kinds
 * (per the current resolve_mode) means the chain is fully followed.
 */
static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a sink
		 * for ptr
		 */
		return !btf_type_is_modifier(next_type) &&
			!btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void, ptr, func or func_proto is a sink
		 * for struct and array
		 */
		return !btf_type_is_modifier(next_type) &&
			!btf_type_is_array(next_type) &&
			!btf_type_is_struct(next_type);
	default:
		BUG();
	}
}

/* True once the DFS has fully resolved @type_id. */
static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	return env->visit_states[type_id] == RESOLVED;
}
1025 
/* Push @t onto the resolve DFS stack and mark it VISITED.
 * Returns -E2BIG when MAX_RESOLVE_DEPTH is exceeded, or -EEXIST when
 * the type was already VISITED (a back edge, i.e. a loop) or RESOLVED.
 */
static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (env->visit_states[type_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	/* the first pushed type decides the resolve mode */
	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}

/* Record where to resume member iteration for the top stack frame. */
static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}

/* Pop the top frame, storing its final resolved id/size and marking
 * the type RESOLVED.
 */
static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}

/* Return the top stack frame without popping, or NULL when empty.
 * NOTE(review): "peak" looks like a typo for "peek", but the name is
 * referenced elsewhere in this file, so it is kept as-is.
 */
static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}
1076 
/* Resolve the size of a passed-in "type"
 *
 * type: is an array (e.g. u32 array[x][y])
 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
 * *type_size: (x * y * sizeof(u32)).  Hence, *type_size always
 *             corresponds to the return type.
 * *elem_type: u32
 * *elem_id: id of u32
 * *total_nelems: (x * y).  Hence, individual elem size is
 *                (*type_size / *total_nelems)
 * *type_id: id of type if it's changed within the function, 0 if not
 *
 * type: is not an array (e.g. const struct X)
 * return type: type "struct X"
 * *type_size: sizeof(struct X)
 * *elem_type: same as return type ("struct X")
 * *elem_id: 0
 * *total_nelems: 1
 * *type_id: id of type if it's changed within the function, 0 if not
 *
 * All out-pointers except type_size may be NULL when the caller does
 * not need that piece of information.  Returns ERR_PTR(-EINVAL) for
 * sizeless kinds, overflowing totals or overlong modifier/array chains.
 */
static const struct btf_type *
__btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		   u32 *type_size, const struct btf_type **elem_type,
		   u32 *elem_id, u32 *total_nelems, u32 *type_id)
{
	const struct btf_type *array_type = NULL;
	const struct btf_array *array = NULL;
	u32 i, size, nelems = 1, id = 0;

	/* Bound the walk so a malformed (cyclic) type chain terminates */
	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
		switch (BTF_INFO_KIND(type->info)) {
		/* type->size can be used */
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
			size = type->size;
			goto resolved;

		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto resolved;

		/* Modifiers: remember the last id seen, keep following */
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
			id = type->type;
			type = btf_type_by_id(btf, type->type);
			break;

		case BTF_KIND_ARRAY:
			/* Outermost array is what gets returned */
			if (!array_type)
				array_type = type;
			array = btf_type_array(type);
			/* Reject u32 overflow of the element count */
			if (nelems && array->nelems > U32_MAX / nelems)
				return ERR_PTR(-EINVAL);
			nelems *= array->nelems;
			type = btf_type_by_id(btf, array->type);
			break;

		/* type without size */
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return ERR_PTR(-EINVAL);

resolved:
	/* Reject u32 overflow of nelems * size */
	if (nelems && size > U32_MAX / nelems)
		return ERR_PTR(-EINVAL);

	*type_size = nelems * size;
	if (total_nelems)
		*total_nelems = nelems;
	if (elem_type)
		*elem_type = type;
	if (elem_id)
		*elem_id = array ? array->type : 0;
	if (type_id && id)
		*type_id = id;

	return array_type ? : type;
}
1163 
1164 const struct btf_type *
1165 btf_resolve_size(const struct btf *btf, const struct btf_type *type,
1166 		 u32 *type_size)
1167 {
1168 	return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
1169 }
1170 
1171 /* The input param "type_id" must point to a needs_resolve type */
1172 static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
1173 						  u32 *type_id)
1174 {
1175 	*type_id = btf->resolved_ids[*type_id];
1176 	return btf_type_by_id(btf, *type_id);
1177 }
1178 
1179 const struct btf_type *btf_type_id_size(const struct btf *btf,
1180 					u32 *type_id, u32 *ret_size)
1181 {
1182 	const struct btf_type *size_type;
1183 	u32 size_type_id = *type_id;
1184 	u32 size = 0;
1185 
1186 	size_type = btf_type_by_id(btf, size_type_id);
1187 	if (btf_type_nosize_or_null(size_type))
1188 		return NULL;
1189 
1190 	if (btf_type_has_size(size_type)) {
1191 		size = size_type->size;
1192 	} else if (btf_type_is_array(size_type)) {
1193 		size = btf->resolved_sizes[size_type_id];
1194 	} else if (btf_type_is_ptr(size_type)) {
1195 		size = sizeof(void *);
1196 	} else {
1197 		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
1198 				 !btf_type_is_var(size_type)))
1199 			return NULL;
1200 
1201 		size_type_id = btf->resolved_ids[size_type_id];
1202 		size_type = btf_type_by_id(btf, size_type_id);
1203 		if (btf_type_nosize_or_null(size_type))
1204 			return NULL;
1205 		else if (btf_type_has_size(size_type))
1206 			size = size_type->size;
1207 		else if (btf_type_is_array(size_type))
1208 			size = btf->resolved_sizes[size_type_id];
1209 		else if (btf_type_is_ptr(size_type))
1210 			size = sizeof(void *);
1211 		else
1212 			return NULL;
1213 	}
1214 
1215 	*type_id = size_type_id;
1216 	if (ret_size)
1217 		*ret_size = size;
1218 
1219 	return size_type;
1220 }
1221 
1222 static int btf_df_check_member(struct btf_verifier_env *env,
1223 			       const struct btf_type *struct_type,
1224 			       const struct btf_member *member,
1225 			       const struct btf_type *member_type)
1226 {
1227 	btf_verifier_log_basic(env, struct_type,
1228 			       "Unsupported check_member");
1229 	return -EINVAL;
1230 }
1231 
1232 static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1233 				     const struct btf_type *struct_type,
1234 				     const struct btf_member *member,
1235 				     const struct btf_type *member_type)
1236 {
1237 	btf_verifier_log_basic(env, struct_type,
1238 			       "Unsupported check_kflag_member");
1239 	return -EINVAL;
1240 }
1241 
1242 /* Used for ptr, array and struct/union type members.
1243  * int, enum and modifier types have their specific callback functions.
1244  */
1245 static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1246 					  const struct btf_type *struct_type,
1247 					  const struct btf_member *member,
1248 					  const struct btf_type *member_type)
1249 {
1250 	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1251 		btf_verifier_log_member(env, struct_type, member,
1252 					"Invalid member bitfield_size");
1253 		return -EINVAL;
1254 	}
1255 
1256 	/* bitfield size is 0, so member->offset represents bit offset only.
1257 	 * It is safe to call non kflag check_member variants.
1258 	 */
1259 	return btf_type_ops(member_type)->check_member(env, struct_type,
1260 						       member,
1261 						       member_type);
1262 }
1263 
1264 static int btf_df_resolve(struct btf_verifier_env *env,
1265 			  const struct resolve_vertex *v)
1266 {
1267 	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1268 	return -EINVAL;
1269 }
1270 
1271 static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
1272 			    u32 type_id, void *data, u8 bits_offsets,
1273 			    struct seq_file *m)
1274 {
1275 	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1276 }
1277 
/* Validate a non-kflag int member of a struct/union: its bits
 * (including the int's own intra-byte offset) must not exceed 128 and
 * must fit entirely within struct_type->size bytes.
 */
static int btf_int_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 int_data = btf_type_int(member_type);
	u32 struct_bits_off = member->offset;
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;
	u32 bytes_offset;

	/* Guard the addition below against u32 overflow */
	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
		btf_verifier_log_member(env, struct_type, member,
					"bits_offset exceeds U32_MAX");
		return -EINVAL;
	}

	/* Effective bit offset = member offset + the int's own offset */
	struct_bits_off += BTF_INT_OFFSET(int_data);
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = BTF_INT_BITS(int_data) +
		BITS_PER_BYTE_MASKED(struct_bits_off);

	/* seq_show copies at most a u128 worth of bits */
	if (nr_copy_bits > BITS_PER_U128) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 128");
		return -EINVAL;
	}

	/* Check struct_size < bytes_offset first so the u32 subtraction
	 * cannot wrap around.
	 */
	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
1315 
/* Validate an int member of a struct/union with kind_flag set:
 * member->offset then encodes both a bitfield size and a bit offset.
 * The base type must be a regular int, the bitfield must fit in the
 * base type, and the copied bits must fit within the struct.
 */
static int btf_int_check_kflag_member(struct btf_verifier_env *env,
				      const struct btf_type *struct_type,
				      const struct btf_member *member,
				      const struct btf_type *member_type)
{
	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
	u32 int_data = btf_type_int(member_type);
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;

	/* a regular int type is required for the kflag int member */
	if (!btf_type_int_is_regular(member_type)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member base type");
		return -EINVAL;
	}

	/* check sanity of bitfield size */
	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
	nr_int_data_bits = BTF_INT_BITS(int_data);
	if (!nr_bits) {
		/* Not a bitfield member, member offset must be at byte
		 * boundary.
		 */
		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
			btf_verifier_log_member(env, struct_type, member,
						"Invalid member offset");
			return -EINVAL;
		}

		/* Whole-int member: all of the base type's bits */
		nr_bits = nr_int_data_bits;
	} else if (nr_bits > nr_int_data_bits) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	/* seq_show copies at most a u128 worth of bits */
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
	if (nr_copy_bits > BITS_PER_U128) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 128");
		return -EINVAL;
	}

	/* Check struct_size < bytes_offset first so the u32 subtraction
	 * cannot wrap around.
	 */
	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
1371 
/* Validate the metadata of a BTF_KIND_INT type.
 *
 * Returns the number of extra bytes consumed after struct btf_type
 * (the u32 int_data word) on success, a negative errno on failure.
 */
static s32 btf_int_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
	u16 encoding;

	/* An int is followed by one u32 of encoding/offset/bits data */
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	/* No bits outside the encoding/offset/bits fields may be set */
	int_data = btf_type_int(t);
	if (int_data & ~BTF_INT_MASK) {
		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
				       int_data);
		return -EINVAL;
	}

	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);

	/* bits + intra-byte offset must fit in a u128 */
	if (nr_bits > BITS_PER_U128) {
		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
				      BITS_PER_U128);
		return -EINVAL;
	}

	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
		return -EINVAL;
	}

	/*
	 * Only one of the encoding bits is allowed and it
	 * should be sufficient for the pretty print purpose (i.e. decoding).
	 * Multiple bits can be allowed later if it is found
	 * to be insufficient.
	 */
	encoding = BTF_INT_ENCODING(int_data);
	if (encoding &&
	    encoding != BTF_INT_SIGNED &&
	    encoding != BTF_INT_CHAR &&
	    encoding != BTF_INT_BOOL) {
		btf_verifier_log_type(env, t, "Unsupported encoding");
		return -ENOTSUPP;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
1435 
1436 static void btf_int_log(struct btf_verifier_env *env,
1437 			const struct btf_type *t)
1438 {
1439 	int int_data = btf_type_int(t);
1440 
1441 	btf_verifier_log(env,
1442 			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1443 			 t->size, BTF_INT_OFFSET(int_data),
1444 			 BTF_INT_BITS(int_data),
1445 			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1446 }
1447 
1448 static void btf_int128_print(struct seq_file *m, void *data)
1449 {
1450 	/* data points to a __int128 number.
1451 	 * Suppose
1452 	 *     int128_num = *(__int128 *)data;
1453 	 * The below formulas shows what upper_num and lower_num represents:
1454 	 *     upper_num = int128_num >> 64;
1455 	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
1456 	 */
1457 	u64 upper_num, lower_num;
1458 
1459 #ifdef __BIG_ENDIAN_BITFIELD
1460 	upper_num = *(u64 *)data;
1461 	lower_num = *(u64 *)(data + 8);
1462 #else
1463 	upper_num = *(u64 *)(data + 8);
1464 	lower_num = *(u64 *)data;
1465 #endif
1466 	if (upper_num == 0)
1467 		seq_printf(m, "0x%llx", lower_num);
1468 	else
1469 		seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
1470 }
1471 
1472 static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
1473 			     u16 right_shift_bits)
1474 {
1475 	u64 upper_num, lower_num;
1476 
1477 #ifdef __BIG_ENDIAN_BITFIELD
1478 	upper_num = print_num[0];
1479 	lower_num = print_num[1];
1480 #else
1481 	upper_num = print_num[1];
1482 	lower_num = print_num[0];
1483 #endif
1484 
1485 	/* shake out un-needed bits by shift/or operations */
1486 	if (left_shift_bits >= 64) {
1487 		upper_num = lower_num << (left_shift_bits - 64);
1488 		lower_num = 0;
1489 	} else {
1490 		upper_num = (upper_num << left_shift_bits) |
1491 			    (lower_num >> (64 - left_shift_bits));
1492 		lower_num = lower_num << left_shift_bits;
1493 	}
1494 
1495 	if (right_shift_bits >= 64) {
1496 		lower_num = upper_num >> (right_shift_bits - 64);
1497 		upper_num = 0;
1498 	} else {
1499 		lower_num = (lower_num >> right_shift_bits) |
1500 			    (upper_num << (64 - right_shift_bits));
1501 		upper_num = upper_num >> right_shift_bits;
1502 	}
1503 
1504 #ifdef __BIG_ENDIAN_BITFIELD
1505 	print_num[0] = upper_num;
1506 	print_num[1] = lower_num;
1507 #else
1508 	print_num[0] = lower_num;
1509 	print_num[1] = upper_num;
1510 #endif
1511 }
1512 
/* Print a bitfield of @nr_bits starting @bits_offset bits into @data.
 *
 * The relevant bytes are copied into a local u128 (two u64s), shifted
 * so only the wanted bits remain, then printed in hex.  Callers must
 * guarantee nr_bits + bits_offset <= 128.
 */
static void btf_bitfield_seq_show(void *data, u8 bits_offset,
				  u8 nr_bits, struct seq_file *m)
{
	u16 left_shift_bits, right_shift_bits;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num[2] = {};

	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	memcpy(print_num, data, nr_copy_bytes);

	/* Left shift drops the bits above the field, right shift then
	 * drops the bits below it; the offsets differ by endianness.
	 */
#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U128 - nr_bits;

	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
	btf_int128_print(m, print_num);
}
1536 
1537 
/* Show an int whose value is not byte-aligned or not a whole number
 * of bytes, via the generic bitfield printer.
 */
static void btf_int_bits_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  void *data, u8 bits_offset,
				  struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;

	/*
	 * bits_offset is at most 7.  BTF_INT_OFFSET() + BTF_INT_BITS()
	 * cannot exceed 128 bits (checked in btf_int_check_meta()), so
	 * the u8 sum below cannot overflow.
	 */
	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
	/* Re-split the total offset into a byte and a sub-byte part */
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
}
1556 
/* Show an int value: byte-aligned power-of-two widths are printed
 * directly with the matching format; anything else goes through the
 * bit-level printer.
 */
static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 encoding = BTF_INT_ENCODING(int_data);
	bool sign = encoding & BTF_INT_SIGNED;
	u8 nr_bits = BTF_INT_BITS(int_data);

	/* Any bit offset or non-whole-byte width forces the slow path */
	if (bits_offset || BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(nr_bits)) {
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
		return;
	}

	switch (nr_bits) {
	case 128:
		btf_int128_print(m, data);
		break;
	case 64:
		if (sign)
			seq_printf(m, "%lld", *(s64 *)data);
		else
			seq_printf(m, "%llu", *(u64 *)data);
		break;
	case 32:
		if (sign)
			seq_printf(m, "%d", *(s32 *)data);
		else
			seq_printf(m, "%u", *(u32 *)data);
		break;
	case 16:
		if (sign)
			seq_printf(m, "%d", *(s16 *)data);
		else
			seq_printf(m, "%u", *(u16 *)data);
		break;
	case 8:
		if (sign)
			seq_printf(m, "%d", *(s8 *)data);
		else
			seq_printf(m, "%u", *(u8 *)data);
		break;
	default:
		/* e.g. whole-byte widths like 24 or 40 bits */
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
	}
}
1604 
/* Handlers for BTF_KIND_INT */
static const struct btf_kind_operations int_ops = {
	.check_meta = btf_int_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_int_check_member,
	.check_kflag_member = btf_int_check_kflag_member,
	.log_details = btf_int_log,
	.seq_show = btf_int_seq_show,
};
1613 
1614 static int btf_modifier_check_member(struct btf_verifier_env *env,
1615 				     const struct btf_type *struct_type,
1616 				     const struct btf_member *member,
1617 				     const struct btf_type *member_type)
1618 {
1619 	const struct btf_type *resolved_type;
1620 	u32 resolved_type_id = member->type;
1621 	struct btf_member resolved_member;
1622 	struct btf *btf = env->btf;
1623 
1624 	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1625 	if (!resolved_type) {
1626 		btf_verifier_log_member(env, struct_type, member,
1627 					"Invalid member");
1628 		return -EINVAL;
1629 	}
1630 
1631 	resolved_member = *member;
1632 	resolved_member.type = resolved_type_id;
1633 
1634 	return btf_type_ops(resolved_type)->check_member(env, struct_type,
1635 							 &resolved_member,
1636 							 resolved_type);
1637 }
1638 
/* kflag member check for modifier kinds: follow the modifier chain to
 * the real member type and re-run the kflag check on it.
 */
static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
					   const struct btf_type *struct_type,
					   const struct btf_member *member,
					   const struct btf_type *member_type)
{
	const struct btf_type *resolved_type;
	u32 resolved_type_id = member->type;
	struct btf_member resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	/* Re-check with the member retargeted at the resolved type */
	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
							       &resolved_member,
							       resolved_type);
}
1663 
1664 static int btf_ptr_check_member(struct btf_verifier_env *env,
1665 				const struct btf_type *struct_type,
1666 				const struct btf_member *member,
1667 				const struct btf_type *member_type)
1668 {
1669 	u32 struct_size, struct_bits_off, bytes_offset;
1670 
1671 	struct_size = struct_type->size;
1672 	struct_bits_off = member->offset;
1673 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1674 
1675 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1676 		btf_verifier_log_member(env, struct_type, member,
1677 					"Member is not byte aligned");
1678 		return -EINVAL;
1679 	}
1680 
1681 	if (struct_size - bytes_offset < sizeof(void *)) {
1682 		btf_verifier_log_member(env, struct_type, member,
1683 					"Member exceeds struct_size");
1684 		return -EINVAL;
1685 	}
1686 
1687 	return 0;
1688 }
1689 
1690 static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1691 				   const struct btf_type *t,
1692 				   u32 meta_left)
1693 {
1694 	if (btf_type_vlen(t)) {
1695 		btf_verifier_log_type(env, t, "vlen != 0");
1696 		return -EINVAL;
1697 	}
1698 
1699 	if (btf_type_kflag(t)) {
1700 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1701 		return -EINVAL;
1702 	}
1703 
1704 	if (!BTF_TYPE_ID_VALID(t->type)) {
1705 		btf_verifier_log_type(env, t, "Invalid type_id");
1706 		return -EINVAL;
1707 	}
1708 
1709 	/* typedef type must have a valid name, and other ref types,
1710 	 * volatile, const, restrict, should have a null name.
1711 	 */
1712 	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
1713 		if (!t->name_off ||
1714 		    !btf_name_valid_identifier(env->btf, t->name_off)) {
1715 			btf_verifier_log_type(env, t, "Invalid name");
1716 			return -EINVAL;
1717 		}
1718 	} else {
1719 		if (t->name_off) {
1720 			btf_verifier_log_type(env, t, "Invalid name");
1721 			return -EINVAL;
1722 		}
1723 	}
1724 
1725 	btf_verifier_log_type(env, t, NULL);
1726 
1727 	return 0;
1728 }
1729 
/* Resolve a modifier (typedef/volatile/const/restrict) vertex to its
 * target type, pushing the target first if it still needs resolving.
 */
static int btf_modifier_resolve(struct btf_verifier_env *env,
				const struct resolve_vertex *v)
{
	const struct btf_type *t = v->t;
	const struct btf_type *next_type;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* Resolve the target first unless it is a sink for this pass
	 * or already done.
	 */
	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* Figure out the resolved next_type_id with size.
	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes such that it can
	 * save us a few type-following when we use it later (e.g. in
	 * pretty print).
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "typedef void new_void", "const void"...etc */
		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}
1771 
/* Resolve a BTF_KIND_VAR vertex: the variable's type must resolve to
 * something concrete with a non-zero size.
 */
static int btf_var_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* Resolve the target first unless it is a sink for this pass
	 * or already done.
	 */
	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* A modifier chain may have stopped at a pointer; continue
	 * from that last-resolved pointer if it is still unresolved.
	 */
	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	/* We must resolve to something concrete at this point, no
	 * forward types or similar that would resolve to size of
	 * zero is allowed.
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}
1817 
/* Resolve a BTF_KIND_PTR vertex: resolve its target type, allowing
 * void/fwd/func_proto targets (which have no size).
 */
static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* Resolve the target first unless it is a sink for this pass
	 * or already done.
	 */
	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr will not be referring back to
	 * the current ptr (t).
	 */
	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	/* Sizeless targets are fine for a pointer only when they are
	 * void, a forward declaration or a function prototype.
	 */
	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}
1874 
1875 static void btf_modifier_seq_show(const struct btf *btf,
1876 				  const struct btf_type *t,
1877 				  u32 type_id, void *data,
1878 				  u8 bits_offset, struct seq_file *m)
1879 {
1880 	if (btf->resolved_ids)
1881 		t = btf_type_id_resolve(btf, &type_id);
1882 	else
1883 		t = btf_type_skip_modifiers(btf, type_id, NULL);
1884 
1885 	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1886 }
1887 
1888 static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
1889 			     u32 type_id, void *data, u8 bits_offset,
1890 			     struct seq_file *m)
1891 {
1892 	t = btf_type_id_resolve(btf, &type_id);
1893 
1894 	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1895 }
1896 
1897 static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1898 			     u32 type_id, void *data, u8 bits_offset,
1899 			     struct seq_file *m)
1900 {
1901 	/* It is a hashed value */
1902 	seq_printf(m, "%p", *(void **)data);
1903 }
1904 
1905 static void btf_ref_type_log(struct btf_verifier_env *env,
1906 			     const struct btf_type *t)
1907 {
1908 	btf_verifier_log(env, "type_id=%u", t->type);
1909 }
1910 
/* Handlers for the modifier kinds: typedef, volatile, const, restrict */
static struct btf_kind_operations modifier_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_modifier_resolve,
	.check_member = btf_modifier_check_member,
	.check_kflag_member = btf_modifier_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_modifier_seq_show,
};
1919 
/* Handlers for BTF_KIND_PTR */
static struct btf_kind_operations ptr_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_ptr_resolve,
	.check_member = btf_ptr_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_ptr_seq_show,
};
1928 
1929 static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1930 			      const struct btf_type *t,
1931 			      u32 meta_left)
1932 {
1933 	if (btf_type_vlen(t)) {
1934 		btf_verifier_log_type(env, t, "vlen != 0");
1935 		return -EINVAL;
1936 	}
1937 
1938 	if (t->type) {
1939 		btf_verifier_log_type(env, t, "type != 0");
1940 		return -EINVAL;
1941 	}
1942 
1943 	/* fwd type must have a valid name */
1944 	if (!t->name_off ||
1945 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
1946 		btf_verifier_log_type(env, t, "Invalid name");
1947 		return -EINVAL;
1948 	}
1949 
1950 	btf_verifier_log_type(env, t, NULL);
1951 
1952 	return 0;
1953 }
1954 
static void btf_fwd_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	/* kind_flag distinguishes a union fwd from a struct fwd */
	const char *kind = btf_type_kflag(t) ? "union" : "struct";

	btf_verifier_log(env, "%s", kind);
}
1960 
/* Handlers for BTF_KIND_FWD; fwds cannot be members nor be shown */
static struct btf_kind_operations fwd_ops = {
	.check_meta = btf_fwd_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_fwd_type_log,
	.seq_show = btf_df_seq_show,
};
1969 
1970 static int btf_array_check_member(struct btf_verifier_env *env,
1971 				  const struct btf_type *struct_type,
1972 				  const struct btf_member *member,
1973 				  const struct btf_type *member_type)
1974 {
1975 	u32 struct_bits_off = member->offset;
1976 	u32 struct_size, bytes_offset;
1977 	u32 array_type_id, array_size;
1978 	struct btf *btf = env->btf;
1979 
1980 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1981 		btf_verifier_log_member(env, struct_type, member,
1982 					"Member is not byte aligned");
1983 		return -EINVAL;
1984 	}
1985 
1986 	array_type_id = member->type;
1987 	btf_type_id_size(btf, &array_type_id, &array_size);
1988 	struct_size = struct_type->size;
1989 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1990 	if (struct_size - bytes_offset < array_size) {
1991 		btf_verifier_log_member(env, struct_type, member,
1992 					"Member exceeds struct_size");
1993 		return -EINVAL;
1994 	}
1995 
1996 	return 0;
1997 }
1998 
/* Validate the metadata of a BTF_KIND_ARRAY type.
 *
 * Returns the number of extra bytes consumed after struct btf_type
 * (sizeof(struct btf_array)) on success, a negative errno on failure.
 */
static s32 btf_array_check_meta(struct btf_verifier_env *env,
				const struct btf_type *t,
				u32 meta_left)
{
	const struct btf_array *array = btf_type_array(t);
	u32 meta_needed = sizeof(*array);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* array type should not have a name */
	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	/* The total size is derived during resolve, not stored here */
	if (t->size) {
		btf_verifier_log_type(env, t, "size != 0");
		return -EINVAL;
	}

	/* Array elem type and index type cannot be in type void,
	 * so !array->type and !array->index_type are not allowed.
	 */
	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
		btf_verifier_log_type(env, t, "Invalid elem");
		return -EINVAL;
	}

	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
		btf_verifier_log_type(env, t, "Invalid index");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
2051 
/* Resolve a BTF_KIND_ARRAY vertex: both the index type and the
 * element type must resolve; the index must be a regular int, the
 * element must have a size, and nelems * elem_size must not overflow.
 */
static int btf_array_resolve(struct btf_verifier_env *env,
			     const struct resolve_vertex *v)
{
	const struct btf_array *array = btf_type_array(v->t);
	const struct btf_type *elem_type, *index_type;
	u32 elem_type_id, index_type_id;
	struct btf *btf = env->btf;
	u32 elem_size;

	/* Check array->index_type */
	index_type_id = array->index_type;
	index_type = btf_type_by_id(btf, index_type_id);
	if (btf_type_nosize_or_null(index_type) ||
	    btf_type_is_resolve_source_only(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	/* Resolve the index type first if it still needs it */
	if (!env_type_is_resolve_sink(env, index_type) &&
	    !env_type_is_resolved(env, index_type_id))
		return env_stack_push(env, index_type, index_type_id);

	index_type = btf_type_id_size(btf, &index_type_id, NULL);
	if (!index_type || !btf_type_is_int(index_type) ||
	    !btf_type_int_is_regular(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	/* Check array->type */
	elem_type_id = array->type;
	elem_type = btf_type_by_id(btf, elem_type_id);
	if (btf_type_nosize_or_null(elem_type) ||
	    btf_type_is_resolve_source_only(elem_type)) {
		btf_verifier_log_type(env, v->t,
				      "Invalid elem");
		return -EINVAL;
	}

	/* Resolve the element type first if it still needs it */
	if (!env_type_is_resolve_sink(env, elem_type) &&
	    !env_type_is_resolved(env, elem_type_id))
		return env_stack_push(env, elem_type, elem_type_id);

	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	if (!elem_type) {
		btf_verifier_log_type(env, v->t, "Invalid elem");
		return -EINVAL;
	}

	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
		btf_verifier_log_type(env, v->t, "Invalid array of int");
		return -EINVAL;
	}

	/* Guard the total-size multiplication against u32 overflow */
	if (array->nelems && elem_size > U32_MAX / array->nelems) {
		btf_verifier_log_type(env, v->t,
				      "Array size overflows U32_MAX");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);

	return 0;
}
2116 
2117 static void btf_array_log(struct btf_verifier_env *env,
2118 			  const struct btf_type *t)
2119 {
2120 	const struct btf_array *array = btf_type_array(t);
2121 
2122 	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2123 			 array->type, array->index_type, array->nelems);
2124 }
2125 
2126 static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
2127 			       u32 type_id, void *data, u8 bits_offset,
2128 			       struct seq_file *m)
2129 {
2130 	const struct btf_array *array = btf_type_array(t);
2131 	const struct btf_kind_operations *elem_ops;
2132 	const struct btf_type *elem_type;
2133 	u32 i, elem_size, elem_type_id;
2134 
2135 	elem_type_id = array->type;
2136 	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2137 	elem_ops = btf_type_ops(elem_type);
2138 	seq_puts(m, "[");
2139 	for (i = 0; i < array->nelems; i++) {
2140 		if (i)
2141 			seq_puts(m, ",");
2142 
2143 		elem_ops->seq_show(btf, elem_type, elem_type_id, data,
2144 				   bits_offset, m);
2145 		data += elem_size;
2146 	}
2147 	seq_puts(m, "]");
2148 }
2149 
/* Verifier/show operations for BTF_KIND_ARRAY */
static struct btf_kind_operations array_ops = {
	.check_meta = btf_array_check_meta,
	.resolve = btf_array_resolve,
	.check_member = btf_array_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_array_log,
	.seq_show = btf_array_seq_show,
};
2158 
2159 static int btf_struct_check_member(struct btf_verifier_env *env,
2160 				   const struct btf_type *struct_type,
2161 				   const struct btf_member *member,
2162 				   const struct btf_type *member_type)
2163 {
2164 	u32 struct_bits_off = member->offset;
2165 	u32 struct_size, bytes_offset;
2166 
2167 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2168 		btf_verifier_log_member(env, struct_type, member,
2169 					"Member is not byte aligned");
2170 		return -EINVAL;
2171 	}
2172 
2173 	struct_size = struct_type->size;
2174 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2175 	if (struct_size - bytes_offset < member_type->size) {
2176 		btf_verifier_log_member(env, struct_type, member,
2177 					"Member exceeds struct_size");
2178 		return -EINVAL;
2179 	}
2180 
2181 	return 0;
2182 }
2183 
/* Validate the meta data of a struct/union: the trailing btf_member
 * array must fit in the remaining meta; names must be valid; members
 * must appear in non-decreasing bit-offset order and stay within
 * t->size; all union members must sit at bit offset 0.
 * Return the number of extra meta bytes consumed (the member array).
 */
static s32 btf_struct_check_meta(struct btf_verifier_env *env,
				 const struct btf_type *t,
				 u32 meta_left)
{
	bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
	const struct btf_member *member;
	u32 meta_needed, last_offset;
	struct btf *btf = env->btf;
	u32 struct_size = t->size;
	u32 offset;
	u16 i;

	meta_needed = btf_type_vlen(t) * sizeof(*member);
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	/* struct type either no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	last_offset = 0;
	for_each_member(i, t, member) {
		if (!btf_name_offset_valid(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member,
						"Invalid member name_offset:%u",
						member->name_off);
			return -EINVAL;
		}

		/* struct member either no name or a valid one */
		if (member->name_off &&
		    !btf_name_valid_identifier(btf, member->name_off)) {
			btf_verifier_log_member(env, t, member, "Invalid name");
			return -EINVAL;
		}
		/* A member cannot be in type void */
		if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
			btf_verifier_log_member(env, t, member,
						"Invalid type_id");
			return -EINVAL;
		}

		offset = btf_member_bit_offset(t, member);
		/* Every union member starts at the beginning of the union. */
		if (is_union && offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		/*
		 * Members must be in non-decreasing offset order;
		 * ">" instead of ">=" because the last member could be
		 * "char a[0];"
		 */
		if (last_offset > offset) {
			btf_verifier_log_member(env, t, member,
						"Invalid member bits_offset");
			return -EINVAL;
		}

		if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
			btf_verifier_log_member(env, t, member,
						"Member bits_offset exceeds its struct size");
			return -EINVAL;
		}

		btf_verifier_log_member(env, t, member, NULL);
		last_offset = offset;
	}

	return meta_needed;
}
2264 
/* Resolve a struct/union: walk the members in order, pushing any
 * not-yet-resolved member type onto the resolve stack (and resuming at
 * v->next_member when re-entered), then run each resolved member
 * through its kind's check_member()/check_kflag_member().
 */
static int btf_struct_resolve(struct btf_verifier_env *env,
			      const struct resolve_vertex *v)
{
	const struct btf_member *member;
	int err;
	u16 i;

	/* Before continue resolving the next_member,
	 * ensure the last member is indeed resolved to a
	 * type with size info.
	 */
	if (v->next_member) {
		const struct btf_type *last_member_type;
		const struct btf_member *last_member;
		u16 last_member_type_id;

		last_member = btf_type_member(v->t) + v->next_member - 1;
		last_member_type_id = last_member->type;
		if (WARN_ON_ONCE(!env_type_is_resolved(env,
						       last_member_type_id)))
			return -EINVAL;

		last_member_type = btf_type_by_id(env->btf,
						  last_member_type_id);
		if (btf_type_kflag(v->t))
			err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
								last_member,
								last_member_type);
		else
			err = btf_type_ops(last_member_type)->check_member(env, v->t,
								last_member,
								last_member_type);
		if (err)
			return err;
	}

	for_each_member_from(i, v->next_member, v->t, member) {
		u32 member_type_id = member->type;
		const struct btf_type *member_type = btf_type_by_id(env->btf,
								member_type_id);

		if (btf_type_nosize_or_null(member_type) ||
		    btf_type_is_resolve_source_only(member_type)) {
			btf_verifier_log_member(env, v->t, member,
						"Invalid member");
			return -EINVAL;
		}

		/* Defer to an unresolved member type, remembering where
		 * to pick up when this struct is revisited.
		 */
		if (!env_type_is_resolve_sink(env, member_type) &&
		    !env_type_is_resolved(env, member_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, member_type, member_type_id);
		}

		if (btf_type_kflag(v->t))
			err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
									    member,
									    member_type);
		else
			err = btf_type_ops(member_type)->check_member(env, v->t,
								      member,
								      member_type);
		if (err)
			return err;
	}

	env_stack_pop_resolved(env, 0, 0);

	return 0;
}
2335 
2336 static void btf_struct_log(struct btf_verifier_env *env,
2337 			   const struct btf_type *t)
2338 {
2339 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2340 }
2341 
/* Find 'struct bpf_spin_lock' in map value.
 * Return its byte offset (>= 0) if found exactly once,
 * -EINVAL if @t is not a struct or the lock is badly placed,
 * -E2BIG if more than one spin lock is present, and
 * -ENOENT if the struct has no spin lock member.
 */
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
{
	const struct btf_member *member;
	u32 i, off = -ENOENT;

	if (!__btf_type_is_struct(t))
		return -EINVAL;

	for_each_member(i, t, member) {
		const struct btf_type *member_type = btf_type_by_id(btf,
								    member->type);
		/* Match the member by kind, size and type name. */
		if (!__btf_type_is_struct(member_type))
			continue;
		if (member_type->size != sizeof(struct bpf_spin_lock))
			continue;
		if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
			   "bpf_spin_lock"))
			continue;
		if (off != -ENOENT)
			/* only one 'struct bpf_spin_lock' is allowed */
			return -E2BIG;
		off = btf_member_bit_offset(t, member);
		if (off % 8)
			/* valid C code cannot generate such BTF */
			return -EINVAL;
		off /= 8;
		if (off % __alignof__(struct bpf_spin_lock))
			/* valid struct bpf_spin_lock will be 4 byte aligned */
			return -EINVAL;
	}
	return off;
}
2378 
/* Emit a struct/union value at @data as "{v0,v1,...}", using '|' as
 * the separator for unions.  A member with a non-zero bitfield size is
 * printed via btf_bitfield_seq_show(); every other member delegates to
 * its own kind's seq_show() at the member's byte/bit offset.
 */
static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
				u32 type_id, void *data, u8 bits_offset,
				struct seq_file *m)
{
	const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
	const struct btf_member *member;
	u32 i;

	seq_puts(m, "{");
	for_each_member(i, t, member) {
		const struct btf_type *member_type = btf_type_by_id(btf,
								member->type);
		const struct btf_kind_operations *ops;
		u32 member_offset, bitfield_size;
		u32 bytes_offset;
		u8 bits8_offset;

		if (i)
			seq_puts(m, seq);

		/* Split the member's bit offset into byte and sub-byte parts */
		member_offset = btf_member_bit_offset(t, member);
		bitfield_size = btf_member_bitfield_size(t, member);
		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
		if (bitfield_size) {
			btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
					      bitfield_size, m);
		} else {
			ops = btf_type_ops(member_type);
			ops->seq_show(btf, member_type, member->type,
				      data + bytes_offset, bits8_offset, m);
		}
	}
	seq_puts(m, "}");
}
2414 
/* Verifier/show operations shared by BTF_KIND_STRUCT and BTF_KIND_UNION */
static struct btf_kind_operations struct_ops = {
	.check_meta = btf_struct_check_meta,
	.resolve = btf_struct_resolve,
	.check_member = btf_struct_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_struct_log,
	.seq_show = btf_struct_seq_show,
};
2423 
2424 static int btf_enum_check_member(struct btf_verifier_env *env,
2425 				 const struct btf_type *struct_type,
2426 				 const struct btf_member *member,
2427 				 const struct btf_type *member_type)
2428 {
2429 	u32 struct_bits_off = member->offset;
2430 	u32 struct_size, bytes_offset;
2431 
2432 	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2433 		btf_verifier_log_member(env, struct_type, member,
2434 					"Member is not byte aligned");
2435 		return -EINVAL;
2436 	}
2437 
2438 	struct_size = struct_type->size;
2439 	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2440 	if (struct_size - bytes_offset < member_type->size) {
2441 		btf_verifier_log_member(env, struct_type, member,
2442 					"Member exceeds struct_size");
2443 		return -EINVAL;
2444 	}
2445 
2446 	return 0;
2447 }
2448 
/* Check an enum-typed struct member when the struct uses the kind_flag
 * member encoding (bitfield size packed into member->offset).  A zero
 * bitfield size means a regular member: it must be byte aligned and is
 * treated as a full int.  A non-zero bitfield may not be wider than
 * int.  Either way the member must end within the enclosing struct.
 */
static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
				       const struct btf_type *struct_type,
				       const struct btf_member *member,
				       const struct btf_type *member_type)
{
	u32 struct_bits_off, nr_bits, bytes_end, struct_size;
	u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;

	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
	if (!nr_bits) {
		/* Not a bitfield: enforce byte alignment, assume int width */
		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
			btf_verifier_log_member(env, struct_type, member,
						"Member is not byte aligned");
			return -EINVAL;
		}

		nr_bits = int_bitsize;
	} else if (nr_bits > int_bitsize) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
	if (struct_size < bytes_end) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
2483 
/* Validate the meta data of a BTF_KIND_ENUM: the trailing btf_enum
 * array must fit in the remaining meta; kind_flag must be clear; the
 * size must be a power of 2 no larger than 8 bytes; every enumerator
 * must have a valid identifier name.
 * Return the number of extra meta bytes consumed (the btf_enum array).
 */
static s32 btf_enum_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	const struct btf_enum *enums = btf_type_enum(t);
	struct btf *btf = env->btf;
	u16 i, nr_enums;
	u32 meta_needed;

	nr_enums = btf_type_vlen(t);
	meta_needed = nr_enums * sizeof(*enums);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (t->size > 8 || !is_power_of_2(t->size)) {
		btf_verifier_log_type(env, t, "Unexpected size");
		return -EINVAL;
	}

	/* enum type either no name or a valid one */
	if (t->name_off &&
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for (i = 0; i < nr_enums; i++) {
		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
			btf_verifier_log(env, "\tInvalid name_offset:%u",
					 enums[i].name_off);
			return -EINVAL;
		}

		/* enum member must have a valid name */
		if (!enums[i].name_off ||
		    !btf_name_valid_identifier(btf, enums[i].name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}

		/* Skip the per-enumerator log noise for kernel-internal logs */
		if (env->log.level == BPF_LOG_KERNEL)
			continue;
		btf_verifier_log(env, "\t%s val=%d\n",
				 __btf_name_by_offset(btf, enums[i].name_off),
				 enums[i].val);
	}

	return meta_needed;
}
2545 
2546 static void btf_enum_log(struct btf_verifier_env *env,
2547 			 const struct btf_type *t)
2548 {
2549 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2550 }
2551 
2552 static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
2553 			      u32 type_id, void *data, u8 bits_offset,
2554 			      struct seq_file *m)
2555 {
2556 	const struct btf_enum *enums = btf_type_enum(t);
2557 	u32 i, nr_enums = btf_type_vlen(t);
2558 	int v = *(int *)data;
2559 
2560 	for (i = 0; i < nr_enums; i++) {
2561 		if (v == enums[i].val) {
2562 			seq_printf(m, "%s",
2563 				   __btf_name_by_offset(btf,
2564 							enums[i].name_off));
2565 			return;
2566 		}
2567 	}
2568 
2569 	seq_printf(m, "%d", v);
2570 }
2571 
/* Verifier/show operations for BTF_KIND_ENUM */
static struct btf_kind_operations enum_ops = {
	.check_meta = btf_enum_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_enum_check_member,
	.check_kflag_member = btf_enum_check_kflag_member,
	.log_details = btf_enum_log,
	.seq_show = btf_enum_seq_show,
};
2580 
2581 static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
2582 				     const struct btf_type *t,
2583 				     u32 meta_left)
2584 {
2585 	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
2586 
2587 	if (meta_left < meta_needed) {
2588 		btf_verifier_log_basic(env, t,
2589 				       "meta_left:%u meta_needed:%u",
2590 				       meta_left, meta_needed);
2591 		return -EINVAL;
2592 	}
2593 
2594 	if (t->name_off) {
2595 		btf_verifier_log_type(env, t, "Invalid name");
2596 		return -EINVAL;
2597 	}
2598 
2599 	if (btf_type_kflag(t)) {
2600 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2601 		return -EINVAL;
2602 	}
2603 
2604 	btf_verifier_log_type(env, t, NULL);
2605 
2606 	return meta_needed;
2607 }
2608 
/* Log a func_proto as "return=<type_id> args=(<id> <name>, ...)".
 * An empty arg list prints "void"; a trailing arg with type_id 0
 * prints as "vararg".
 */
static void btf_func_proto_log(struct btf_verifier_env *env,
			       const struct btf_type *t)
{
	const struct btf_param *args = (const struct btf_param *)(t + 1);
	u16 nr_args = btf_type_vlen(t), i;

	btf_verifier_log(env, "return=%u args=(", t->type);
	if (!nr_args) {
		btf_verifier_log(env, "void");
		goto done;
	}

	if (nr_args == 1 && !args[0].type) {
		/* Only one vararg */
		btf_verifier_log(env, "vararg");
		goto done;
	}

	/* First arg without a leading ", ", middle args with it */
	btf_verifier_log(env, "%u %s", args[0].type,
			 __btf_name_by_offset(env->btf,
					      args[0].name_off));
	for (i = 1; i < nr_args - 1; i++)
		btf_verifier_log(env, ", %u %s", args[i].type,
				 __btf_name_by_offset(env->btf,
						      args[i].name_off));

	/* The last arg is special: it may be the vararg marker */
	if (nr_args > 1) {
		const struct btf_param *last_arg = &args[nr_args - 1];

		if (last_arg->type)
			btf_verifier_log(env, ", %u %s", last_arg->type,
					 __btf_name_by_offset(env->btf,
							      last_arg->name_off));
		else
			btf_verifier_log(env, ", vararg");
	}

done:
	btf_verifier_log(env, ")");
}
2649 
/* Verifier/show operations for BTF_KIND_FUNC_PROTO */
static struct btf_kind_operations func_proto_ops = {
	.check_meta = btf_func_proto_check_meta,
	.resolve = btf_df_resolve,
	/*
	 * BTF_KIND_FUNC_PROTO cannot be directly referred by
	 * a struct's member.
	 *
	 * It should be a function pointer instead.
	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
	 *
	 * Hence, there is no btf_func_check_member().
	 */
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_func_proto_log,
	.seq_show = btf_df_seq_show,
};
2667 
2668 static s32 btf_func_check_meta(struct btf_verifier_env *env,
2669 			       const struct btf_type *t,
2670 			       u32 meta_left)
2671 {
2672 	if (!t->name_off ||
2673 	    !btf_name_valid_identifier(env->btf, t->name_off)) {
2674 		btf_verifier_log_type(env, t, "Invalid name");
2675 		return -EINVAL;
2676 	}
2677 
2678 	if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
2679 		btf_verifier_log_type(env, t, "Invalid func linkage");
2680 		return -EINVAL;
2681 	}
2682 
2683 	if (btf_type_kflag(t)) {
2684 		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2685 		return -EINVAL;
2686 	}
2687 
2688 	btf_verifier_log_type(env, t, NULL);
2689 
2690 	return 0;
2691 }
2692 
/* Verifier/show operations for BTF_KIND_FUNC */
static struct btf_kind_operations func_ops = {
	.check_meta = btf_func_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_df_seq_show,
};
2701 
/* Validate the meta data of a BTF_KIND_VAR: a trailing struct btf_var
 * must fit in the remaining meta; the var needs a valid name, a
 * non-void type and a supported linkage (static or global-allocated).
 * Return the number of extra meta bytes consumed (sizeof(btf_var)).
 */
static s32 btf_var_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	const struct btf_var *var;
	u32 meta_needed = sizeof(*var);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	/* Uses __btf_name_valid(..., true), which is looser than the
	 * btf_name_valid_identifier() check applied to struct/enum
	 * names — NOTE(review): presumably to admit compiler-generated
	 * variable names; confirm against __btf_name_valid().
	 */
	if (!t->name_off ||
	    !__btf_name_valid(env->btf, t->name_off, true)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	/* A var cannot be in type void */
	if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	var = btf_type_var(t);
	if (var->linkage != BTF_VAR_STATIC &&
	    var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
		btf_verifier_log_type(env, t, "Linkage not supported");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
2749 
2750 static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
2751 {
2752 	const struct btf_var *var = btf_type_var(t);
2753 
2754 	btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
2755 }
2756 
/* Verifier/show operations for BTF_KIND_VAR */
static const struct btf_kind_operations var_ops = {
	.check_meta		= btf_var_check_meta,
	.resolve		= btf_var_resolve,
	.check_member		= btf_df_check_member,
	.check_kflag_member	= btf_df_check_kflag_member,
	.log_details		= btf_var_log,
	.seq_show		= btf_var_seq_show,
};
2765 
/* Validate the meta data of a BTF_KIND_DATASEC: the trailing
 * btf_var_secinfo array must fit in the remaining meta; the section
 * needs a valid name, a non-zero size and at least one entry; entries
 * must be sorted by offset, lie fully inside the section, and their
 * sizes must not sum past the section size.
 * Return the number of extra meta bytes consumed (the vsi array).
 */
static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
				  const struct btf_type *t,
				  u32 meta_left)
{
	const struct btf_var_secinfo *vsi;
	/* u64 so offset+size accumulation below cannot wrap */
	u64 last_vsi_end_off = 0, sum = 0;
	u32 i, meta_needed;

	meta_needed = btf_type_vlen(t) * sizeof(*vsi);
	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (!btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen == 0");
		return -EINVAL;
	}

	if (!t->size) {
		btf_verifier_log_type(env, t, "size == 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	/* Section names (e.g. ".bss") are not C identifiers */
	if (!t->name_off ||
	    !btf_name_valid_section(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for_each_vsi(i, t, vsi) {
		/* A var cannot be in type void */
		if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid type_id");
			return -EINVAL;
		}

		/* Entries must be sorted and start inside the section */
		if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset");
			return -EINVAL;
		}

		if (!vsi->size || vsi->size > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid size");
			return -EINVAL;
		}

		last_vsi_end_off = vsi->offset + vsi->size;
		if (last_vsi_end_off > t->size) {
			btf_verifier_log_vsi(env, t, vsi,
					     "Invalid offset+size");
			return -EINVAL;
		}

		btf_verifier_log_vsi(env, t, vsi, NULL);
		sum += vsi->size;
	}

	if (t->size < sum) {
		btf_verifier_log_type(env, t, "Invalid btf_info size");
		return -EINVAL;
	}

	return meta_needed;
}
2843 
/* Resolve a DATASEC: every entry must reference a VAR whose underlying
 * type resolves to a size no larger than the entry's declared size.
 * A not-yet-resolved VAR is pushed on the resolve stack and this
 * function is re-entered at v->next_member.
 */
static int btf_datasec_resolve(struct btf_verifier_env *env,
			       const struct resolve_vertex *v)
{
	const struct btf_var_secinfo *vsi;
	struct btf *btf = env->btf;
	u16 i;

	for_each_vsi_from(i, v->next_member, v->t, vsi) {
		u32 var_type_id = vsi->type, type_id, type_size = 0;
		const struct btf_type *var_type = btf_type_by_id(env->btf,
								 var_type_id);
		if (!var_type || !btf_type_is_var(var_type)) {
			btf_verifier_log_vsi(env, v->t, vsi,
					     "Not a VAR kind member");
			return -EINVAL;
		}

		/* Defer to an unresolved VAR, remembering where to resume */
		if (!env_type_is_resolve_sink(env, var_type) &&
		    !env_type_is_resolved(env, var_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, var_type, var_type_id);
		}

		/* The var's underlying type must have a size */
		type_id = var_type->type;
		if (!btf_type_id_size(btf, &type_id, &type_size)) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
			return -EINVAL;
		}

		if (vsi->size < type_size) {
			btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, 0, 0);
	return 0;
}
2882 
2883 static void btf_datasec_log(struct btf_verifier_env *env,
2884 			    const struct btf_type *t)
2885 {
2886 	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2887 }
2888 
2889 static void btf_datasec_seq_show(const struct btf *btf,
2890 				 const struct btf_type *t, u32 type_id,
2891 				 void *data, u8 bits_offset,
2892 				 struct seq_file *m)
2893 {
2894 	const struct btf_var_secinfo *vsi;
2895 	const struct btf_type *var;
2896 	u32 i;
2897 
2898 	seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
2899 	for_each_vsi(i, t, vsi) {
2900 		var = btf_type_by_id(btf, vsi->type);
2901 		if (i)
2902 			seq_puts(m, ",");
2903 		btf_type_ops(var)->seq_show(btf, var, vsi->type,
2904 					    data + vsi->offset, bits_offset, m);
2905 	}
2906 	seq_puts(m, "}");
2907 }
2908 
/* Verifier/show operations for BTF_KIND_DATASEC */
static const struct btf_kind_operations datasec_ops = {
	.check_meta		= btf_datasec_check_meta,
	.resolve		= btf_datasec_resolve,
	.check_member		= btf_df_check_member,
	.check_kflag_member	= btf_df_check_kflag_member,
	.log_details		= btf_datasec_log,
	.seq_show		= btf_datasec_seq_show,
};
2917 
/* Semantic check of a func_proto after all type metas are loaded:
 * the return type (void, i.e. t->type == 0, is allowed) and every arg
 * type must resolve to a sized type; only the last arg may be a
 * vararg (type 0 and no name); arg names, when present, must be valid
 * identifiers.
 */
static int btf_func_proto_check(struct btf_verifier_env *env,
				const struct btf_type *t)
{
	const struct btf_type *ret_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;
	int err;

	btf = env->btf;
	args = (const struct btf_param *)(t + 1);
	nr_args = btf_type_vlen(t);

	/* Check func return type which could be "void" (t->type == 0) */
	if (t->type) {
		u32 ret_type_id = t->type;

		ret_type = btf_type_by_id(btf, ret_type_id);
		if (!ret_type) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}

		if (btf_type_needs_resolve(ret_type) &&
		    !env_type_is_resolved(env, ret_type_id)) {
			err = btf_resolve(env, ret_type, ret_type_id);
			if (err)
				return err;
		}

		/* Ensure the return type is a type that has a size */
		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}
	}

	if (!nr_args)
		return 0;

	/* Last func arg type_id could be 0 if it is a vararg */
	if (!args[nr_args - 1].type) {
		/* A vararg marker must not carry a name */
		if (args[nr_args - 1].name_off) {
			btf_verifier_log_type(env, t, "Invalid arg#%u",
					      nr_args);
			return -EINVAL;
		}
		nr_args--;
	}

	err = 0;
	for (i = 0; i < nr_args; i++) {
		const struct btf_type *arg_type;
		u32 arg_type_id;

		arg_type_id = args[i].type;
		arg_type = btf_type_by_id(btf, arg_type_id);
		if (!arg_type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (args[i].name_off &&
		    (!btf_name_offset_valid(btf, args[i].name_off) ||
		     !btf_name_valid_identifier(btf, args[i].name_off))) {
			btf_verifier_log_type(env, t,
					      "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (btf_type_needs_resolve(arg_type) &&
		    !env_type_is_resolved(env, arg_type_id)) {
			err = btf_resolve(env, arg_type, arg_type_id);
			if (err)
				break;
		}

		/* Every (non-vararg) arg must resolve to a sized type */
		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}
	}

	return err;
}
3006 
3007 static int btf_func_check(struct btf_verifier_env *env,
3008 			  const struct btf_type *t)
3009 {
3010 	const struct btf_type *proto_type;
3011 	const struct btf_param *args;
3012 	const struct btf *btf;
3013 	u16 nr_args, i;
3014 
3015 	btf = env->btf;
3016 	proto_type = btf_type_by_id(btf, t->type);
3017 
3018 	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
3019 		btf_verifier_log_type(env, t, "Invalid type_id");
3020 		return -EINVAL;
3021 	}
3022 
3023 	args = (const struct btf_param *)(proto_type + 1);
3024 	nr_args = btf_type_vlen(proto_type);
3025 	for (i = 0; i < nr_args; i++) {
3026 		if (!args[i].name_off && args[i].type) {
3027 			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
3028 			return -EINVAL;
3029 		}
3030 	}
3031 
3032 	return 0;
3033 }
3034 
/* Per-kind verifier/show operations, indexed by BTF_KIND_* */
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
	[BTF_KIND_FUNC] = &func_ops,
	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
	[BTF_KIND_VAR] = &var_ops,
	[BTF_KIND_DATASEC] = &datasec_ops,
};
3052 
/* Validate the common prefix of one btf_type (fixed header fits, no
 * unknown info bits, known kind, valid name offset), then dispatch to
 * the kind-specific check_meta() for the variable-length part.
 * Return the total number of meta bytes this type consumes, or
 * -EINVAL on malformed meta.
 */
static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{
	u32 saved_meta_left = meta_left;
	s32 var_meta_size;

	if (meta_left < sizeof(*t)) {
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
				 env->log_type_id, meta_left, sizeof(*t));
		return -EINVAL;
	}
	meta_left -= sizeof(*t);

	if (t->info & ~BTF_INFO_MASK) {
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
				 env->log_type_id, t->info);
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
		btf_verifier_log(env, "[%u] Invalid kind:%u",
				 env->log_type_id, BTF_INFO_KIND(t->info));
		return -EINVAL;
	}

	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
				 env->log_type_id, t->name_off);
		return -EINVAL;
	}

	/* Kind-specific check of the data following struct btf_type */
	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
	if (var_meta_size < 0)
		return var_meta_size;

	meta_left -= var_meta_size;

	return saved_meta_left - meta_left;
}
3094 
3095 static int btf_check_all_metas(struct btf_verifier_env *env)
3096 {
3097 	struct btf *btf = env->btf;
3098 	struct btf_header *hdr;
3099 	void *cur, *end;
3100 
3101 	hdr = &btf->hdr;
3102 	cur = btf->nohdr_data + hdr->type_off;
3103 	end = cur + hdr->type_len;
3104 
3105 	env->log_type_id = 1;
3106 	while (cur < end) {
3107 		struct btf_type *t = cur;
3108 		s32 meta_size;
3109 
3110 		meta_size = btf_check_meta(env, t, end - cur);
3111 		if (meta_size < 0)
3112 			return meta_size;
3113 
3114 		btf_add_type(env, t);
3115 		cur += meta_size;
3116 		env->log_type_id++;
3117 	}
3118 
3119 	return 0;
3120 }
3121 
/* Post-resolve sanity check: verify that type @type_id ended up in the
 * terminal state expected for its kind after btf_resolve().
 */
static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{
	struct btf *btf = env->btf;

	if (!env_type_is_resolved(env, type_id))
		return false;

	/* struct/union/datasec resolve in place: they must not have a
	 * forwarding resolved_id or resolved_size recorded.
	 */
	if (btf_type_is_struct(t) || btf_type_is_datasec(t))
		return !btf->resolved_ids[type_id] &&
		       !btf->resolved_sizes[type_id];

	/* Modifiers, pointers and vars must forward to a concrete type,
	 * i.e. one that is none of modifier/var/datasec.
	 */
	if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
	    btf_type_is_var(t)) {
		t = btf_type_id_resolve(btf, &type_id);
		return t &&
		       !btf_type_is_modifier(t) &&
		       !btf_type_is_var(t) &&
		       !btf_type_is_datasec(t);
	}

	/* Arrays: the element type must resolve to a sized, non-modifier
	 * type, and the recorded total size must be nelems * elem_size.
	 */
	if (btf_type_is_array(t)) {
		const struct btf_array *array = btf_type_array(t);
		const struct btf_type *elem_type;
		u32 elem_type_id = array->type;
		u32 elem_size;

		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
			(array->nelems * elem_size ==
			 btf->resolved_sizes[type_id]);
	}

	/* Any other kind should never have been pushed for resolution. */
	return false;
}
3158 
/* Resolve type @type_id and everything it references, using an explicit
 * stack (env_stack_*) instead of recursion to keep kernel stack usage
 * bounded.  -E2BIG means the reference chain exceeded
 * MAX_RESOLVE_DEPTH; -EEXIST means a reference cycle was detected.
 */
static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	u32 save_log_type_id = env->log_type_id;
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	/* Kind-specific ->resolve() callbacks push referenced types and
	 * pop fully-resolved ones until the stack drains or an error hits.
	 */
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG) {
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	} else if (err == -EEXIST) {
		btf_verifier_log_type(env, t, "Loop detected");
	}

	/* Final sanity check */
	if (!err && !btf_resolve_valid(env, t, type_id)) {
		btf_verifier_log_type(env, t, "Invalid resolve state");
		err = -EINVAL;
	}

	/* Restore the caller's log id so its own messages stay attributed. */
	env->log_type_id = save_log_type_id;
	return err;
}
3191 
3192 static int btf_check_all_types(struct btf_verifier_env *env)
3193 {
3194 	struct btf *btf = env->btf;
3195 	u32 type_id;
3196 	int err;
3197 
3198 	err = env_resolve_init(env);
3199 	if (err)
3200 		return err;
3201 
3202 	env->phase++;
3203 	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
3204 		const struct btf_type *t = btf_type_by_id(btf, type_id);
3205 
3206 		env->log_type_id = type_id;
3207 		if (btf_type_needs_resolve(t) &&
3208 		    !env_type_is_resolved(env, type_id)) {
3209 			err = btf_resolve(env, t, type_id);
3210 			if (err)
3211 				return err;
3212 		}
3213 
3214 		if (btf_type_is_func_proto(t)) {
3215 			err = btf_func_proto_check(env, t);
3216 			if (err)
3217 				return err;
3218 		}
3219 
3220 		if (btf_type_is_func(t)) {
3221 			err = btf_func_check(env, t);
3222 			if (err)
3223 				return err;
3224 		}
3225 	}
3226 
3227 	return 0;
3228 }
3229 
3230 static int btf_parse_type_sec(struct btf_verifier_env *env)
3231 {
3232 	const struct btf_header *hdr = &env->btf->hdr;
3233 	int err;
3234 
3235 	/* Type section must align to 4 bytes */
3236 	if (hdr->type_off & (sizeof(u32) - 1)) {
3237 		btf_verifier_log(env, "Unaligned type_off");
3238 		return -EINVAL;
3239 	}
3240 
3241 	if (!hdr->type_len) {
3242 		btf_verifier_log(env, "No type found");
3243 		return -EINVAL;
3244 	}
3245 
3246 	err = btf_check_all_metas(env);
3247 	if (err)
3248 		return err;
3249 
3250 	return btf_check_all_types(env);
3251 }
3252 
/* Validate the string section: it must be the last section of the BTF
 * blob, be non-empty, fit within BTF_MAX_NAME_OFFSET, and start and end
 * with a NUL byte (offset 0 is the empty string; the last string must
 * be terminated).
 */
static int btf_parse_str_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr;
	struct btf *btf = env->btf;
	const char *start, *end;

	hdr = &btf->hdr;
	start = btf->nohdr_data + hdr->str_off;
	end = start + hdr->str_len;

	if (end != btf->data + btf->data_size) {
		btf_verifier_log(env, "String section is not at the end");
		return -EINVAL;
	}

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
	    start[0] || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}
3278 
/* Offsets of the (off, len) section descriptors inside struct
 * btf_header, letting btf_check_sec_info() iterate all known sections
 * generically.
 */
static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};
3283 
3284 static int btf_sec_info_cmp(const void *a, const void *b)
3285 {
3286 	const struct btf_sec_info *x = a;
3287 	const struct btf_sec_info *y = b;
3288 
3289 	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3290 }
3291 
/* Verify that the sections described by the header exactly tile the
 * area after the header: sorted by offset they must leave no gaps, not
 * overlap, stay inside the data area, and not leave unknown trailing
 * data.
 */
static int btf_check_sec_info(struct btf_verifier_env *env,
			      u32 btf_data_size)
{
	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
	u32 total, expected_total, i;
	const struct btf_header *hdr;
	const struct btf *btf;

	btf = env->btf;
	hdr = &btf->hdr;

	/* Populate the secs from hdr */
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
		secs[i] = *(struct btf_sec_info *)((void *)hdr +
						   btf_sec_info_offset[i]);

	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);

	/* Check for gaps and overlap among sections */
	total = 0;
	/* Number of bytes available after the header. */
	expected_total = btf_data_size - hdr->hdr_len;
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
		if (expected_total < secs[i].off) {
			btf_verifier_log(env, "Invalid section offset");
			return -EINVAL;
		}
		if (total < secs[i].off) {
			/* gap */
			btf_verifier_log(env, "Unsupported section found");
			return -EINVAL;
		}
		if (total > secs[i].off) {
			btf_verifier_log(env, "Section overlap found");
			return -EINVAL;
		}
		/* Subtraction form avoids u32 overflow of total + len. */
		if (expected_total - total < secs[i].len) {
			btf_verifier_log(env,
					 "Total section length too long");
			return -EINVAL;
		}
		total += secs[i].len;
	}

	/* There is data other than hdr and known sections */
	if (expected_total != total) {
		btf_verifier_log(env, "Unsupported section found");
		return -EINVAL;
	}

	return 0;
}
3344 
/* Parse and validate the BTF header in btf->data: bounds-check
 * hdr_len, require any header bytes beyond what this kernel knows to
 * be zero (forward compatibility), copy the known part into btf->hdr,
 * then check magic/version/flags and the section layout.
 */
static int btf_parse_hdr(struct btf_verifier_env *env)
{
	u32 hdr_len, hdr_copy, btf_data_size;
	const struct btf_header *hdr;
	struct btf *btf;
	int err;

	btf = env->btf;
	btf_data_size = btf->data_size;

	/* Need at least enough bytes to read the hdr_len field itself. */
	if (btf_data_size <
	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
		btf_verifier_log(env, "hdr_len not found");
		return -EINVAL;
	}

	hdr = btf->data;
	hdr_len = hdr->hdr_len;
	if (btf_data_size < hdr_len) {
		btf_verifier_log(env, "btf_header not found");
		return -EINVAL;
	}

	/* Ensure the unsupported header fields are zero */
	if (hdr_len > sizeof(btf->hdr)) {
		u8 *expected_zero = btf->data + sizeof(btf->hdr);
		u8 *end = btf->data + hdr_len;

		for (; expected_zero < end; expected_zero++) {
			if (*expected_zero) {
				btf_verifier_log(env, "Unsupported btf_header");
				return -E2BIG;
			}
		}
	}

	/* Copy only the part of the header this kernel understands. */
	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
	memcpy(&btf->hdr, btf->data, hdr_copy);

	hdr = &btf->hdr;

	btf_verifier_log_hdr(env, btf_data_size);

	if (hdr->magic != BTF_MAGIC) {
		btf_verifier_log(env, "Invalid magic");
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		btf_verifier_log(env, "Unsupported version");
		return -ENOTSUPP;
	}

	if (hdr->flags) {
		btf_verifier_log(env, "Unsupported flags");
		return -ENOTSUPP;
	}

	/* A header with no type/string data after it is invalid. */
	if (btf_data_size == hdr->hdr_len) {
		btf_verifier_log(env, "No data");
		return -EINVAL;
	}

	err = btf_check_sec_info(env, btf_data_size);
	if (err)
		return err;

	return 0;
}
3414 
/* Parse and fully verify user-supplied BTF data (BPF_BTF_LOAD path).
 * Copies the blob from userspace, validates header, string and type
 * sections, and returns a refcounted struct btf, or ERR_PTR on failure.
 * The optional log_* arguments mirror the program verifier's log
 * handling.
 */
static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
			     u32 log_level, char __user *log_ubuf, u32 log_size)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	u8 *data;
	int err;

	if (btf_data_size > BTF_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	if (log_level || log_ubuf || log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = log_level;
		log->ubuf = log_ubuf;
		log->len_total = log_size;

		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf) {
			err = -EINVAL;
			goto errout;
		}
	}

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	/* kvmalloc: the blob may be too large for kmalloc. */
	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
	if (!data) {
		err = -ENOMEM;
		goto errout;
	}

	btf->data = data;
	btf->data_size = btf_data_size;

	if (copy_from_user(data, btf_data, btf_data_size)) {
		err = -EFAULT;
		goto errout;
	}

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	/* Strings must be parsed before types: type validation looks up
	 * name offsets.
	 */
	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_parse_type_sec(env);
	if (err)
		goto errout;

	/* Verification succeeded but the user's log buffer overflowed. */
	if (log->level && bpf_verifier_log_full(log)) {
		err = -ENOSPC;
		goto errout;
	}

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	/* btf_free() also releases btf->data. */
	if (btf)
		btf_free(btf);
	return ERR_PTR(err);
}
3498 
3499 extern char __weak __start_BTF[];
3500 extern char __weak __stop_BTF[];
3501 extern struct btf *btf_vmlinux;
3502 
/* Stub out the non-PROG entries of bpf_types.h so only BPF_PROG_TYPE
 * expands in the constructs below.
 */
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
/* struct bpf_ctx_convert declares, for every program type, a pair of
 * members: the prog-side ctx type followed by the kernel-side ctx type.
 * The struct is never instantiated; only its vmlinux BTF description is
 * consulted (via the 't' union member) to translate ctx accesses.
 */
static union {
	struct bpf_ctx_convert {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	prog_ctx_type _id##_prog; \
	kern_ctx_type _id##_kern;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	} *__t;
	/* 't' is written once under lock. Read many times. */
	const struct btf_type *t;
} bpf_ctx_convert;
/* One enumerator per program type: the index of its member pair within
 * struct bpf_ctx_convert.
 */
enum {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	__ctx_convert##_id,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
};
/* Maps enum bpf_prog_type to the corresponding __ctx_convert index. */
static u8 bpf_ctx_convert_map[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = __ctx_convert##_id,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	0, /* avoid empty array */
};
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
3532 
/* Check whether argument @arg's type @t (a pointer) names the uapi ctx
 * struct expected for @prog_type (e.g. struct __sk_buff for socket
 * filters).  On match, return the member pair inside struct
 * bpf_ctx_convert describing the prog-side/kernel-side ctx types;
 * return NULL otherwise.
 */
static const struct btf_member *
btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
		      const struct btf_type *t, enum bpf_prog_type prog_type,
		      int arg)
{
	const struct btf_type *conv_struct;
	const struct btf_type *ctx_struct;
	const struct btf_member *ctx_type;
	const char *tname, *ctx_tname;

	conv_struct = bpf_ctx_convert.t;
	if (!conv_struct) {
		bpf_log(log, "btf_vmlinux is malformed\n");
		return NULL;
	}
	/* Follow the pointer and strip any const/volatile/etc. */
	t = btf_type_by_id(btf, t->type);
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (!btf_type_is_struct(t)) {
		/* Only pointer to struct is supported for now.
		 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
		 * is not supported yet.
		 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
		 */
		if (log->level & BPF_LOG_LEVEL)
			bpf_log(log, "arg#%d type is not a struct\n", arg);
		return NULL;
	}
	tname = btf_name_by_offset(btf, t->name_off);
	if (!tname) {
		bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
		return NULL;
	}
	/* prog_type is valid bpf program type. No need for bounds check. */
	ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
	/* ctx_struct is a pointer to prog_ctx_type in vmlinux.
	 * Like 'struct __sk_buff'
	 */
	ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
	if (!ctx_struct)
		/* should not happen */
		return NULL;
	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
	if (!ctx_tname) {
		/* should not happen */
		bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
		return NULL;
	}
	/* only compare that prog's ctx type name is the same as
	 * kernel expects. No need to compare field by field.
	 * It's ok for bpf prog to do:
	 * struct __sk_buff {};
	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
	 * { // no fields of skb are ever used }
	 */
	if (strcmp(ctx_tname, tname))
		return NULL;
	return ctx_type;
}
3592 
/* Map-ops table indexed by map type: only the BPF_MAP_TYPE entries of
 * bpf_types.h are expanded here; PROG/LINK entries are stubbed out.
 */
static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_LINK_TYPE(_id, _name)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_LINK_TYPE
#undef BPF_MAP_TYPE
};
3603 
3604 static int btf_vmlinux_map_ids_init(const struct btf *btf,
3605 				    struct bpf_verifier_log *log)
3606 {
3607 	const struct bpf_map_ops *ops;
3608 	int i, btf_id;
3609 
3610 	for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) {
3611 		ops = btf_vmlinux_map_ops[i];
3612 		if (!ops || (!ops->map_btf_name && !ops->map_btf_id))
3613 			continue;
3614 		if (!ops->map_btf_name || !ops->map_btf_id) {
3615 			bpf_log(log, "map type %d is misconfigured\n", i);
3616 			return -EINVAL;
3617 		}
3618 		btf_id = btf_find_by_name_kind(btf, ops->map_btf_name,
3619 					       BTF_KIND_STRUCT);
3620 		if (btf_id < 0)
3621 			return btf_id;
3622 		*ops->map_btf_id = btf_id;
3623 	}
3624 
3625 	return 0;
3626 }
3627 
3628 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
3629 				     struct btf *btf,
3630 				     const struct btf_type *t,
3631 				     enum bpf_prog_type prog_type,
3632 				     int arg)
3633 {
3634 	const struct btf_member *prog_ctx_type, *kern_ctx_type;
3635 
3636 	prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
3637 	if (!prog_ctx_type)
3638 		return -ENOENT;
3639 	kern_ctx_type = prog_ctx_type + 1;
3640 	return kern_ctx_type->type;
3641 }
3642 
3643 BTF_ID_LIST(bpf_ctx_convert_btf_id)
3644 BTF_ID(struct, bpf_ctx_convert)
3645 
/* Build the kernel's own BTF object from the .BTF data embedded between
 * __start_BTF and __stop_BTF.  Unlike btf_parse(), the data is used in
 * place (not copied from userspace) and only the metadata pass is run.
 * Also initializes bpf_ctx_convert.t, the per-map-type btf ids, and
 * struct_ops support.
 */
struct btf *btf_parse_vmlinux(void)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	int err;

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	/* BPF_LOG_KERNEL routes verifier messages to the kernel log. */
	log->level = BPF_LOG_KERNEL;

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	/* Use the in-image BTF section directly; no copy is made. */
	btf->data = __start_BTF;
	btf->data_size = __stop_BTF - __start_BTF;

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_check_all_metas(env);
	if (err)
		goto errout;

	/* btf_parse_vmlinux() runs under bpf_verifier_lock */
	bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);

	/* find bpf map structs for map_ptr access checking */
	err = btf_vmlinux_map_ids_init(btf, log);
	if (err < 0)
		goto errout;

	bpf_struct_ops_init(btf, log);

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	/* btf->data is not owned here, so don't use btf_free(). */
	if (btf) {
		kvfree(btf->types);
		kfree(btf);
	}
	return ERR_PTR(err);
}
3706 
3707 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
3708 {
3709 	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3710 
3711 	if (tgt_prog) {
3712 		return tgt_prog->aux->btf;
3713 	} else {
3714 		return btf_vmlinux;
3715 	}
3716 }
3717 
3718 static bool is_string_ptr(struct btf *btf, const struct btf_type *t)
3719 {
3720 	/* t comes in already as a pointer */
3721 	t = btf_type_by_id(btf, t->type);
3722 
3723 	/* allow const */
3724 	if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
3725 		t = btf_type_by_id(btf, t->type);
3726 
3727 	/* char, signed char, unsigned char */
3728 	return btf_type_is_int(t) && t->size == 1;
3729 }
3730 
/* Decide whether a BTF-attached program (fentry/fexit/tp_btf/LSM/...)
 * may access its ctx at [off, off + size).  Each argument occupies one
 * 8-byte slot, so arg = off / 8; arg == nr_args addresses the return
 * value (fexit/LSM/fmod_ret).  On success, *info is filled with the
 * resulting register type and, for pointer args, the target btf_id.
 */
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info)
{
	const struct btf_type *t = prog->aux->attach_func_proto;
	struct bpf_prog *tgt_prog = prog->aux->linked_prog;
	struct btf *btf = bpf_prog_get_target_btf(prog);
	const char *tname = prog->aux->attach_func_name;
	struct bpf_verifier_log *log = info->log;
	const struct btf_param *args;
	u32 nr_args, arg;
	int i, ret;

	if (off % 8) {
		bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
			tname, off);
		return false;
	}
	arg = off / 8;
	/* NOTE(review): t + 1 is computed before the t-NULL fallback
	 * below; this is only pointer arithmetic, and args is never
	 * dereferenced on the !t paths, which all return early.
	 */
	args = (const struct btf_param *)(t + 1);
	/* if (t == NULL) Fall back to default BPF prog with 5 u64 arguments */
	nr_args = t ? btf_type_vlen(t) : 5;
	if (prog->aux->attach_btf_trace) {
		/* skip first 'void *__data' argument in btf_trace_##name typedef */
		args++;
		nr_args--;
	}

	if (arg > nr_args) {
		bpf_log(log, "func '%s' doesn't have %d-th argument\n",
			tname, arg + 1);
		return false;
	}

	/* arg == nr_args means the access targets the return value. */
	if (arg == nr_args) {
		switch (prog->expected_attach_type) {
		case BPF_LSM_MAC:
		case BPF_TRACE_FEXIT:
			/* When LSM programs are attached to void LSM hooks
			 * they use FEXIT trampolines and when attached to
			 * int LSM hooks, they use MODIFY_RETURN trampolines.
			 *
			 * While the LSM programs are BPF_MODIFY_RETURN-like
			 * the check:
			 *
			 *	if (ret_type != 'int')
			 *		return -EINVAL;
			 *
			 * is _not_ done here. This is still safe as LSM hooks
			 * have only void and int return types.
			 */
			if (!t)
				return true;
			t = btf_type_by_id(btf, t->type);
			break;
		case BPF_MODIFY_RETURN:
			/* For now the BPF_MODIFY_RETURN can only be attached to
			 * functions that return an int.
			 */
			if (!t)
				return false;

			t = btf_type_skip_modifiers(btf, t->type, NULL);
			if (!btf_type_is_small_int(t)) {
				bpf_log(log,
					"ret type %s not allowed for fmod_ret\n",
					btf_kind_str[BTF_INFO_KIND(t->info)]);
				return false;
			}
			break;
		default:
			bpf_log(log, "func '%s' doesn't have %d-th argument\n",
				tname, arg + 1);
			return false;
		}
	} else {
		if (!t)
			/* Default prog with 5 args */
			return true;
		t = btf_type_by_id(btf, args[arg].type);
	}

	/* skip modifiers */
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (btf_type_is_small_int(t) || btf_type_is_enum(t))
		/* accessing a scalar */
		return true;
	if (!btf_type_is_ptr(t)) {
		bpf_log(log,
			"func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
			tname, arg,
			__btf_name_by_offset(btf, t->name_off),
			btf_kind_str[BTF_INFO_KIND(t->info)]);
		return false;
	}

	/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];

		if (ctx_arg_info->offset == off &&
		    (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL ||
		     ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) {
			info->reg_type = ctx_arg_info->reg_type;
			return true;
		}
	}

	if (t->type == 0)
		/* This is a pointer to void.
		 * It is the same as scalar from the verifier safety pov.
		 * No further pointer walking is allowed.
		 */
		return true;

	if (is_string_ptr(btf, t))
		return true;

	/* this is a pointer to another type */
	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];

		if (ctx_arg_info->offset == off) {
			info->reg_type = ctx_arg_info->reg_type;
			info->btf_id = ctx_arg_info->btf_id;
			return true;
		}
	}

	info->reg_type = PTR_TO_BTF_ID;
	if (tgt_prog) {
		/* Extension progs must express ctx types in vmlinux terms. */
		ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type, arg);
		if (ret > 0) {
			info->btf_id = ret;
			return true;
		} else {
			return false;
		}
	}

	info->btf_id = t->type;
	t = btf_type_by_id(btf, t->type);
	/* skip modifiers */
	while (btf_type_is_modifier(t)) {
		info->btf_id = t->type;
		t = btf_type_by_id(btf, t->type);
	}
	if (!btf_type_is_struct(t)) {
		bpf_log(log,
			"func '%s' arg%d type %s is not a struct\n",
			tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
		return false;
	}
	bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
		tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
		__btf_name_by_offset(btf, t->name_off));
	return true;
}
3890 
/* Outcome of one btf_struct_walk() step. */
enum bpf_struct_walk_result {
	/* < 0 error */
	WALK_SCALAR = 0,	/* access resolved to a scalar member */
	WALK_PTR,		/* access is exactly a pointer-to-struct member */
	WALK_STRUCT,		/* access lands at the start of a nested struct */
};
3897 
/* Walk struct @t to find the member containing [off, off + size).
 * Returns a bpf_struct_walk_result (WALK_SCALAR / WALK_PTR /
 * WALK_STRUCT) or a negative errno; for WALK_PTR and WALK_STRUCT,
 * *next_btf_id is set to the target type id.  All type ids here are
 * resolved against btf_vmlinux.
 */
static int btf_struct_walk(struct bpf_verifier_log *log,
			   const struct btf_type *t, int off, int size,
			   u32 *next_btf_id)
{
	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
	const struct btf_type *mtype, *elem_type = NULL;
	const struct btf_member *member;
	const char *tname, *mname;
	u32 vlen, elem_id, mid;

again:
	tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
	if (!btf_type_is_struct(t)) {
		bpf_log(log, "Type '%s' is not a struct\n", tname);
		return -EINVAL;
	}

	vlen = btf_type_vlen(t);
	if (off + size > t->size) {
		/* If the last element is a variable size array, we may
		 * need to relax the rule.
		 */
		struct btf_array *array_elem;

		if (vlen == 0)
			goto error;

		member = btf_type_member(t) + vlen - 1;
		mtype = btf_type_skip_modifiers(btf_vmlinux, member->type,
						NULL);
		if (!btf_type_is_array(mtype))
			goto error;

		/* nelems == 0 marks a flexible array member. */
		array_elem = (struct btf_array *)(mtype + 1);
		if (array_elem->nelems != 0)
			goto error;

		moff = btf_member_bit_offset(t, member) / 8;
		if (off < moff)
			goto error;

		/* Only allow structure for now, can be relaxed for
		 * other types later.
		 */
		t = btf_type_skip_modifiers(btf_vmlinux, array_elem->type,
					    NULL);
		if (!btf_type_is_struct(t))
			goto error;

		/* Fold the offset into a single element of the flexible
		 * array and restart the walk inside that element.
		 */
		off = (off - moff) % t->size;
		goto again;

error:
		bpf_log(log, "access beyond struct %s at off %u size %u\n",
			tname, off, size);
		return -EACCES;
	}

	for_each_member(i, t, member) {
		/* offset of the field in bytes */
		moff = btf_member_bit_offset(t, member) / 8;
		if (off + size <= moff)
			/* won't find anything, field is already too far */
			break;

		if (btf_member_bitfield_size(t, member)) {
			u32 end_bit = btf_member_bit_offset(t, member) +
				btf_member_bitfield_size(t, member);

			/* off <= moff instead of off == moff because clang
			 * does not generate a BTF member for anonymous
			 * bitfield like the ":16" here:
			 * struct {
			 *	int :16;
			 *	int x:8;
			 * };
			 */
			if (off <= moff &&
			    BITS_ROUNDUP_BYTES(end_bit) <= off + size)
				return WALK_SCALAR;

			/* off may be accessing a following member
			 *
			 * or
			 *
			 * Doing partial access at either end of this
			 * bitfield.  Continue on this case also to
			 * treat it as not accessing this bitfield
			 * and eventually error out as field not
			 * found to keep it simple.
			 * It could be relaxed if there was a legit
			 * partial access case later.
			 */
			continue;
		}

		/* In case of "off" is pointing to holes of a struct */
		if (off < moff)
			break;

		/* type of the field */
		mid = member->type;
		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mname = __btf_name_by_offset(btf_vmlinux, member->name_off);

		/* Resolve the member to a sized type; for arrays this
		 * also yields the element type/id and total element count.
		 */
		mtype = __btf_resolve_size(btf_vmlinux, mtype, &msize,
					   &elem_type, &elem_id, &total_nelems,
					   &mid);
		if (IS_ERR(mtype)) {
			bpf_log(log, "field %s doesn't have size\n", mname);
			return -EFAULT;
		}

		mtrue_end = moff + msize;
		if (off >= mtrue_end)
			/* no overlap with member, keep iterating */
			continue;

		if (btf_type_is_array(mtype)) {
			u32 elem_idx;

			/* __btf_resolve_size() above helps to
			 * linearize a multi-dimensional array.
			 *
			 * The logic here is treating an array
			 * in a struct as the following way:
			 *
			 * struct outer {
			 *	struct inner array[2][2];
			 * };
			 *
			 * looks like:
			 *
			 * struct outer {
			 *	struct inner array_elem0;
			 *	struct inner array_elem1;
			 *	struct inner array_elem2;
			 *	struct inner array_elem3;
			 * };
			 *
			 * When accessing outer->array[1][0], it moves
			 * moff to "array_elem2", set mtype to
			 * "struct inner", and msize also becomes
			 * sizeof(struct inner).  Then most of the
			 * remaining logic will fall through without
			 * caring the current member is an array or
			 * not.
			 *
			 * Unlike mtype/msize/moff, mtrue_end does not
			 * change.  The naming difference ("_true") tells
			 * that it is not always corresponding to
			 * the current mtype/msize/moff.
			 * It is the true end of the current
			 * member (i.e. array in this case).  That
			 * will allow an int array to be accessed like
			 * a scratch space,
			 * i.e. allow access beyond the size of
			 *      the array's element as long as it is
			 *      within the mtrue_end boundary.
			 */

			/* skip empty array */
			if (moff == mtrue_end)
				continue;

			msize /= total_nelems;
			elem_idx = (off - moff) / msize;
			moff += elem_idx * msize;
			mtype = elem_type;
			mid = elem_id;
		}

		/* the 'off' we're looking for is either equal to start
		 * of this field or inside of this struct
		 */
		if (btf_type_is_struct(mtype)) {
			/* our field must be inside that union or struct */
			t = mtype;

			/* return if the offset matches the member offset */
			if (off == moff) {
				*next_btf_id = mid;
				return WALK_STRUCT;
			}

			/* adjust offset we're looking for */
			off -= moff;
			goto again;
		}

		if (btf_type_is_ptr(mtype)) {
			const struct btf_type *stype;
			u32 id;

			/* Pointer loads must be exact: full width at the
			 * member's own offset.
			 */
			if (msize != size || off != moff) {
				bpf_log(log,
					"cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
					mname, moff, tname, off, size);
				return -EACCES;
			}
			stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id);
			if (btf_type_is_struct(stype)) {
				*next_btf_id = id;
				return WALK_PTR;
			}
		}

		/* Allow more flexible access within an int as long as
		 * it is within mtrue_end.
		 * Since mtrue_end could be the end of an array,
		 * that also allows using an array of int as a scratch
		 * space. e.g. skb->cb[].
		 */
		if (off + size > mtrue_end) {
			bpf_log(log,
				"access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
				mname, mtrue_end, tname, off, size);
			return -EACCES;
		}

		return WALK_SCALAR;
	}
	bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
	return -EINVAL;
}
4123 
4124 int btf_struct_access(struct bpf_verifier_log *log,
4125 		      const struct btf_type *t, int off, int size,
4126 		      enum bpf_access_type atype __maybe_unused,
4127 		      u32 *next_btf_id)
4128 {
4129 	int err;
4130 	u32 id;
4131 
4132 	do {
4133 		err = btf_struct_walk(log, t, off, size, &id);
4134 
4135 		switch (err) {
4136 		case WALK_PTR:
4137 			/* If we found the pointer or scalar on t+off,
4138 			 * we're done.
4139 			 */
4140 			*next_btf_id = id;
4141 			return PTR_TO_BTF_ID;
4142 		case WALK_SCALAR:
4143 			return SCALAR_VALUE;
4144 		case WALK_STRUCT:
4145 			/* We found nested struct, so continue the search
4146 			 * by diving in it. At this point the offset is
4147 			 * aligned with the new type, so set it to 0.
4148 			 */
4149 			t = btf_type_by_id(btf_vmlinux, id);
4150 			off = 0;
4151 			break;
4152 		default:
4153 			/* It's either error or unknown return value..
4154 			 * scream and leave.
4155 			 */
4156 			if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
4157 				return -EINVAL;
4158 			return err;
4159 		}
4160 	} while (t);
4161 
4162 	return -EINVAL;
4163 }
4164 
4165 bool btf_struct_ids_match(struct bpf_verifier_log *log,
4166 			  int off, u32 id, u32 need_type_id)
4167 {
4168 	const struct btf_type *type;
4169 	int err;
4170 
4171 	/* Are we already done? */
4172 	if (need_type_id == id && off == 0)
4173 		return true;
4174 
4175 again:
4176 	type = btf_type_by_id(btf_vmlinux, id);
4177 	if (!type)
4178 		return false;
4179 	err = btf_struct_walk(log, type, off, 1, &id);
4180 	if (err != WALK_STRUCT)
4181 		return false;
4182 
4183 	/* We found nested struct object. If it matches
4184 	 * the requested ID, we're done. Otherwise let's
4185 	 * continue the search with offset 0 in the new
4186 	 * type.
4187 	 */
4188 	if (need_type_id != id) {
4189 		off = 0;
4190 		goto again;
4191 	}
4192 
4193 	return true;
4194 }
4195 
4196 int btf_resolve_helper_id(struct bpf_verifier_log *log,
4197 			  const struct bpf_func_proto *fn, int arg)
4198 {
4199 	int id;
4200 
4201 	if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID || !btf_vmlinux)
4202 		return -EINVAL;
4203 	id = fn->btf_id[arg];
4204 	if (!id || id > btf_vmlinux->nr_types)
4205 		return -EINVAL;
4206 	return id;
4207 }
4208 
4209 static int __get_type_size(struct btf *btf, u32 btf_id,
4210 			   const struct btf_type **bad_type)
4211 {
4212 	const struct btf_type *t;
4213 
4214 	if (!btf_id)
4215 		/* void */
4216 		return 0;
4217 	t = btf_type_by_id(btf, btf_id);
4218 	while (t && btf_type_is_modifier(t))
4219 		t = btf_type_by_id(btf, t->type);
4220 	if (!t) {
4221 		*bad_type = btf->types[0];
4222 		return -EINVAL;
4223 	}
4224 	if (btf_type_is_ptr(t))
4225 		/* kernel size of pointer. Not BPF's size of pointer*/
4226 		return sizeof(void *);
4227 	if (btf_type_is_int(t) || btf_type_is_enum(t))
4228 		return t->size;
4229 	*bad_type = t;
4230 	return -EINVAL;
4231 }
4232 
4233 int btf_distill_func_proto(struct bpf_verifier_log *log,
4234 			   struct btf *btf,
4235 			   const struct btf_type *func,
4236 			   const char *tname,
4237 			   struct btf_func_model *m)
4238 {
4239 	const struct btf_param *args;
4240 	const struct btf_type *t;
4241 	u32 i, nargs;
4242 	int ret;
4243 
4244 	if (!func) {
4245 		/* BTF function prototype doesn't match the verifier types.
4246 		 * Fall back to 5 u64 args.
4247 		 */
4248 		for (i = 0; i < 5; i++)
4249 			m->arg_size[i] = 8;
4250 		m->ret_size = 8;
4251 		m->nr_args = 5;
4252 		return 0;
4253 	}
4254 	args = (const struct btf_param *)(func + 1);
4255 	nargs = btf_type_vlen(func);
4256 	if (nargs >= MAX_BPF_FUNC_ARGS) {
4257 		bpf_log(log,
4258 			"The function %s has %d arguments. Too many.\n",
4259 			tname, nargs);
4260 		return -EINVAL;
4261 	}
4262 	ret = __get_type_size(btf, func->type, &t);
4263 	if (ret < 0) {
4264 		bpf_log(log,
4265 			"The function %s return type %s is unsupported.\n",
4266 			tname, btf_kind_str[BTF_INFO_KIND(t->info)]);
4267 		return -EINVAL;
4268 	}
4269 	m->ret_size = ret;
4270 
4271 	for (i = 0; i < nargs; i++) {
4272 		ret = __get_type_size(btf, args[i].type, &t);
4273 		if (ret < 0) {
4274 			bpf_log(log,
4275 				"The function %s arg%d type %s is unsupported.\n",
4276 				tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4277 			return -EINVAL;
4278 		}
4279 		m->arg_size[i] = ret;
4280 	}
4281 	m->nr_args = nargs;
4282 	return 0;
4283 }
4284 
4285 /* Compare BTFs of two functions assuming only scalars and pointers to context.
4286  * t1 points to BTF_KIND_FUNC in btf1
4287  * t2 points to BTF_KIND_FUNC in btf2
4288  * Returns:
4289  * EINVAL - function prototype mismatch
4290  * EFAULT - verifier bug
4291  * 0 - 99% match. The last 1% is validated by the verifier.
4292  */
static int btf_check_func_type_match(struct bpf_verifier_log *log,
				     struct btf *btf1, const struct btf_type *t1,
				     struct btf *btf2, const struct btf_type *t2)
{
	const struct btf_param *args1, *args2;
	const char *fn1, *fn2, *s1, *s2;
	u32 nargs1, nargs2, i;

	fn1 = btf_name_by_offset(btf1, t1->name_off);
	fn2 = btf_name_by_offset(btf2, t2->name_off);

	/* Only global functions can be replaced; reject other linkage. */
	if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
		bpf_log(log, "%s() is not a global function\n", fn1);
		return -EINVAL;
	}
	if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
		bpf_log(log, "%s() is not a global function\n", fn2);
		return -EINVAL;
	}

	/* Resolve FUNC -> FUNC_PROTO; anything else is a verifier bug. */
	t1 = btf_type_by_id(btf1, t1->type);
	if (!t1 || !btf_type_is_func_proto(t1))
		return -EFAULT;
	t2 = btf_type_by_id(btf2, t2->type);
	if (!t2 || !btf_type_is_func_proto(t2))
		return -EFAULT;

	/* The btf_param array immediately follows the FUNC_PROTO type */
	args1 = (const struct btf_param *)(t1 + 1);
	nargs1 = btf_type_vlen(t1);
	args2 = (const struct btf_param *)(t2 + 1);
	nargs2 = btf_type_vlen(t2);

	if (nargs1 != nargs2) {
		bpf_log(log, "%s() has %d args while %s() has %d args\n",
			fn1, nargs1, fn2, nargs2);
		return -EINVAL;
	}

	/* From here on t1/t2 are reused to walk return and argument types. */
	t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
	t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
	if (t1->info != t2->info) {
		bpf_log(log,
			"Return type %s of %s() doesn't match type %s of %s()\n",
			btf_type_str(t1), fn1,
			btf_type_str(t2), fn2);
		return -EINVAL;
	}

	for (i = 0; i < nargs1; i++) {
		t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
		t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);

		/* Each argument pair must agree on BTF kind/vlen/kflag. */
		if (t1->info != t2->info) {
			bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
				i, fn1, btf_type_str(t1),
				fn2, btf_type_str(t2));
			return -EINVAL;
		}
		if (btf_type_has_size(t1) && t1->size != t2->size) {
			bpf_log(log,
				"arg%d in %s() has size %d while %s() has %d\n",
				i, fn1, t1->size,
				fn2, t2->size);
			return -EINVAL;
		}

		/* global functions are validated with scalars and pointers
		 * to context only. And only global functions can be replaced.
		 * Hence type check only those types.
		 */
		if (btf_type_is_int(t1) || btf_type_is_enum(t1))
			continue;
		if (!btf_type_is_ptr(t1)) {
			bpf_log(log,
				"arg%d in %s() has unrecognized type\n",
				i, fn1);
			return -EINVAL;
		}
		/* Both args are pointers; they must point to struct types. */
		t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
		t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
		if (!btf_type_is_struct(t1)) {
			bpf_log(log,
				"arg%d in %s() is not a pointer to context\n",
				i, fn1);
			return -EINVAL;
		}
		if (!btf_type_is_struct(t2)) {
			bpf_log(log,
				"arg%d in %s() is not a pointer to context\n",
				i, fn2);
			return -EINVAL;
		}
		/* This is an optional check to make program writing easier.
		 * Compare names of structs and report an error to the user.
		 * btf_prepare_func_args() already checked that t2 struct
		 * is a context type. btf_prepare_func_args() will check
		 * later that t1 struct is a context type as well.
		 */
		s1 = btf_name_by_offset(btf1, t1->name_off);
		s2 = btf_name_by_offset(btf2, t2->name_off);
		if (strcmp(s1, s2)) {
			bpf_log(log,
				"arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
				i, fn1, s1, fn2, s2);
			return -EINVAL;
		}
	}
	return 0;
}
4402 
4403 /* Compare BTFs of given program with BTF of target program */
4404 int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
4405 			 struct btf *btf2, const struct btf_type *t2)
4406 {
4407 	struct btf *btf1 = prog->aux->btf;
4408 	const struct btf_type *t1;
4409 	u32 btf_id = 0;
4410 
4411 	if (!prog->aux->func_info) {
4412 		bpf_log(&env->log, "Program extension requires BTF\n");
4413 		return -EINVAL;
4414 	}
4415 
4416 	btf_id = prog->aux->func_info[0].type_id;
4417 	if (!btf_id)
4418 		return -EFAULT;
4419 
4420 	t1 = btf_type_by_id(btf1, btf_id);
4421 	if (!t1 || !btf_type_is_func(t1))
4422 		return -EFAULT;
4423 
4424 	return btf_check_func_type_match(&env->log, btf1, t1, btf2, t2);
4425 }
4426 
4427 /* Compare BTF of a function with given bpf_reg_state.
4428  * Returns:
4429  * EFAULT - there is a verifier bug. Abort verification.
4430  * EINVAL - there is a type mismatch or BTF is not available.
4431  * 0 - BTF matches with what bpf_reg_state expects.
4432  * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
4433  */
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
			     struct bpf_reg_state *reg)
{
	struct bpf_verifier_log *log = &env->log;
	struct bpf_prog *prog = env->prog;
	struct btf *btf = prog->aux->btf;
	const struct btf_param *args;
	const struct btf_type *t;
	u32 i, nargs, btf_id;
	const char *tname;

	if (!prog->aux->func_info)
		return -EINVAL;

	btf_id = prog->aux->func_info[subprog].type_id;
	if (!btf_id)
		return -EFAULT;

	/* Already known to disagree with its BTF; don't re-check. */
	if (prog->aux->func_info_aux[subprog].unreliable)
		return -EINVAL;

	t = btf_type_by_id(btf, btf_id);
	if (!t || !btf_type_is_func(t)) {
		/* These checks were already done by the verifier while loading
		 * struct bpf_func_info
		 */
		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
			subprog);
		return -EFAULT;
	}
	tname = btf_name_by_offset(btf, t->name_off);

	/* Resolve FUNC -> FUNC_PROTO to reach the argument list. */
	t = btf_type_by_id(btf, t->type);
	if (!t || !btf_type_is_func_proto(t)) {
		bpf_log(log, "Invalid BTF of func %s\n", tname);
		return -EFAULT;
	}
	args = (const struct btf_param *)(t + 1);
	nargs = btf_type_vlen(t);
	if (nargs > 5) {
		bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs);
		goto out;
	}
	/* check that BTF function arguments match actual types that the
	 * verifier sees.
	 */
	for (i = 0; i < nargs; i++) {
		t = btf_type_by_id(btf, args[i].type);
		/* skip typedef/const/volatile/restrict wrappers */
		while (btf_type_is_modifier(t))
			t = btf_type_by_id(btf, t->type);
		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
			/* arg i lives in register R(i+1), hence reg[i + 1] */
			if (reg[i + 1].type == SCALAR_VALUE)
				continue;
			bpf_log(log, "R%d is not a scalar\n", i + 1);
			goto out;
		}
		if (btf_type_is_ptr(t)) {
			if (reg[i + 1].type == SCALAR_VALUE) {
				bpf_log(log, "R%d is not a pointer\n", i + 1);
				goto out;
			}
			/* If function expects ctx type in BTF check that caller
			 * is passing PTR_TO_CTX.
			 */
			if (btf_get_prog_ctx_type(log, btf, t, prog->type, i)) {
				if (reg[i + 1].type != PTR_TO_CTX) {
					bpf_log(log,
						"arg#%d expected pointer to ctx, but got %s\n",
						i, btf_kind_str[BTF_INFO_KIND(t->info)]);
					goto out;
				}
				if (check_ctx_reg(env, &reg[i + 1], i + 1))
					goto out;
				continue;
			}
		}
		bpf_log(log, "Unrecognized arg#%d type %s\n",
			i, btf_kind_str[BTF_INFO_KIND(t->info)]);
		goto out;
	}
	return 0;
out:
	/* Compiler optimizations can remove arguments from static functions
	 * or mismatched type can be passed into a global function.
	 * In such cases mark the function as unreliable from BTF point of view.
	 */
	prog->aux->func_info_aux[subprog].unreliable = true;
	return -EINVAL;
}
4523 
4524 /* Convert BTF of a function into bpf_reg_state if possible
4525  * Returns:
4526  * EFAULT - there is a verifier bug. Abort verification.
4527  * EINVAL - cannot convert BTF.
4528  * 0 - Successfully converted BTF into bpf_reg_state
4529  * (either PTR_TO_CTX or SCALAR_VALUE).
4530  */
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg)
{
	struct bpf_verifier_log *log = &env->log;
	struct bpf_prog *prog = env->prog;
	enum bpf_prog_type prog_type = prog->type;
	struct btf *btf = prog->aux->btf;
	const struct btf_param *args;
	const struct btf_type *t;
	u32 i, nargs, btf_id;
	const char *tname;

	/* Only global subprogs with func_info should reach this point. */
	if (!prog->aux->func_info ||
	    prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
		bpf_log(log, "Verifier bug\n");
		return -EFAULT;
	}

	btf_id = prog->aux->func_info[subprog].type_id;
	if (!btf_id) {
		bpf_log(log, "Global functions need valid BTF\n");
		return -EFAULT;
	}

	t = btf_type_by_id(btf, btf_id);
	if (!t || !btf_type_is_func(t)) {
		/* These checks were already done by the verifier while loading
		 * struct bpf_func_info
		 */
		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
			subprog);
		return -EFAULT;
	}
	tname = btf_name_by_offset(btf, t->name_off);

	if (log->level & BPF_LOG_LEVEL)
		bpf_log(log, "Validating %s() func#%d...\n",
			tname, subprog);

	if (prog->aux->func_info_aux[subprog].unreliable) {
		bpf_log(log, "Verifier bug in function %s()\n", tname);
		return -EFAULT;
	}
	/* Extension programs validate ctx types against the target prog. */
	if (prog_type == BPF_PROG_TYPE_EXT)
		prog_type = prog->aux->linked_prog->type;

	/* Resolve FUNC -> FUNC_PROTO to reach return type and args. */
	t = btf_type_by_id(btf, t->type);
	if (!t || !btf_type_is_func_proto(t)) {
		bpf_log(log, "Invalid type of function %s()\n", tname);
		return -EFAULT;
	}
	args = (const struct btf_param *)(t + 1);
	nargs = btf_type_vlen(t);
	if (nargs > 5) {
		bpf_log(log, "Global function %s() with %d > 5 args. Buggy compiler.\n",
			tname, nargs);
		return -EINVAL;
	}
	/* check that function returns int */
	t = btf_type_by_id(btf, t->type);
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
		bpf_log(log,
			"Global function %s() doesn't return scalar. Only those are supported.\n",
			tname);
		return -EINVAL;
	}
	/* Convert BTF function arguments into verifier types.
	 * Only PTR_TO_CTX and SCALAR are supported atm.
	 */
	for (i = 0; i < nargs; i++) {
		t = btf_type_by_id(btf, args[i].type);
		/* skip typedef/const/volatile/restrict wrappers */
		while (btf_type_is_modifier(t))
			t = btf_type_by_id(btf, t->type);
		if (btf_type_is_int(t) || btf_type_is_enum(t)) {
			reg[i + 1].type = SCALAR_VALUE;
			continue;
		}
		if (btf_type_is_ptr(t) &&
		    btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
			reg[i + 1].type = PTR_TO_CTX;
			continue;
		}
		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
			i, btf_kind_str[BTF_INFO_KIND(t->info)], tname);
		return -EINVAL;
	}
	return 0;
}
4621 
4622 void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
4623 		       struct seq_file *m)
4624 {
4625 	const struct btf_type *t = btf_type_by_id(btf, type_id);
4626 
4627 	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
4628 }
4629 
#ifdef CONFIG_PROC_FS
/* Emit the BTF object id into /proc/<pid>/fdinfo/<fd> for a BTF fd */
static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct btf *btf = filp->private_data;

	seq_printf(m, "btf_id:\t%u\n", btf->id);
}
#endif
4638 
/* ->release() for a BTF fd: drop the reference the fd held on the btf object */
static int btf_release(struct inode *inode, struct file *filp)
{
	btf_put(filp->private_data);
	return 0;
}
4644 
/* File operations for the anon inode backing a BTF fd (see __btf_new_fd()) */
const struct file_operations btf_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_btf_show_fdinfo,
#endif
	.release	= btf_release,
};
4651 
/* Install a read-only, close-on-exec anon fd wrapping 'btf'. The fd takes
 * over the caller's reference; no extra refcount is taken here.
 */
static int __btf_new_fd(struct btf *btf)
{
	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}
4656 
4657 int btf_new_fd(const union bpf_attr *attr)
4658 {
4659 	struct btf *btf;
4660 	int ret;
4661 
4662 	btf = btf_parse(u64_to_user_ptr(attr->btf),
4663 			attr->btf_size, attr->btf_log_level,
4664 			u64_to_user_ptr(attr->btf_log_buf),
4665 			attr->btf_log_size);
4666 	if (IS_ERR(btf))
4667 		return PTR_ERR(btf);
4668 
4669 	ret = btf_alloc_id(btf);
4670 	if (ret) {
4671 		btf_free(btf);
4672 		return ret;
4673 	}
4674 
4675 	/*
4676 	 * The BTF ID is published to the userspace.
4677 	 * All BTF free must go through call_rcu() from
4678 	 * now on (i.e. free by calling btf_put()).
4679 	 */
4680 
4681 	ret = __btf_new_fd(btf);
4682 	if (ret < 0)
4683 		btf_put(btf);
4684 
4685 	return ret;
4686 }
4687 
4688 struct btf *btf_get_by_fd(int fd)
4689 {
4690 	struct btf *btf;
4691 	struct fd f;
4692 
4693 	f = fdget(fd);
4694 
4695 	if (!f.file)
4696 		return ERR_PTR(-EBADF);
4697 
4698 	if (f.file->f_op != &btf_fops) {
4699 		fdput(f);
4700 		return ERR_PTR(-EINVAL);
4701 	}
4702 
4703 	btf = f.file->private_data;
4704 	refcount_inc(&btf->refcnt);
4705 	fdput(f);
4706 
4707 	return btf;
4708 }
4709 
int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo;
	struct bpf_btf_info info;
	u32 info_copy, btf_copy;
	void __user *ubtf;
	u32 uinfo_len;

	uinfo = u64_to_user_ptr(attr->info.info);
	uinfo_len = attr->info.info_len;

	/* Copy no more than the smaller of the user's struct and the
	 * kernel's, so older userspace with a shorter bpf_btf_info works.
	 */
	info_copy = min_t(u32, uinfo_len, sizeof(info));
	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_copy))
		return -EFAULT;

	info.id = btf->id;
	ubtf = u64_to_user_ptr(info.btf);
	/* Dump the raw BTF data, truncated to the user-provided buffer size */
	btf_copy = min_t(u32, btf->data_size, info.btf_size);
	if (copy_to_user(ubtf, btf->data, btf_copy))
		return -EFAULT;
	/* Report the full size so the caller can detect truncation and retry */
	info.btf_size = btf->data_size;

	if (copy_to_user(uinfo, &info, info_copy) ||
	    put_user(info_copy, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}
4741 
4742 int btf_get_fd_by_id(u32 id)
4743 {
4744 	struct btf *btf;
4745 	int fd;
4746 
4747 	rcu_read_lock();
4748 	btf = idr_find(&btf_idr, id);
4749 	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
4750 		btf = ERR_PTR(-ENOENT);
4751 	rcu_read_unlock();
4752 
4753 	if (IS_ERR(btf))
4754 		return PTR_ERR(btf);
4755 
4756 	fd = __btf_new_fd(btf);
4757 	if (fd < 0)
4758 		btf_put(btf);
4759 
4760 	return fd;
4761 }
4762 
/* Return the id under which this BTF object is published in btf_idr */
u32 btf_id(const struct btf *btf)
{
	return btf->id;
}
4767 
/* Three-way comparator for sort()/bsearch() over arrays of BTF ids.
 * Uses explicit comparisons instead of '*pa - *pb': the subtraction can
 * overflow signed int for operand pairs with a large difference, which is
 * undefined behavior and can yield the wrong sign.
 */
static int btf_id_cmp_func(const void *a, const void *b)
{
	const int *pa = a, *pb = b;

	return (*pa > *pb) - (*pa < *pb);
}
4774 
4775 bool btf_id_set_contains(struct btf_id_set *set, u32 id)
4776 {
4777 	return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
4778 }
4779