xref: /openbmc/linux/kernel/bpf/helpers.c (revision 31a254f6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/bpf-cgroup.h>
7 #include <linux/cgroup.h>
8 #include <linux/rcupdate.h>
9 #include <linux/random.h>
10 #include <linux/smp.h>
11 #include <linux/topology.h>
12 #include <linux/ktime.h>
13 #include <linux/sched.h>
14 #include <linux/uidgid.h>
15 #include <linux/filter.h>
16 #include <linux/ctype.h>
17 #include <linux/jiffies.h>
18 #include <linux/pid_namespace.h>
19 #include <linux/poison.h>
20 #include <linux/proc_ns.h>
21 #include <linux/sched/task.h>
22 #include <linux/security.h>
23 #include <linux/btf_ids.h>
24 #include <linux/bpf_mem_alloc.h>
25 
26 #include "../../lib/kstrtox.h"
27 
28 /* If a kernel subsystem allows eBPF programs to call this function, its
29  * verifier_ops->get_func_proto() callback should return
30  * bpf_map_lookup_elem_proto, so that the verifier can check the arguments.
31  *
32  * Different map implementations rely on rcu in the lookup/update/delete map
33  * methods, therefore eBPF programs must run under an rcu lock if they are
34  * allowed to access maps, so check rcu_read_lock_held() or
35  * rcu_read_lock_trace_held() in all three functions.
36  */
37 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
38 {
39 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
40 		     !rcu_read_lock_bh_held());
41 	return (unsigned long) map->ops->map_lookup_elem(map, key);
42 }
43 
44 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
45 	.func		= bpf_map_lookup_elem,
46 	.gpl_only	= false,
47 	.pkt_access	= true,
48 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
49 	.arg1_type	= ARG_CONST_MAP_PTR,
50 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
51 };
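/* Example (illustrative sketch, not part of this file): a subsystem exposes
 * this helper to its programs by returning the proto above from its
 * get_func_proto() callback, e.g.:
 *
 *	static const struct bpf_func_proto *
 *	sample_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 *
 * "sample_func_proto" is a hypothetical name; the real callback is wired up
 * through the subsystem's bpf_verifier_ops.
 */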
52 
53 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
54 	   void *, value, u64, flags)
55 {
56 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
57 		     !rcu_read_lock_bh_held());
58 	return map->ops->map_update_elem(map, key, value, flags);
59 }
60 
61 const struct bpf_func_proto bpf_map_update_elem_proto = {
62 	.func		= bpf_map_update_elem,
63 	.gpl_only	= false,
64 	.pkt_access	= true,
65 	.ret_type	= RET_INTEGER,
66 	.arg1_type	= ARG_CONST_MAP_PTR,
67 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
68 	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
69 	.arg4_type	= ARG_ANYTHING,
70 };
71 
72 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
73 {
74 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
75 		     !rcu_read_lock_bh_held());
76 	return map->ops->map_delete_elem(map, key);
77 }
78 
79 const struct bpf_func_proto bpf_map_delete_elem_proto = {
80 	.func		= bpf_map_delete_elem,
81 	.gpl_only	= false,
82 	.pkt_access	= true,
83 	.ret_type	= RET_INTEGER,
84 	.arg1_type	= ARG_CONST_MAP_PTR,
85 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
86 };
87 
88 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
89 {
90 	return map->ops->map_push_elem(map, value, flags);
91 }
92 
93 const struct bpf_func_proto bpf_map_push_elem_proto = {
94 	.func		= bpf_map_push_elem,
95 	.gpl_only	= false,
96 	.pkt_access	= true,
97 	.ret_type	= RET_INTEGER,
98 	.arg1_type	= ARG_CONST_MAP_PTR,
99 	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
100 	.arg3_type	= ARG_ANYTHING,
101 };
102 
103 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
104 {
105 	return map->ops->map_pop_elem(map, value);
106 }
107 
108 const struct bpf_func_proto bpf_map_pop_elem_proto = {
109 	.func		= bpf_map_pop_elem,
110 	.gpl_only	= false,
111 	.ret_type	= RET_INTEGER,
112 	.arg1_type	= ARG_CONST_MAP_PTR,
113 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
114 };
115 
116 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
117 {
118 	return map->ops->map_peek_elem(map, value);
119 }
120 
121 const struct bpf_func_proto bpf_map_peek_elem_proto = {
122 	.func		= bpf_map_peek_elem,
123 	.gpl_only	= false,
124 	.ret_type	= RET_INTEGER,
125 	.arg1_type	= ARG_CONST_MAP_PTR,
126 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
127 };
128 
129 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
130 {
131 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
132 	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
133 }
134 
135 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
136 	.func		= bpf_map_lookup_percpu_elem,
137 	.gpl_only	= false,
138 	.pkt_access	= true,
139 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
140 	.arg1_type	= ARG_CONST_MAP_PTR,
141 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
142 	.arg3_type	= ARG_ANYTHING,
143 };
144 
145 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
146 	.func		= bpf_user_rnd_u32,
147 	.gpl_only	= false,
148 	.ret_type	= RET_INTEGER,
149 };
150 
151 BPF_CALL_0(bpf_get_smp_processor_id)
152 {
153 	return smp_processor_id();
154 }
155 
156 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
157 	.func		= bpf_get_smp_processor_id,
158 	.gpl_only	= false,
159 	.ret_type	= RET_INTEGER,
160 };
161 
162 BPF_CALL_0(bpf_get_numa_node_id)
163 {
164 	return numa_node_id();
165 }
166 
167 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
168 	.func		= bpf_get_numa_node_id,
169 	.gpl_only	= false,
170 	.ret_type	= RET_INTEGER,
171 };
172 
173 BPF_CALL_0(bpf_ktime_get_ns)
174 {
175 	/* NMI safe access to clock monotonic */
176 	return ktime_get_mono_fast_ns();
177 }
178 
179 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
180 	.func		= bpf_ktime_get_ns,
181 	.gpl_only	= false,
182 	.ret_type	= RET_INTEGER,
183 };
184 
185 BPF_CALL_0(bpf_ktime_get_boot_ns)
186 {
187 	/* NMI safe access to clock boottime */
188 	return ktime_get_boot_fast_ns();
189 }
190 
191 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
192 	.func		= bpf_ktime_get_boot_ns,
193 	.gpl_only	= false,
194 	.ret_type	= RET_INTEGER,
195 };
196 
197 BPF_CALL_0(bpf_ktime_get_coarse_ns)
198 {
199 	return ktime_get_coarse_ns();
200 }
201 
202 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
203 	.func		= bpf_ktime_get_coarse_ns,
204 	.gpl_only	= false,
205 	.ret_type	= RET_INTEGER,
206 };
207 
208 BPF_CALL_0(bpf_ktime_get_tai_ns)
209 {
210 	/* NMI safe access to clock tai */
211 	return ktime_get_tai_fast_ns();
212 }
213 
214 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
215 	.func		= bpf_ktime_get_tai_ns,
216 	.gpl_only	= false,
217 	.ret_type	= RET_INTEGER,
218 };
219 
220 BPF_CALL_0(bpf_get_current_pid_tgid)
221 {
222 	struct task_struct *task = current;
223 
224 	if (unlikely(!task))
225 		return -EINVAL;
226 
227 	return (u64) task->tgid << 32 | task->pid;
228 }
229 
230 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
231 	.func		= bpf_get_current_pid_tgid,
232 	.gpl_only	= false,
233 	.ret_type	= RET_INTEGER,
234 };
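/* Example (illustrative, BPF program side): the return value packs tgid in
 * the upper 32 bits and pid (the thread id) in the lower 32 bits, so programs
 * typically split it as:
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;
 *	u32 pid = (u32)id;
 */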
235 
236 BPF_CALL_0(bpf_get_current_uid_gid)
237 {
238 	struct task_struct *task = current;
239 	kuid_t uid;
240 	kgid_t gid;
241 
242 	if (unlikely(!task))
243 		return -EINVAL;
244 
245 	current_uid_gid(&uid, &gid);
246 	return (u64) from_kgid(&init_user_ns, gid) << 32 |
247 		     from_kuid(&init_user_ns, uid);
248 }
249 
250 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
251 	.func		= bpf_get_current_uid_gid,
252 	.gpl_only	= false,
253 	.ret_type	= RET_INTEGER,
254 };
255 
256 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
257 {
258 	struct task_struct *task = current;
259 
260 	if (unlikely(!task))
261 		goto err_clear;
262 
263 	/* Verifier guarantees that size > 0 */
264 	strscpy_pad(buf, task->comm, size);
265 	return 0;
266 err_clear:
267 	memset(buf, 0, size);
268 	return -EINVAL;
269 }
270 
271 const struct bpf_func_proto bpf_get_current_comm_proto = {
272 	.func		= bpf_get_current_comm,
273 	.gpl_only	= false,
274 	.ret_type	= RET_INTEGER,
275 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
276 	.arg2_type	= ARG_CONST_SIZE,
277 };
278 
279 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
280 
281 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
282 {
283 	arch_spinlock_t *l = (void *)lock;
284 	union {
285 		__u32 val;
286 		arch_spinlock_t lock;
287 	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
288 
289 	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
290 	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
291 	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
292 	preempt_disable();
293 	arch_spin_lock(l);
294 }
295 
296 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
297 {
298 	arch_spinlock_t *l = (void *)lock;
299 
300 	arch_spin_unlock(l);
301 	preempt_enable();
302 }
303 
304 #else
305 
306 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
307 {
308 	atomic_t *l = (void *)lock;
309 
310 	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
311 	do {
312 		atomic_cond_read_relaxed(l, !VAL);
313 	} while (atomic_xchg(l, 1));
314 }
315 
316 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
317 {
318 	atomic_t *l = (void *)lock;
319 
320 	atomic_set_release(l, 0);
321 }
322 
323 #endif
324 
325 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
326 
327 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
328 {
329 	unsigned long flags;
330 
331 	local_irq_save(flags);
332 	__bpf_spin_lock(lock);
333 	__this_cpu_write(irqsave_flags, flags);
334 }
335 
336 notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
337 {
338 	__bpf_spin_lock_irqsave(lock);
339 	return 0;
340 }
341 
342 const struct bpf_func_proto bpf_spin_lock_proto = {
343 	.func		= bpf_spin_lock,
344 	.gpl_only	= false,
345 	.ret_type	= RET_VOID,
346 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
347 	.arg1_btf_id    = BPF_PTR_POISON,
348 };
349 
350 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
351 {
352 	unsigned long flags;
353 
354 	flags = __this_cpu_read(irqsave_flags);
355 	__bpf_spin_unlock(lock);
356 	local_irq_restore(flags);
357 }
358 
359 notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
360 {
361 	__bpf_spin_unlock_irqrestore(lock);
362 	return 0;
363 }
364 
365 const struct bpf_func_proto bpf_spin_unlock_proto = {
366 	.func		= bpf_spin_unlock,
367 	.gpl_only	= false,
368 	.ret_type	= RET_VOID,
369 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
370 	.arg1_btf_id    = BPF_PTR_POISON,
371 };
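/* Example (illustrative sketch, BPF program side): bpf_spin_lock protects
 * fields of a single map value that embeds struct bpf_spin_lock. "my_map" and
 * "val" are hypothetical names:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		long cnt;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 *
 * The verifier enforces that the lock and the fields it protects live in the
 * same map value and that the lock is released before the program returns.
 */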
372 
373 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
374 			   bool lock_src)
375 {
376 	struct bpf_spin_lock *lock;
377 
378 	if (lock_src)
379 		lock = src + map->record->spin_lock_off;
380 	else
381 		lock = dst + map->record->spin_lock_off;
382 	preempt_disable();
383 	__bpf_spin_lock_irqsave(lock);
384 	copy_map_value(map, dst, src);
385 	__bpf_spin_unlock_irqrestore(lock);
386 	preempt_enable();
387 }
388 
389 BPF_CALL_0(bpf_jiffies64)
390 {
391 	return get_jiffies_64();
392 }
393 
394 const struct bpf_func_proto bpf_jiffies64_proto = {
395 	.func		= bpf_jiffies64,
396 	.gpl_only	= false,
397 	.ret_type	= RET_INTEGER,
398 };
399 
400 #ifdef CONFIG_CGROUPS
401 BPF_CALL_0(bpf_get_current_cgroup_id)
402 {
403 	struct cgroup *cgrp;
404 	u64 cgrp_id;
405 
406 	rcu_read_lock();
407 	cgrp = task_dfl_cgroup(current);
408 	cgrp_id = cgroup_id(cgrp);
409 	rcu_read_unlock();
410 
411 	return cgrp_id;
412 }
413 
414 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
415 	.func		= bpf_get_current_cgroup_id,
416 	.gpl_only	= false,
417 	.ret_type	= RET_INTEGER,
418 };
419 
420 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
421 {
422 	struct cgroup *cgrp;
423 	struct cgroup *ancestor;
424 	u64 cgrp_id;
425 
426 	rcu_read_lock();
427 	cgrp = task_dfl_cgroup(current);
428 	ancestor = cgroup_ancestor(cgrp, ancestor_level);
429 	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
430 	rcu_read_unlock();
431 
432 	return cgrp_id;
433 }
434 
435 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
436 	.func		= bpf_get_current_ancestor_cgroup_id,
437 	.gpl_only	= false,
438 	.ret_type	= RET_INTEGER,
439 	.arg1_type	= ARG_ANYTHING,
440 };
441 #endif /* CONFIG_CGROUPS */
442 
443 #define BPF_STRTOX_BASE_MASK 0x1F
444 
445 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
446 			  unsigned long long *res, bool *is_negative)
447 {
448 	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
449 	const char *cur_buf = buf;
450 	size_t cur_len = buf_len;
451 	unsigned int consumed;
452 	size_t val_len;
453 	char str[64];
454 
455 	if (!buf || !buf_len || !res || !is_negative)
456 		return -EINVAL;
457 
458 	if (base != 0 && base != 8 && base != 10 && base != 16)
459 		return -EINVAL;
460 
461 	if (flags & ~BPF_STRTOX_BASE_MASK)
462 		return -EINVAL;
463 
464 	while (cur_buf < buf + buf_len && isspace(*cur_buf))
465 		++cur_buf;
466 
467 	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
468 	if (*is_negative)
469 		++cur_buf;
470 
471 	consumed = cur_buf - buf;
472 	cur_len -= consumed;
473 	if (!cur_len)
474 		return -EINVAL;
475 
476 	cur_len = min(cur_len, sizeof(str) - 1);
477 	memcpy(str, cur_buf, cur_len);
478 	str[cur_len] = '\0';
479 	cur_buf = str;
480 
481 	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
482 	val_len = _parse_integer(cur_buf, base, res);
483 
484 	if (val_len & KSTRTOX_OVERFLOW)
485 		return -ERANGE;
486 
487 	if (val_len == 0)
488 		return -EINVAL;
489 
490 	cur_buf += val_len;
491 	consumed += cur_buf - str;
492 
493 	return consumed;
494 }
495 
496 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
497 			 long long *res)
498 {
499 	unsigned long long _res;
500 	bool is_negative;
501 	int err;
502 
503 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
504 	if (err < 0)
505 		return err;
506 	if (is_negative) {
507 		if ((long long)-_res > 0)
508 			return -ERANGE;
509 		*res = -_res;
510 	} else {
511 		if ((long long)_res < 0)
512 			return -ERANGE;
513 		*res = _res;
514 	}
515 	return err;
516 }
517 
518 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
519 	   long *, res)
520 {
521 	long long _res;
522 	int err;
523 
524 	err = __bpf_strtoll(buf, buf_len, flags, &_res);
525 	if (err < 0)
526 		return err;
527 	if (_res != (long)_res)
528 		return -ERANGE;
529 	*res = _res;
530 	return err;
531 }
532 
533 const struct bpf_func_proto bpf_strtol_proto = {
534 	.func		= bpf_strtol,
535 	.gpl_only	= false,
536 	.ret_type	= RET_INTEGER,
537 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
538 	.arg2_type	= ARG_CONST_SIZE,
539 	.arg3_type	= ARG_ANYTHING,
540 	.arg4_type	= ARG_PTR_TO_LONG,
541 };
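/* Example (illustrative, BPF program side): parse an integer with
 * auto-detected base (flags == 0); "buf" is a hypothetical buffer filled
 * elsewhere:
 *
 *	long val;
 *	int n = bpf_strtol(buf, sizeof(buf), 0, &val);
 *	if (n < 0)
 *		return n;	// not a valid number
 *	// n is the number of characters consumed, val holds the parsed value
 */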
542 
543 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
544 	   unsigned long *, res)
545 {
546 	unsigned long long _res;
547 	bool is_negative;
548 	int err;
549 
550 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
551 	if (err < 0)
552 		return err;
553 	if (is_negative)
554 		return -EINVAL;
555 	if (_res != (unsigned long)_res)
556 		return -ERANGE;
557 	*res = _res;
558 	return err;
559 }
560 
561 const struct bpf_func_proto bpf_strtoul_proto = {
562 	.func		= bpf_strtoul,
563 	.gpl_only	= false,
564 	.ret_type	= RET_INTEGER,
565 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
566 	.arg2_type	= ARG_CONST_SIZE,
567 	.arg3_type	= ARG_ANYTHING,
568 	.arg4_type	= ARG_PTR_TO_LONG,
569 };
570 
571 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
572 {
573 	return strncmp(s1, s2, s1_sz);
574 }
575 
576 static const struct bpf_func_proto bpf_strncmp_proto = {
577 	.func		= bpf_strncmp,
578 	.gpl_only	= false,
579 	.ret_type	= RET_INTEGER,
580 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
581 	.arg2_type	= ARG_CONST_SIZE,
582 	.arg3_type	= ARG_PTR_TO_CONST_STR,
583 };
584 
585 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
586 	   struct bpf_pidns_info *, nsdata, u32, size)
587 {
588 	struct task_struct *task = current;
589 	struct pid_namespace *pidns;
590 	int err = -EINVAL;
591 
592 	if (unlikely(size != sizeof(struct bpf_pidns_info)))
593 		goto clear;
594 
595 	if (unlikely((u64)(dev_t)dev != dev))
596 		goto clear;
597 
598 	if (unlikely(!task))
599 		goto clear;
600 
601 	pidns = task_active_pid_ns(task);
602 	if (unlikely(!pidns)) {
603 		err = -ENOENT;
604 		goto clear;
605 	}
606 
607 	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
608 		goto clear;
609 
610 	nsdata->pid = task_pid_nr_ns(task, pidns);
611 	nsdata->tgid = task_tgid_nr_ns(task, pidns);
612 	return 0;
613 clear:
614 	memset((void *)nsdata, 0, (size_t) size);
615 	return err;
616 }
617 
618 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
619 	.func		= bpf_get_ns_current_pid_tgid,
620 	.gpl_only	= false,
621 	.ret_type	= RET_INTEGER,
622 	.arg1_type	= ARG_ANYTHING,
623 	.arg2_type	= ARG_ANYTHING,
624 	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
625 	.arg4_type      = ARG_CONST_SIZE,
626 };
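/* Example (illustrative): the dev/ino pair identifying the pid namespace is
 * typically obtained in user space, e.g. by stat()ing /proc/self/ns/pid, and
 * handed to the program through a map or rodata. The program then calls:
 *
 *	struct bpf_pidns_info ns;
 *
 *	if (!bpf_get_ns_current_pid_tgid(dev, ino, &ns, sizeof(ns)))
 *		// ns.pid and ns.tgid are relative to that namespace
 *
 * "dev" and "ino" stand for the values ns_match() compares against.
 */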
627 
628 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
629 	.func		= bpf_get_raw_cpu_id,
630 	.gpl_only	= false,
631 	.ret_type	= RET_INTEGER,
632 };
633 
634 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
635 	   u64, flags, void *, data, u64, size)
636 {
637 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
638 		return -EINVAL;
639 
640 	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
641 }
642 
643 const struct bpf_func_proto bpf_event_output_data_proto =  {
644 	.func		= bpf_event_output_data,
645 	.gpl_only       = true,
646 	.ret_type       = RET_INTEGER,
647 	.arg1_type      = ARG_PTR_TO_CTX,
648 	.arg2_type      = ARG_CONST_MAP_PTR,
649 	.arg3_type      = ARG_ANYTHING,
650 	.arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
651 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
652 };
653 
654 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
655 	   const void __user *, user_ptr)
656 {
657 	int ret = copy_from_user(dst, user_ptr, size);
658 
659 	if (unlikely(ret)) {
660 		memset(dst, 0, size);
661 		ret = -EFAULT;
662 	}
663 
664 	return ret;
665 }
666 
667 const struct bpf_func_proto bpf_copy_from_user_proto = {
668 	.func		= bpf_copy_from_user,
669 	.gpl_only	= false,
670 	.might_sleep	= true,
671 	.ret_type	= RET_INTEGER,
672 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
673 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
674 	.arg3_type	= ARG_ANYTHING,
675 };
676 
677 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
678 	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
679 {
680 	int ret;
681 
682 	/* flags is not used yet */
683 	if (unlikely(flags))
684 		return -EINVAL;
685 
686 	if (unlikely(!size))
687 		return 0;
688 
689 	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
690 	if (ret == size)
691 		return 0;
692 
693 	memset(dst, 0, size);
694 	/* Return -EFAULT for partial read */
695 	return ret < 0 ? ret : -EFAULT;
696 }
697 
698 const struct bpf_func_proto bpf_copy_from_user_task_proto = {
699 	.func		= bpf_copy_from_user_task,
700 	.gpl_only	= true,
701 	.might_sleep	= true,
702 	.ret_type	= RET_INTEGER,
703 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
704 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
705 	.arg3_type	= ARG_ANYTHING,
706 	.arg4_type	= ARG_PTR_TO_BTF_ID,
707 	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
708 	.arg5_type	= ARG_ANYTHING
709 };
710 
711 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
712 {
713 	if (cpu >= nr_cpu_ids)
714 		return (unsigned long)NULL;
715 
716 	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
717 }
718 
719 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
720 	.func		= bpf_per_cpu_ptr,
721 	.gpl_only	= false,
722 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
723 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
724 	.arg2_type	= ARG_ANYTHING,
725 };
726 
727 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
728 {
729 	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
730 }
731 
732 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
733 	.func		= bpf_this_cpu_ptr,
734 	.gpl_only	= false,
735 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
736 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
737 };
738 
739 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
740 		size_t bufsz)
741 {
742 	void __user *user_ptr = (__force void __user *)unsafe_ptr;
743 
744 	buf[0] = 0;
745 
746 	switch (fmt_ptype) {
747 	case 's':
748 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
749 		if ((unsigned long)unsafe_ptr < TASK_SIZE)
750 			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
751 		fallthrough;
752 #endif
753 	case 'k':
754 		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
755 	case 'u':
756 		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
757 	}
758 
759 	return -EINVAL;
760 }
761 
762 /* Per-cpu temp buffers used by printf-like helpers to store the binary
763  * representation of the bprintf arguments.
764  */
765 #define MAX_BPRINTF_BIN_ARGS	512
766 
767 /* Support executing three nested bprintf helper calls on a given CPU */
768 #define MAX_BPRINTF_NEST_LEVEL	3
769 struct bpf_bprintf_buffers {
770 	char bin_args[MAX_BPRINTF_BIN_ARGS];
771 	char buf[MAX_BPRINTF_BUF];
772 };
773 
774 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
775 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
776 
777 static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
778 {
779 	int nest_level;
780 
781 	preempt_disable();
782 	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
783 	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
784 		this_cpu_dec(bpf_bprintf_nest_level);
785 		preempt_enable();
786 		return -EBUSY;
787 	}
788 	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
789 
790 	return 0;
791 }
792 
793 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
794 {
795 	if (!data->bin_args && !data->buf)
796 		return;
797 	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
798 		return;
799 	this_cpu_dec(bpf_bprintf_nest_level);
800 	preempt_enable();
801 }
802 
803 /*
804  * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
805  *
806  * Returns a negative value if fmt is an invalid format string or 0 otherwise.
807  *
808  * This can be used in two ways:
809  * - Format string verification only: when data->get_bin_args is false
810  * - Arguments preparation: in addition to the above verification, it writes in
811  *   data->bin_args a binary representation of arguments usable by bstr_printf
812  *   where pointers from BPF have been sanitized.
813  *
814  * In argument preparation mode, if 0 is returned, safe temporary buffers are
815  * allocated and bpf_bprintf_cleanup should be called to free them after use.
816  */
817 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
818 			u32 num_args, struct bpf_bprintf_data *data)
819 {
820 	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
821 	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
822 	struct bpf_bprintf_buffers *buffers = NULL;
823 	size_t sizeof_cur_arg, sizeof_cur_ip;
824 	int err, i, num_spec = 0;
825 	u64 cur_arg;
826 	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
827 
828 	fmt_end = strnchr(fmt, fmt_size, 0);
829 	if (!fmt_end)
830 		return -EINVAL;
831 	fmt_size = fmt_end - fmt;
832 
833 	if (get_buffers && try_get_buffers(&buffers))
834 		return -EBUSY;
835 
836 	if (data->get_bin_args) {
837 		if (num_args)
838 			tmp_buf = buffers->bin_args;
839 		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
840 		data->bin_args = (u32 *)tmp_buf;
841 	}
842 
843 	if (data->get_buf)
844 		data->buf = buffers->buf;
845 
846 	for (i = 0; i < fmt_size; i++) {
847 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
848 			err = -EINVAL;
849 			goto out;
850 		}
851 
852 		if (fmt[i] != '%')
853 			continue;
854 
855 		if (fmt[i + 1] == '%') {
856 			i++;
857 			continue;
858 		}
859 
860 		if (num_spec >= num_args) {
861 			err = -EINVAL;
862 			goto out;
863 		}
864 
865 		/* The string is zero-terminated so if fmt[i] != 0, we can
866 		 * always access fmt[i + 1], in the worst case it will be a 0
867 		 */
868 		i++;
869 
870 		/* skip optional "[0 +-][num]" width formatting field */
871 		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
872 		       fmt[i] == ' ')
873 			i++;
874 		if (fmt[i] >= '1' && fmt[i] <= '9') {
875 			i++;
876 			while (fmt[i] >= '0' && fmt[i] <= '9')
877 				i++;
878 		}
879 
880 		if (fmt[i] == 'p') {
881 			sizeof_cur_arg = sizeof(long);
882 
883 			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
884 			    fmt[i + 2] == 's') {
885 				fmt_ptype = fmt[i + 1];
886 				i += 2;
887 				goto fmt_str;
888 			}
889 
890 			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
891 			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
892 			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
893 			    fmt[i + 1] == 'S') {
894 				/* just kernel pointers */
895 				if (tmp_buf)
896 					cur_arg = raw_args[num_spec];
897 				i++;
898 				goto nocopy_fmt;
899 			}
900 
901 			if (fmt[i + 1] == 'B') {
902 				if (tmp_buf)  {
903 					err = snprintf(tmp_buf,
904 						       (tmp_buf_end - tmp_buf),
905 						       "%pB",
906 						       (void *)(long)raw_args[num_spec]);
907 					tmp_buf += (err + 1);
908 				}
909 
910 				i++;
911 				num_spec++;
912 				continue;
913 			}
914 
915 			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
916 			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
917 			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
918 				err = -EINVAL;
919 				goto out;
920 			}
921 
922 			i += 2;
923 			if (!tmp_buf)
924 				goto nocopy_fmt;
925 
926 			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
927 			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
928 				err = -ENOSPC;
929 				goto out;
930 			}
931 
932 			unsafe_ptr = (char *)(long)raw_args[num_spec];
933 			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
934 						       sizeof_cur_ip);
935 			if (err < 0)
936 				memset(cur_ip, 0, sizeof_cur_ip);
937 
938 			/* hack: bstr_printf expects IP addresses to be
939 			 * pre-formatted as strings, ironically, the easiest way
940 			 * to do that is to call snprintf.
941 			 */
942 			ip_spec[2] = fmt[i - 1];
943 			ip_spec[3] = fmt[i];
944 			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
945 				       ip_spec, &cur_ip);
946 
947 			tmp_buf += err + 1;
948 			num_spec++;
949 
950 			continue;
951 		} else if (fmt[i] == 's') {
952 			fmt_ptype = fmt[i];
953 fmt_str:
954 			if (fmt[i + 1] != 0 &&
955 			    !isspace(fmt[i + 1]) &&
956 			    !ispunct(fmt[i + 1])) {
957 				err = -EINVAL;
958 				goto out;
959 			}
960 
961 			if (!tmp_buf)
962 				goto nocopy_fmt;
963 
964 			if (tmp_buf_end == tmp_buf) {
965 				err = -ENOSPC;
966 				goto out;
967 			}
968 
969 			unsafe_ptr = (char *)(long)raw_args[num_spec];
970 			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
971 						    fmt_ptype,
972 						    tmp_buf_end - tmp_buf);
973 			if (err < 0) {
974 				tmp_buf[0] = '\0';
975 				err = 1;
976 			}
977 
978 			tmp_buf += err;
979 			num_spec++;
980 
981 			continue;
982 		} else if (fmt[i] == 'c') {
983 			if (!tmp_buf)
984 				goto nocopy_fmt;
985 
986 			if (tmp_buf_end == tmp_buf) {
987 				err = -ENOSPC;
988 				goto out;
989 			}
990 
991 			*tmp_buf = raw_args[num_spec];
992 			tmp_buf++;
993 			num_spec++;
994 
995 			continue;
996 		}
997 
998 		sizeof_cur_arg = sizeof(int);
999 
1000 		if (fmt[i] == 'l') {
1001 			sizeof_cur_arg = sizeof(long);
1002 			i++;
1003 		}
1004 		if (fmt[i] == 'l') {
1005 			sizeof_cur_arg = sizeof(long long);
1006 			i++;
1007 		}
1008 
1009 		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
1010 		    fmt[i] != 'x' && fmt[i] != 'X') {
1011 			err = -EINVAL;
1012 			goto out;
1013 		}
1014 
1015 		if (tmp_buf)
1016 			cur_arg = raw_args[num_spec];
1017 nocopy_fmt:
1018 		if (tmp_buf) {
1019 			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
1020 			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
1021 				err = -ENOSPC;
1022 				goto out;
1023 			}
1024 
1025 			if (sizeof_cur_arg == 8) {
1026 				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
1027 				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
1028 			} else {
1029 				*(u32 *)tmp_buf = (u32)(long)cur_arg;
1030 			}
1031 			tmp_buf += sizeof_cur_arg;
1032 		}
1033 		num_spec++;
1034 	}
1035 
1036 	err = 0;
1037 out:
1038 	if (err)
1039 		bpf_bprintf_cleanup(data);
1040 	return err;
1041 }
1042 
1043 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
1044 	   const void *, args, u32, data_len)
1045 {
1046 	struct bpf_bprintf_data data = {
1047 		.get_bin_args	= true,
1048 	};
1049 	int err, num_args;
1050 
1051 	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1052 	    (data_len && !args))
1053 		return -EINVAL;
1054 	num_args = data_len / 8;
1055 
1056 	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1057 	 * can safely give an unbounded size.
1058 	 */
1059 	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1060 	if (err < 0)
1061 		return err;
1062 
1063 	err = bstr_printf(str, str_size, fmt, data.bin_args);
1064 
1065 	bpf_bprintf_cleanup(&data);
1066 
1067 	return err + 1;
1068 }
1069 
1070 const struct bpf_func_proto bpf_snprintf_proto = {
1071 	.func		= bpf_snprintf,
1072 	.gpl_only	= true,
1073 	.ret_type	= RET_INTEGER,
1074 	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
1075 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1076 	.arg3_type	= ARG_PTR_TO_CONST_STR,
1077 	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
1078 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1079 };
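/* Example (illustrative, BPF program side): the format string must be a
 * read-only constant and the variadic arguments are passed as a u64 array
 * whose length in bytes is the last argument; "pid" and "cpu" are
 * hypothetical locals:
 *
 *	static const char fmt[] = "pid=%d cpu=%u";
 *	char out[32];
 *	u64 args[] = { pid, cpu };
 *	long n = bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *	// on success n is the length of the formatted string including the
 *	// trailing NUL; n > sizeof(out) means the output was truncated
 */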
1080 
1081 /* BPF map elements can contain 'struct bpf_timer'.
1082  * Such a map owns all of its BPF timers.
1083  * 'struct bpf_timer' is allocated as part of map element allocation
1084  * and it's zero initialized.
1085  * That space is used to keep 'struct bpf_timer_kern'.
1086  * bpf_timer_init() allocates 'struct bpf_hrtimer', inits the hrtimer, and
1087  * remembers the 'struct bpf_map *' pointer it's part of.
1088  * bpf_timer_set_callback() increments the prog refcnt and assigns the bpf callback_fn.
1089  * bpf_timer_start() arms the timer.
1090  * If the user space reference to a map goes to zero at this point, the
1091  * ops->map_release_uref callback is responsible for cancelling the timers,
1092  * freeing their memory, and decrementing the progs' refcnts.
1093  * bpf_timer_cancel() cancels the timer and decrements the prog's refcnt.
1094  * Inner maps can contain bpf timers as well. ops->map_release_uref frees
1095  * the timers when the inner map is replaced or deleted by user space.
1096  */
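/* Example (illustrative sketch, BPF program side) of the lifecycle described
 * above; "my_map", "elem" and "timer_cb" are hypothetical names and the map
 * value embeds a struct bpf_timer:
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		// runs from the hrtimer softirq, must return 0
 *		return 0;
 *	}
 *
 *	struct elem *e = bpf_map_lookup_elem(&my_map, &key);
 *	if (e) {
 *		bpf_timer_init(&e->t, &my_map, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&e->t, timer_cb);
 *		bpf_timer_start(&e->t, 1000000, 0);	// fire in 1ms
 *	}
 */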
1097 struct bpf_hrtimer {
1098 	struct hrtimer timer;
1099 	struct bpf_map *map;
1100 	struct bpf_prog *prog;
1101 	void __rcu *callback_fn;
1102 	void *value;
1103 };
1104 
1105 /* the actual struct hidden inside uapi struct bpf_timer */
1106 struct bpf_timer_kern {
1107 	struct bpf_hrtimer *timer;
1108 	/* bpf_spin_lock is used here instead of spinlock_t to make
1109 	 * sure that it always fits into space reserved by struct bpf_timer
1110 	 * regardless of LOCKDEP and spinlock debug flags.
1111 	 */
1112 	struct bpf_spin_lock lock;
1113 } __attribute__((aligned(8)));
1114 
1115 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1116 
1117 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1118 {
1119 	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1120 	struct bpf_map *map = t->map;
1121 	void *value = t->value;
1122 	bpf_callback_t callback_fn;
1123 	void *key;
1124 	u32 idx;
1125 
1126 	BTF_TYPE_EMIT(struct bpf_timer);
1127 	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
1128 	if (!callback_fn)
1129 		goto out;
1130 
1131 	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1132 	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
1133 	 * Remember the timer this callback is servicing to prevent
1134 	 * deadlock if callback_fn() calls bpf_timer_cancel() or
1135 	 * bpf_map_delete_elem() on the same timer.
1136 	 */
1137 	this_cpu_write(hrtimer_running, t);
1138 	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1139 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1140 
1141 		/* compute the key */
1142 		idx = ((char *)value - array->value) / array->elem_size;
1143 		key = &idx;
1144 	} else { /* hash or lru */
1145 		key = value - round_up(map->key_size, 8);
1146 	}
1147 
1148 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1149 	/* The verifier checked that return value is zero. */
1150 
1151 	this_cpu_write(hrtimer_running, NULL);
1152 out:
1153 	return HRTIMER_NORESTART;
1154 }
1155 
1156 BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
1157 	   u64, flags)
1158 {
1159 	clockid_t clockid = flags & (MAX_CLOCKS - 1);
1160 	struct bpf_hrtimer *t;
1161 	int ret = 0;
1162 
1163 	BUILD_BUG_ON(MAX_CLOCKS != 16);
1164 	BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer));
1165 	BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer));
1166 
1167 	if (in_nmi())
1168 		return -EOPNOTSUPP;
1169 
1170 	if (flags >= MAX_CLOCKS ||
1171 	    /* similar to timerfd except _ALARM variants are not supported */
1172 	    (clockid != CLOCK_MONOTONIC &&
1173 	     clockid != CLOCK_REALTIME &&
1174 	     clockid != CLOCK_BOOTTIME))
1175 		return -EINVAL;
1176 	__bpf_spin_lock_irqsave(&timer->lock);
1177 	t = timer->timer;
1178 	if (t) {
1179 		ret = -EBUSY;
1180 		goto out;
1181 	}
1182 	/* allocate hrtimer via map_kmalloc to use memcg accounting */
1183 	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
1184 	if (!t) {
1185 		ret = -ENOMEM;
1186 		goto out;
1187 	}
1188 	t->value = (void *)timer - map->record->timer_off;
1189 	t->map = map;
1190 	t->prog = NULL;
1191 	rcu_assign_pointer(t->callback_fn, NULL);
1192 	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
1193 	t->timer.function = bpf_timer_cb;
1194 	WRITE_ONCE(timer->timer, t);
1195 	/* Guarantee the order between timer->timer and map->usercnt. So
1196 	 * when there are concurrent uref release and bpf timer init, either
1197 	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
1198 	 * timer or atomic64_read() below returns a zero usercnt.
1199 	 */
1200 	smp_mb();
1201 	if (!atomic64_read(&map->usercnt)) {
1202 		/* maps with timers must be either held by user space
1203 		 * or pinned in bpffs.
1204 		 */
1205 		WRITE_ONCE(timer->timer, NULL);
1206 		kfree(t);
1207 		ret = -EPERM;
1208 	}
1209 out:
1210 	__bpf_spin_unlock_irqrestore(&timer->lock);
1211 	return ret;
1212 }
1213 
1214 static const struct bpf_func_proto bpf_timer_init_proto = {
1215 	.func		= bpf_timer_init,
1216 	.gpl_only	= true,
1217 	.ret_type	= RET_INTEGER,
1218 	.arg1_type	= ARG_PTR_TO_TIMER,
1219 	.arg2_type	= ARG_CONST_MAP_PTR,
1220 	.arg3_type	= ARG_ANYTHING,
1221 };
1222 
1223 BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn,
1224 	   struct bpf_prog_aux *, aux)
1225 {
1226 	struct bpf_prog *prev, *prog = aux->prog;
1227 	struct bpf_hrtimer *t;
1228 	int ret = 0;
1229 
1230 	if (in_nmi())
1231 		return -EOPNOTSUPP;
1232 	__bpf_spin_lock_irqsave(&timer->lock);
1233 	t = timer->timer;
1234 	if (!t) {
1235 		ret = -EINVAL;
1236 		goto out;
1237 	}
1238 	if (!atomic64_read(&t->map->usercnt)) {
1239 		/* maps with timers must be either held by user space
1240 		 * or pinned in bpffs. Otherwise timer might still be
1241 		 * running even when bpf prog is detached and user space
1242 		 * is gone, since map_release_uref won't ever be called.
1243 		 */
1244 		ret = -EPERM;
1245 		goto out;
1246 	}
1247 	prev = t->prog;
1248 	if (prev != prog) {
1249 		/* Bump prog refcnt once. Every bpf_timer_set_callback()
1250 		 * can pick different callback_fn-s within the same prog.
1251 		 */
1252 		prog = bpf_prog_inc_not_zero(prog);
1253 		if (IS_ERR(prog)) {
1254 			ret = PTR_ERR(prog);
1255 			goto out;
1256 		}
1257 		if (prev)
1258 			/* Drop prev prog refcnt when swapping with new prog */
1259 			bpf_prog_put(prev);
1260 		t->prog = prog;
1261 	}
1262 	rcu_assign_pointer(t->callback_fn, callback_fn);
1263 out:
1264 	__bpf_spin_unlock_irqrestore(&timer->lock);
1265 	return ret;
1266 }
1267 
1268 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1269 	.func		= bpf_timer_set_callback,
1270 	.gpl_only	= true,
1271 	.ret_type	= RET_INTEGER,
1272 	.arg1_type	= ARG_PTR_TO_TIMER,
1273 	.arg2_type	= ARG_PTR_TO_FUNC,
1274 };
1275 
1276 BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags)
1277 {
1278 	struct bpf_hrtimer *t;
1279 	int ret = 0;
1280 	enum hrtimer_mode mode;
1281 
1282 	if (in_nmi())
1283 		return -EOPNOTSUPP;
1284 	if (flags > BPF_F_TIMER_ABS)
1285 		return -EINVAL;
1286 	__bpf_spin_lock_irqsave(&timer->lock);
1287 	t = timer->timer;
1288 	if (!t || !t->prog) {
1289 		ret = -EINVAL;
1290 		goto out;
1291 	}
1292 
1293 	if (flags & BPF_F_TIMER_ABS)
1294 		mode = HRTIMER_MODE_ABS_SOFT;
1295 	else
1296 		mode = HRTIMER_MODE_REL_SOFT;
1297 
1298 	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
1299 out:
1300 	__bpf_spin_unlock_irqrestore(&timer->lock);
1301 	return ret;
1302 }
1303 
1304 static const struct bpf_func_proto bpf_timer_start_proto = {
1305 	.func		= bpf_timer_start,
1306 	.gpl_only	= true,
1307 	.ret_type	= RET_INTEGER,
1308 	.arg1_type	= ARG_PTR_TO_TIMER,
1309 	.arg2_type	= ARG_ANYTHING,
1310 	.arg3_type	= ARG_ANYTHING,
1311 };
1312 
1313 static void drop_prog_refcnt(struct bpf_hrtimer *t)
1314 {
1315 	struct bpf_prog *prog = t->prog;
1316 
1317 	if (prog) {
1318 		bpf_prog_put(prog);
1319 		t->prog = NULL;
1320 		rcu_assign_pointer(t->callback_fn, NULL);
1321 	}
1322 }
1323 
1324 BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
1325 {
1326 	struct bpf_hrtimer *t;
1327 	int ret = 0;
1328 
1329 	if (in_nmi())
1330 		return -EOPNOTSUPP;
1331 	__bpf_spin_lock_irqsave(&timer->lock);
1332 	t = timer->timer;
1333 	if (!t) {
1334 		ret = -EINVAL;
1335 		goto out;
1336 	}
1337 	if (this_cpu_read(hrtimer_running) == t) {
1338 		/* If bpf callback_fn is trying to bpf_timer_cancel()
1339 		 * its own timer the hrtimer_cancel() will deadlock
1340 		 * since it waits for callback_fn to finish
1341 		 */
1342 		ret = -EDEADLK;
1343 		goto out;
1344 	}
1345 	drop_prog_refcnt(t);
1346 out:
1347 	__bpf_spin_unlock_irqrestore(&timer->lock);
1348 	/* Cancel the timer and wait for associated callback to finish
1349 	 * if it was running.
1350 	 */
1351 	ret = ret ?: hrtimer_cancel(&t->timer);
1352 	return ret;
1353 }
1354 
1355 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1356 	.func		= bpf_timer_cancel,
1357 	.gpl_only	= true,
1358 	.ret_type	= RET_INTEGER,
1359 	.arg1_type	= ARG_PTR_TO_TIMER,
1360 };
1361 
1362 /* This function is called by map_delete/update_elem for an individual element
1363  * and by ops->map_release_uref when the user space reference to a map reaches zero.
1364  */
1365 void bpf_timer_cancel_and_free(void *val)
1366 {
1367 	struct bpf_timer_kern *timer = val;
1368 	struct bpf_hrtimer *t;
1369 
1370 	/* Performance optimization: read timer->timer without lock first. */
1371 	if (!READ_ONCE(timer->timer))
1372 		return;
1373 
1374 	__bpf_spin_lock_irqsave(&timer->lock);
1375 	/* re-read it under lock */
1376 	t = timer->timer;
1377 	if (!t)
1378 		goto out;
1379 	drop_prog_refcnt(t);
1380 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1381 	 * this timer, since it won't be initialized.
1382 	 */
1383 	WRITE_ONCE(timer->timer, NULL);
1384 out:
1385 	__bpf_spin_unlock_irqrestore(&timer->lock);
1386 	if (!t)
1387 		return;
1388 	/* Cancel the timer and wait for callback to complete if it was running.
1389 	 * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
1390 	 * right after for both preallocated and non-preallocated maps.
1391 	 * The timer->timer = NULL was already done and no code path can
1392 	 * see address 't' anymore.
1393 	 *
1394 	 * Check that bpf_map_delete/update_elem() wasn't called from timer
1395 	 * callback_fn. In such case don't call hrtimer_cancel() (since it will
1396 	 * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
1397 	 * return -1). Though callback_fn is still running on this cpu it's
1398 	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1399 	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
1400 	 * since timer->timer = NULL was already done. The timer will be
1401 	 * effectively cancelled because bpf_timer_cb() will return
1402 	 * HRTIMER_NORESTART.
1403 	 */
1404 	if (this_cpu_read(hrtimer_running) != t)
1405 		hrtimer_cancel(&t->timer);
1406 	kfree(t);
1407 }
1408 
1409 BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
1410 {
1411 	unsigned long *kptr = map_value;
1412 
1413 	return xchg(kptr, (unsigned long)ptr);
1414 }
1415 
1416 /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
1417  * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
1418  * denote type that verifier will determine.
1419  */
1420 static const struct bpf_func_proto bpf_kptr_xchg_proto = {
1421 	.func         = bpf_kptr_xchg,
1422 	.gpl_only     = false,
1423 	.ret_type     = RET_PTR_TO_BTF_ID_OR_NULL,
1424 	.ret_btf_id   = BPF_PTR_POISON,
1425 	.arg1_type    = ARG_PTR_TO_KPTR,
1426 	.arg2_type    = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
1427 	.arg2_btf_id  = BPF_PTR_POISON,
1428 };
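/* Example (illustrative, BPF program side): a map value declares a kptr field
 * and the program atomically swaps an owned pointer into it, receiving
 * ownership of whatever was stored there before. Names are hypothetical and
 * the concrete pointee type depends on the use case:
 *
 *	struct map_value {
 *		struct my_obj __kptr *ptr;
 *	};
 *
 *	old = bpf_kptr_xchg(&v->ptr, new);
 *	if (old)
 *		// "old" is now owned by the program and must be released
 */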
1429 
1430 /* Since the upper 8 bits of dynptr->size are reserved, the
1431  * maximum supported size is 2^24 - 1.
1432  */
1433 #define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
1434 #define DYNPTR_TYPE_SHIFT	28
1435 #define DYNPTR_SIZE_MASK	0xFFFFFF
1436 #define DYNPTR_RDONLY_BIT	BIT(31)
1437 
1438 static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1439 {
1440 	return ptr->size & DYNPTR_RDONLY_BIT;
1441 }
1442 
1443 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
1444 {
1445 	ptr->size |= DYNPTR_RDONLY_BIT;
1446 }
1447 
1448 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1449 {
1450 	ptr->size |= type << DYNPTR_TYPE_SHIFT;
1451 }
1452 
1453 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
1454 {
1455 	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
1456 }
1457 
1458 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
1459 {
1460 	return ptr->size & DYNPTR_SIZE_MASK;
1461 }
1462 
1463 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
1464 {
1465 	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
1466 
1467 	ptr->size = new_size | metadata;
1468 }
1469 
1470 int bpf_dynptr_check_size(u32 size)
1471 {
1472 	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1473 }
1474 
1475 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1476 		     enum bpf_dynptr_type type, u32 offset, u32 size)
1477 {
1478 	ptr->data = data;
1479 	ptr->offset = offset;
1480 	ptr->size = size;
1481 	bpf_dynptr_set_type(ptr, type);
1482 }
1483 
1484 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1485 {
1486 	memset(ptr, 0, sizeof(*ptr));
1487 }
1488 
1489 static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
1490 {
1491 	u32 size = __bpf_dynptr_size(ptr);
1492 
1493 	if (len > size || offset > size - len)
1494 		return -E2BIG;
1495 
1496 	return 0;
1497 }
1498 
1499 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1500 {
1501 	int err;
1502 
1503 	BTF_TYPE_EMIT(struct bpf_dynptr);
1504 
1505 	err = bpf_dynptr_check_size(size);
1506 	if (err)
1507 		goto error;
1508 
1509 	/* flags is currently unsupported */
1510 	if (flags) {
1511 		err = -EINVAL;
1512 		goto error;
1513 	}
1514 
1515 	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1516 
1517 	return 0;
1518 
1519 error:
1520 	bpf_dynptr_set_null(ptr);
1521 	return err;
1522 }
1523 
1524 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1525 	.func		= bpf_dynptr_from_mem,
1526 	.gpl_only	= false,
1527 	.ret_type	= RET_INTEGER,
1528 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1529 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1530 	.arg3_type	= ARG_ANYTHING,
1531 	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
1532 };
1533 
1534 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
1535 	   u32, offset, u64, flags)
1536 {
1537 	enum bpf_dynptr_type type;
1538 	int err;
1539 
1540 	if (!src->data || flags)
1541 		return -EINVAL;
1542 
1543 	err = bpf_dynptr_check_off_len(src, offset, len);
1544 	if (err)
1545 		return err;
1546 
1547 	type = bpf_dynptr_get_type(src);
1548 
1549 	switch (type) {
1550 	case BPF_DYNPTR_TYPE_LOCAL:
1551 	case BPF_DYNPTR_TYPE_RINGBUF:
1552 		/* Source and destination may possibly overlap, hence use memmove to
1553 		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1554 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1555 		 */
1556 		memmove(dst, src->data + src->offset + offset, len);
1557 		return 0;
1558 	case BPF_DYNPTR_TYPE_SKB:
1559 		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1560 	case BPF_DYNPTR_TYPE_XDP:
1561 		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1562 	default:
1563 		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1564 		return -EFAULT;
1565 	}
1566 }
1567 
1568 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1569 	.func		= bpf_dynptr_read,
1570 	.gpl_only	= false,
1571 	.ret_type	= RET_INTEGER,
1572 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1573 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1574 	.arg3_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1575 	.arg4_type	= ARG_ANYTHING,
1576 	.arg5_type	= ARG_ANYTHING,
1577 };
1578 
1579 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
1580 	   u32, len, u64, flags)
1581 {
1582 	enum bpf_dynptr_type type;
1583 	int err;
1584 
1585 	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1586 		return -EINVAL;
1587 
1588 	err = bpf_dynptr_check_off_len(dst, offset, len);
1589 	if (err)
1590 		return err;
1591 
1592 	type = bpf_dynptr_get_type(dst);
1593 
1594 	switch (type) {
1595 	case BPF_DYNPTR_TYPE_LOCAL:
1596 	case BPF_DYNPTR_TYPE_RINGBUF:
1597 		if (flags)
1598 			return -EINVAL;
1599 		/* Source and destination may possibly overlap, hence use memmove to
1600 		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptrs
1601 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1602 		 */
1603 		memmove(dst->data + dst->offset + offset, src, len);
1604 		return 0;
1605 	case BPF_DYNPTR_TYPE_SKB:
1606 		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1607 					     flags);
1608 	case BPF_DYNPTR_TYPE_XDP:
1609 		if (flags)
1610 			return -EINVAL;
1611 		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1612 	default:
1613 		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1614 		return -EFAULT;
1615 	}
1616 }
1617 
1618 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1619 	.func		= bpf_dynptr_write,
1620 	.gpl_only	= false,
1621 	.ret_type	= RET_INTEGER,
1622 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1623 	.arg2_type	= ARG_ANYTHING,
1624 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1625 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
1626 	.arg5_type	= ARG_ANYTHING,
1627 };
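/* Example (illustrative, BPF program side): create a local dynptr over a
 * stack buffer and use the generic read/write accessors; "data" and "out" are
 * hypothetical buffers:
 *
 *	char buf[64];
 *	struct bpf_dynptr dptr;
 *
 *	if (bpf_dynptr_from_mem(buf, sizeof(buf), 0, &dptr))
 *		return 0;
 *	bpf_dynptr_write(&dptr, 0, data, sizeof(data), 0);
 *	bpf_dynptr_read(out, sizeof(out), &dptr, 0, 0);
 *
 * Offsets and lengths are validated against the dynptr's size at run time.
 */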
1628 
1629 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
1630 {
1631 	enum bpf_dynptr_type type;
1632 	int err;
1633 
1634 	if (!ptr->data)
1635 		return 0;
1636 
1637 	err = bpf_dynptr_check_off_len(ptr, offset, len);
1638 	if (err)
1639 		return 0;
1640 
1641 	if (__bpf_dynptr_is_rdonly(ptr))
1642 		return 0;
1643 
1644 	type = bpf_dynptr_get_type(ptr);
1645 
1646 	switch (type) {
1647 	case BPF_DYNPTR_TYPE_LOCAL:
1648 	case BPF_DYNPTR_TYPE_RINGBUF:
1649 		return (unsigned long)(ptr->data + ptr->offset + offset);
1650 	case BPF_DYNPTR_TYPE_SKB:
1651 	case BPF_DYNPTR_TYPE_XDP:
1652 		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
1653 		return 0;
1654 	default:
1655 		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
1656 		return 0;
1657 	}
1658 }
1659 
1660 static const struct bpf_func_proto bpf_dynptr_data_proto = {
1661 	.func		= bpf_dynptr_data,
1662 	.gpl_only	= false,
1663 	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
1664 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1665 	.arg2_type	= ARG_ANYTHING,
1666 	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
1667 };
1668 
1669 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1670 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1671 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1672 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1673 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1674 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1675 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1676 
1677 const struct bpf_func_proto *
1678 bpf_base_func_proto(enum bpf_func_id func_id)
1679 {
1680 	switch (func_id) {
1681 	case BPF_FUNC_map_lookup_elem:
1682 		return &bpf_map_lookup_elem_proto;
1683 	case BPF_FUNC_map_update_elem:
1684 		return &bpf_map_update_elem_proto;
1685 	case BPF_FUNC_map_delete_elem:
1686 		return &bpf_map_delete_elem_proto;
1687 	case BPF_FUNC_map_push_elem:
1688 		return &bpf_map_push_elem_proto;
1689 	case BPF_FUNC_map_pop_elem:
1690 		return &bpf_map_pop_elem_proto;
1691 	case BPF_FUNC_map_peek_elem:
1692 		return &bpf_map_peek_elem_proto;
1693 	case BPF_FUNC_map_lookup_percpu_elem:
1694 		return &bpf_map_lookup_percpu_elem_proto;
1695 	case BPF_FUNC_get_prandom_u32:
1696 		return &bpf_get_prandom_u32_proto;
1697 	case BPF_FUNC_get_smp_processor_id:
1698 		return &bpf_get_raw_smp_processor_id_proto;
1699 	case BPF_FUNC_get_numa_node_id:
1700 		return &bpf_get_numa_node_id_proto;
1701 	case BPF_FUNC_tail_call:
1702 		return &bpf_tail_call_proto;
1703 	case BPF_FUNC_ktime_get_ns:
1704 		return &bpf_ktime_get_ns_proto;
1705 	case BPF_FUNC_ktime_get_boot_ns:
1706 		return &bpf_ktime_get_boot_ns_proto;
1707 	case BPF_FUNC_ktime_get_tai_ns:
1708 		return &bpf_ktime_get_tai_ns_proto;
1709 	case BPF_FUNC_ringbuf_output:
1710 		return &bpf_ringbuf_output_proto;
1711 	case BPF_FUNC_ringbuf_reserve:
1712 		return &bpf_ringbuf_reserve_proto;
1713 	case BPF_FUNC_ringbuf_submit:
1714 		return &bpf_ringbuf_submit_proto;
1715 	case BPF_FUNC_ringbuf_discard:
1716 		return &bpf_ringbuf_discard_proto;
1717 	case BPF_FUNC_ringbuf_query:
1718 		return &bpf_ringbuf_query_proto;
1719 	case BPF_FUNC_strncmp:
1720 		return &bpf_strncmp_proto;
1721 	case BPF_FUNC_strtol:
1722 		return &bpf_strtol_proto;
1723 	case BPF_FUNC_strtoul:
1724 		return &bpf_strtoul_proto;
1725 	default:
1726 		break;
1727 	}
1728 
1729 	if (!bpf_capable())
1730 		return NULL;
1731 
1732 	switch (func_id) {
1733 	case BPF_FUNC_spin_lock:
1734 		return &bpf_spin_lock_proto;
1735 	case BPF_FUNC_spin_unlock:
1736 		return &bpf_spin_unlock_proto;
1737 	case BPF_FUNC_jiffies64:
1738 		return &bpf_jiffies64_proto;
1739 	case BPF_FUNC_per_cpu_ptr:
1740 		return &bpf_per_cpu_ptr_proto;
1741 	case BPF_FUNC_this_cpu_ptr:
1742 		return &bpf_this_cpu_ptr_proto;
1743 	case BPF_FUNC_timer_init:
1744 		return &bpf_timer_init_proto;
1745 	case BPF_FUNC_timer_set_callback:
1746 		return &bpf_timer_set_callback_proto;
1747 	case BPF_FUNC_timer_start:
1748 		return &bpf_timer_start_proto;
1749 	case BPF_FUNC_timer_cancel:
1750 		return &bpf_timer_cancel_proto;
1751 	case BPF_FUNC_kptr_xchg:
1752 		return &bpf_kptr_xchg_proto;
1753 	case BPF_FUNC_for_each_map_elem:
1754 		return &bpf_for_each_map_elem_proto;
1755 	case BPF_FUNC_loop:
1756 		return &bpf_loop_proto;
1757 	case BPF_FUNC_user_ringbuf_drain:
1758 		return &bpf_user_ringbuf_drain_proto;
1759 	case BPF_FUNC_ringbuf_reserve_dynptr:
1760 		return &bpf_ringbuf_reserve_dynptr_proto;
1761 	case BPF_FUNC_ringbuf_submit_dynptr:
1762 		return &bpf_ringbuf_submit_dynptr_proto;
1763 	case BPF_FUNC_ringbuf_discard_dynptr:
1764 		return &bpf_ringbuf_discard_dynptr_proto;
1765 	case BPF_FUNC_dynptr_from_mem:
1766 		return &bpf_dynptr_from_mem_proto;
1767 	case BPF_FUNC_dynptr_read:
1768 		return &bpf_dynptr_read_proto;
1769 	case BPF_FUNC_dynptr_write:
1770 		return &bpf_dynptr_write_proto;
1771 	case BPF_FUNC_dynptr_data:
1772 		return &bpf_dynptr_data_proto;
1773 #ifdef CONFIG_CGROUPS
1774 	case BPF_FUNC_cgrp_storage_get:
1775 		return &bpf_cgrp_storage_get_proto;
1776 	case BPF_FUNC_cgrp_storage_delete:
1777 		return &bpf_cgrp_storage_delete_proto;
1778 	case BPF_FUNC_get_current_cgroup_id:
1779 		return &bpf_get_current_cgroup_id_proto;
1780 	case BPF_FUNC_get_current_ancestor_cgroup_id:
1781 		return &bpf_get_current_ancestor_cgroup_id_proto;
1782 #endif
1783 	default:
1784 		break;
1785 	}
1786 
1787 	if (!perfmon_capable())
1788 		return NULL;
1789 
1790 	switch (func_id) {
1791 	case BPF_FUNC_trace_printk:
1792 		return bpf_get_trace_printk_proto();
1793 	case BPF_FUNC_get_current_task:
1794 		return &bpf_get_current_task_proto;
1795 	case BPF_FUNC_get_current_task_btf:
1796 		return &bpf_get_current_task_btf_proto;
1797 	case BPF_FUNC_probe_read_user:
1798 		return &bpf_probe_read_user_proto;
1799 	case BPF_FUNC_probe_read_kernel:
1800 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1801 		       NULL : &bpf_probe_read_kernel_proto;
1802 	case BPF_FUNC_probe_read_user_str:
1803 		return &bpf_probe_read_user_str_proto;
1804 	case BPF_FUNC_probe_read_kernel_str:
1805 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1806 		       NULL : &bpf_probe_read_kernel_str_proto;
1807 	case BPF_FUNC_snprintf_btf:
1808 		return &bpf_snprintf_btf_proto;
1809 	case BPF_FUNC_snprintf:
1810 		return &bpf_snprintf_proto;
1811 	case BPF_FUNC_task_pt_regs:
1812 		return &bpf_task_pt_regs_proto;
1813 	case BPF_FUNC_trace_vprintk:
1814 		return bpf_get_trace_vprintk_proto();
1815 	default:
1816 		return NULL;
1817 	}
1818 }
1819 
1820 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec);
1821 
1822 void bpf_list_head_free(const struct btf_field *field, void *list_head,
1823 			struct bpf_spin_lock *spin_lock)
1824 {
1825 	struct list_head *head = list_head, *orig_head = list_head;
1826 
1827 	BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
1828 	BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
1829 
1830 	/* Do the actual list draining outside the lock to not hold the lock for
1831 	 * too long, and also prevent deadlocks if tracing programs end up
1832 	 * executing on entry/exit of functions called inside the critical
1833 	 * section, and end up doing map ops that call bpf_list_head_free for
1834 	 * the same map value again.
1835 	 */
1836 	__bpf_spin_lock_irqsave(spin_lock);
1837 	if (!head->next || list_empty(head))
1838 		goto unlock;
1839 	head = head->next;
1840 unlock:
1841 	INIT_LIST_HEAD(orig_head);
1842 	__bpf_spin_unlock_irqrestore(spin_lock);
1843 
1844 	while (head != orig_head) {
1845 		void *obj = head;
1846 
1847 		obj -= field->graph_root.node_offset;
1848 		head = head->next;
1849 		/* The contained type can also have resources, including a
1850 		 * bpf_list_head which needs to be freed.
1851 		 */
1852 		migrate_disable();
1853 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec);
1854 		migrate_enable();
1855 	}
1856 }
1857 
1858 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
1859  * 'rb_node *', so the field name of rb_node within the containing struct
1860  * is not needed.
1861  *
1862  * Since bpf_rb_tree's node type has a corresponding struct btf_field with
1863  * graph_root.node_offset, it's not necessary to know the field name or
1864  * type of the node struct.
1865  */
1866 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
1867 	for (pos = rb_first_postorder(root); \
1868 	    pos && ({ n = rb_next_postorder(pos); 1; }); \
1869 	    pos = n)
1870 
1871 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
1872 		      struct bpf_spin_lock *spin_lock)
1873 {
1874 	struct rb_root_cached orig_root, *root = rb_root;
1875 	struct rb_node *pos, *n;
1876 	void *obj;
1877 
1878 	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
1879 	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
1880 
1881 	__bpf_spin_lock_irqsave(spin_lock);
1882 	orig_root = *root;
1883 	*root = RB_ROOT_CACHED;
1884 	__bpf_spin_unlock_irqrestore(spin_lock);
1885 
1886 	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
1887 		obj = pos;
1888 		obj -= field->graph_root.node_offset;
1889 
1890 
1891 		migrate_disable();
1892 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec);
1893 		migrate_enable();
1894 	}
1895 }
1896 
1897 __diag_push();
1898 __diag_ignore_all("-Wmissing-prototypes",
1899 		  "Global functions as their definitions will be in vmlinux BTF");
1900 
1901 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
1902 {
1903 	struct btf_struct_meta *meta = meta__ign;
1904 	u64 size = local_type_id__k;
1905 	void *p;
1906 
1907 	p = bpf_mem_alloc(&bpf_global_ma, size);
1908 	if (!p)
1909 		return NULL;
1910 	if (meta)
1911 		bpf_obj_init(meta->record, p);
1912 	return p;
1913 }
1914 
1915 /* Must be called under migrate_disable(), as required by bpf_mem_free */
1916 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec)
1917 {
1918 	if (rec && rec->refcount_off >= 0 &&
1919 	    !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
1920 		/* Object is refcounted and refcount_dec didn't result in 0
1921 		 * refcount. Return without freeing the object
1922 		 */
1923 		return;
1924 	}
1925 
1926 	if (rec)
1927 		bpf_obj_free_fields(rec, p);
1928 
1929 	if (rec && rec->refcount_off >= 0)
1930 		bpf_mem_free_rcu(&bpf_global_ma, p);
1931 	else
1932 		bpf_mem_free(&bpf_global_ma, p);
1933 }
1934 
1935 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
1936 {
1937 	struct btf_struct_meta *meta = meta__ign;
1938 	void *p = p__alloc;
1939 
1940 	__bpf_obj_drop_impl(p, meta ? meta->record : NULL);
1941 }
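/* A minimal BPF-program-side sketch of allocating and freeing such objects,
 * assuming the bpf_obj_new()/bpf_obj_drop() convenience macros from the
 * selftests' bpf_experimental.h (thin wrappers around the *_impl kfuncs above)
 * and the illustrative 'struct elem' from the list example earlier:
 *
 *   SEC("tc")
 *   int alloc_and_free(void *ctx)
 *   {
 *           struct elem *e;
 *
 *           e = bpf_obj_new(typeof(*e));
 *           if (!e)
 *                   return 0;
 *           e->data = 42;
 *           // never inserted into a list/tree, so drop the owned reference
 *           bpf_obj_drop(e);
 *           return 0;
 *   }
 */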
1942 
1943 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
1944 {
1945 	struct btf_struct_meta *meta = meta__ign;
1946 	struct bpf_refcount *ref;
1947 
1948 	/* Could just cast directly to refcount_t *, but need some code using
1949 	 * bpf_refcount type so that it is emitted in vmlinux BTF
1950 	 */
1951 	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
1952 	if (!refcount_inc_not_zero((refcount_t *)ref))
1953 		return NULL;
1954 
1955 	/* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
1956 	 * in verifier.c
1957 	 */
1958 	return (void *)p__refcounted_kptr;
1959 }
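/* Sketch, assuming the bpf_refcount_acquire() wrapper from the selftests'
 * bpf_experimental.h: a program holding a non-owning reference to a refcounted
 * object (a type embedding struct bpf_refcount) can try to obtain a new owned
 * reference from it:
 *
 *   n = bpf_refcount_acquire(n);  // NULL if the refcount already dropped to 0
 *   if (!n)
 *           return 0;
 *   ...
 *   bpf_obj_drop(n);              // or insert it into a list/rbtree instead
 */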
1960 
1961 static int __bpf_list_add(struct bpf_list_node_kern *node,
1962 			  struct bpf_list_head *head,
1963 			  bool tail, struct btf_record *rec, u64 off)
1964 {
1965 	struct list_head *n = &node->list_head, *h = (void *)head;
1966 
1967 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
1968 	 * called on its fields, so init here
1969 	 */
1970 	if (unlikely(!h->next))
1971 		INIT_LIST_HEAD(h);
1972 
1973 	/* node->owner != NULL implies !list_empty(n), no need to separately
1974 	 * check the latter
1975 	 */
1976 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
1977 		/* Only called from BPF prog, no need to migrate_disable */
1978 		__bpf_obj_drop_impl((void *)n - off, rec);
1979 		return -EINVAL;
1980 	}
1981 
1982 	tail ? list_add_tail(n, h) : list_add(n, h);
1983 	WRITE_ONCE(node->owner, head);
1984 
1985 	return 0;
1986 }
1987 
1988 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
1989 					 struct bpf_list_node *node,
1990 					 void *meta__ign, u64 off)
1991 {
1992 	struct bpf_list_node_kern *n = (void *)node;
1993 	struct btf_struct_meta *meta = meta__ign;
1994 
1995 	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
1996 }
1997 
1998 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
1999 					struct bpf_list_node *node,
2000 					void *meta__ign, u64 off)
2001 {
2002 	struct bpf_list_node_kern *n = (void *)node;
2003 	struct btf_struct_meta *meta = meta__ign;
2004 
2005 	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2006 }
2007 
2008 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2009 {
2010 	struct list_head *n, *h = (void *)head;
2011 	struct bpf_list_node_kern *node;
2012 
2013 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2014 	 * called on its fields, so init here
2015 	 */
2016 	if (unlikely(!h->next))
2017 		INIT_LIST_HEAD(h);
2018 	if (list_empty(h))
2019 		return NULL;
2020 
2021 	n = tail ? h->prev : h->next;
2022 	node = container_of(n, struct bpf_list_node_kern, list_head);
2023 	if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2024 		return NULL;
2025 
2026 	list_del_init(n);
2027 	WRITE_ONCE(node->owner, NULL);
2028 	return (struct bpf_list_node *)n;
2029 }
2030 
2031 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2032 {
2033 	return __bpf_list_del(head, false);
2034 }
2035 
2036 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2037 {
2038 	return __bpf_list_del(head, true);
2039 }
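/* A rough usage sketch for the list kfuncs above, assuming the
 * bpf_list_push_front()/bpf_list_pop_front() wrappers and the container_of()
 * macro from the selftest headers; the lock, head and 'struct elem' names are
 * illustrative:
 *
 *   bpf_spin_lock(&lock);
 *   bpf_list_push_front(&head, &e->node);  // on error the node is dropped
 *   bpf_spin_unlock(&lock);
 *
 *   bpf_spin_lock(&lock);
 *   node = bpf_list_pop_front(&head);      // owned reference or NULL
 *   bpf_spin_unlock(&lock);
 *   if (node) {
 *           e = container_of(node, struct elem, node);
 *           bpf_obj_drop(e);
 *   }
 */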
2040 
2041 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2042 						  struct bpf_rb_node *node)
2043 {
2044 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2045 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2046 	struct rb_node *n = &node_internal->rb_node;
2047 
2048 	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2049 	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2050 	 */
2051 	if (READ_ONCE(node_internal->owner) != root)
2052 		return NULL;
2053 
2054 	rb_erase_cached(n, r);
2055 	RB_CLEAR_NODE(n);
2056 	WRITE_ONCE(node_internal->owner, NULL);
2057 	return (struct bpf_rb_node *)n;
2058 }
2059 
2060 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2061  * program
2062  */
2063 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2064 			    struct bpf_rb_node_kern *node,
2065 			    void *less, struct btf_record *rec, u64 off)
2066 {
2067 	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2068 	struct rb_node *parent = NULL, *n = &node->rb_node;
2069 	bpf_callback_t cb = (bpf_callback_t)less;
2070 	bool leftmost = true;
2071 
2072 	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2073 	 * check the latter
2074 	 */
2075 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2076 		/* Only called from BPF prog, no need to migrate_disable */
2077 		__bpf_obj_drop_impl((void *)n - off, rec);
2078 		return -EINVAL;
2079 	}
2080 
2081 	while (*link) {
2082 		parent = *link;
2083 		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2084 			link = &parent->rb_left;
2085 		} else {
2086 			link = &parent->rb_right;
2087 			leftmost = false;
2088 		}
2089 	}
2090 
2091 	rb_link_node(n, parent, link);
2092 	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2093 	WRITE_ONCE(node->owner, root);
2094 	return 0;
2095 }
2096 
2097 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2098 				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2099 				    void *meta__ign, u64 off)
2100 {
2101 	struct btf_struct_meta *meta = meta__ign;
2102 	struct bpf_rb_node_kern *n = (void *)node;
2103 
2104 	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
2105 }
2106 
2107 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2108 {
2109 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2110 
2111 	return (struct bpf_rb_node *)rb_first_cached(r);
2112 }
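/* A rough sketch of driving the rbtree kfuncs, assuming the bpf_rbtree_add()
 * wrapper from the selftests' bpf_experimental.h and an illustrative
 * 'struct node_data' whose 'node' member is the bpf_rb_node:
 *
 *   static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *   {
 *           struct node_data *na = container_of(a, struct node_data, node);
 *           struct node_data *nb = container_of(b, struct node_data, node);
 *
 *           return na->key < nb->key;
 *   }
 *
 *   bpf_spin_lock(&lock);
 *   bpf_rbtree_add(&root, &n->node, less);        // n becomes non-owned
 *   res = bpf_rbtree_first(&root);                // non-owning ref or NULL
 *   if (res)
 *           res = bpf_rbtree_remove(&root, res);  // owned ref or NULL
 *   bpf_spin_unlock(&lock);
 *   if (res)
 *           bpf_obj_drop(container_of(res, struct node_data, node));
 */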
2113 
2114 /**
2115  * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2116  * kfunc that is not stored in a map as a kptr must be released by calling
2117  * bpf_task_release().
2118  * @p: The task on which a reference is being acquired.
2119  */
2120 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2121 {
2122 	if (refcount_inc_not_zero(&p->rcu_users))
2123 		return p;
2124 	return NULL;
2125 }
2126 
2127 /**
2128  * bpf_task_release - Release the reference acquired on a task.
2129  * @p: The task on which a reference is being released.
2130  */
2131 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2132 {
2133 	put_task_struct_rcu_user(p);
2134 }
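/* A minimal tracing-program sketch of the acquire/release pairing, assuming
 * the kfuncs are declared __ksym on the BPF side as in the selftests:
 *
 *   struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
 *   void bpf_task_release(struct task_struct *p) __ksym;
 *
 *   SEC("tp_btf/task_newtask")
 *   int BPF_PROG(on_newtask, struct task_struct *task, u64 clone_flags)
 *   {
 *           struct task_struct *acquired;
 *
 *           acquired = bpf_task_acquire(task);
 *           if (!acquired)
 *                   return 0;
 *           // use 'acquired'; if it is not transferred into a map as a
 *           // kptr, it must be released:
 *           bpf_task_release(acquired);
 *           return 0;
 *   }
 */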
2135 
2136 #ifdef CONFIG_CGROUPS
2137 /**
2138  * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2139  * this kfunc that is not stored in a map as a kptr must be released by
2140  * calling bpf_cgroup_release().
2141  * @cgrp: The cgroup on which a reference is being acquired.
2142  */
2143 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2144 {
2145 	return cgroup_tryget(cgrp) ? cgrp : NULL;
2146 }
2147 
2148 /**
2149  * bpf_cgroup_release - Release the reference acquired on a cgroup.
2150  * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2151  * not be freed until the current grace period has ended, even if its refcount
2152  * drops to 0.
2153  * @cgrp: The cgroup on which a reference is being released.
2154  */
2155 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2156 {
2157 	cgroup_put(cgrp);
2158 }
2159 
2160 /**
2161  * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2162  * array. A cgroup returned by this kfunc that is not subsequently stored in a
2163  * map must be released by calling bpf_cgroup_release().
2164  * @cgrp: The cgroup for which we're performing a lookup.
2165  * @level: The level of ancestor to look up.
2166  */
2167 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2168 {
2169 	struct cgroup *ancestor;
2170 
2171 	if (level > cgrp->level || level < 0)
2172 		return NULL;
2173 
2174 	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2175 	ancestor = cgrp->ancestors[level];
2176 	if (!cgroup_tryget(ancestor))
2177 		return NULL;
2178 	return ancestor;
2179 }
2180 
2181 /**
2182  * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2183  * kfunc that is not subsequently stored in a map must be released by calling
2184  * bpf_cgroup_release().
2185  * @cgid: cgroup id.
2186  */
2187 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2188 {
2189 	struct cgroup *cgrp;
2190 
2191 	cgrp = cgroup_get_from_id(cgid);
2192 	if (IS_ERR(cgrp))
2193 		return NULL;
2194 	return cgrp;
2195 }
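/* A rough sketch of looking up and releasing a cgroup by ID, with the kfuncs
 * declared __ksym on the BPF side; 'cgid' is assumed to be supplied by
 * userspace (e.g. through a global variable):
 *
 *   struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
 *   void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
 *
 *   struct cgroup *cgrp = bpf_cgroup_from_id(cgid);
 *   if (!cgrp)
 *           return 0;
 *   // inspect cgrp (e.g. cgrp->level) or pass it to bpf_cgroup_ancestor()
 *   bpf_cgroup_release(cgrp);
 */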
2196 
2197 /**
2198  * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
2199  * task's membership of cgroup ancestry.
2200  * @task: the task to be tested
2201  * @ancestor: possible ancestor of @task's cgroup
2202  *
2203  * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2204  * Tests whether @task's cgroup in the default hierarchy is a descendant of @ancestor.
2205  * to the default hierarchy.
2206  */
2207 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
2208 				       struct cgroup *ancestor)
2209 {
2210 	long ret;
2211 
2212 	rcu_read_lock();
2213 	ret = task_under_cgroup_hierarchy(task, ancestor);
2214 	rcu_read_unlock();
2215 	return ret;
2216 }
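/* Sketch: combined with bpf_cgroup_from_id()/bpf_cgroup_release(), a program
 * can test whether the current task sits under a given cgroup; 'target_cgid'
 * and 'matched' are illustrative names:
 *
 *   struct cgroup *ancestor = bpf_cgroup_from_id(target_cgid);
 *   if (!ancestor)
 *           return 0;
 *   if (bpf_task_under_cgroup(bpf_get_current_task_btf(), ancestor))
 *           matched = 1;  // task is within target_cgid's subtree
 *   bpf_cgroup_release(ancestor);
 */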
2217 #endif /* CONFIG_CGROUPS */
2218 
2219 /**
2220  * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2221  * in the root pid namespace idr. If a task is returned, it must either be
2222  * stored in a map, or released with bpf_task_release().
2223  * @pid: The pid of the task being looked up.
2224  */
2225 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2226 {
2227 	struct task_struct *p;
2228 
2229 	rcu_read_lock();
2230 	p = find_task_by_pid_ns(pid, &init_pid_ns);
2231 	if (p)
2232 		p = bpf_task_acquire(p);
2233 	rcu_read_unlock();
2234 
2235 	return p;
2236 }
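/* Sketch: the returned task is an owned reference and must either be stored
 * in a map as a kptr or released; pid 1 is just an example value:
 *
 *   struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
 *
 *   struct task_struct *p = bpf_task_from_pid(1);
 *   if (!p)
 *           return 0;
 *   // read fields of p, e.g. p->comm
 *   bpf_task_release(p);
 */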
2237 
2238 /**
2239  * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2240  * @ptr: The dynptr whose data slice to retrieve
2241  * @offset: Offset into the dynptr
2242  * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2243  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2244  *               length of the requested slice. This must be a constant.
2245  *
2246  * For non-skb and non-xdp type dynptrs, there is no difference between
2247  * bpf_dynptr_slice and bpf_dynptr_data.
2248  *
2249  * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2250  *
2251  * If the intention is to write to the data slice, please use
2252  * bpf_dynptr_slice_rdwr.
2253  *
2254  * The user must check that the returned pointer is not null before using it.
2255  *
2256  * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2257  * does not change the underlying packet data pointers, so a call to
2258  * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2259  * the bpf program.
2260  *
2261  * Return: NULL if the call failed (e.g. invalid dynptr), otherwise a pointer to a
2262  * read-only data slice (either a direct pointer to the data, or a pointer to the
2263  * user-provided buffer with the data copied into it, if a direct pointer could
2264  * not be obtained)
2265  */
2266 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
2267 				   void *buffer__opt, u32 buffer__szk)
2268 {
2269 	enum bpf_dynptr_type type;
2270 	u32 len = buffer__szk;
2271 	int err;
2272 
2273 	if (!ptr->data)
2274 		return NULL;
2275 
2276 	err = bpf_dynptr_check_off_len(ptr, offset, len);
2277 	if (err)
2278 		return NULL;
2279 
2280 	type = bpf_dynptr_get_type(ptr);
2281 
2282 	switch (type) {
2283 	case BPF_DYNPTR_TYPE_LOCAL:
2284 	case BPF_DYNPTR_TYPE_RINGBUF:
2285 		return ptr->data + ptr->offset + offset;
2286 	case BPF_DYNPTR_TYPE_SKB:
2287 		if (buffer__opt)
2288 			return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
2289 		else
2290 			return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2291 	case BPF_DYNPTR_TYPE_XDP:
2292 	{
2293 		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2294 		if (!IS_ERR_OR_NULL(xdp_ptr))
2295 			return xdp_ptr;
2296 
2297 		if (!buffer__opt)
2298 			return NULL;
2299 		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
2300 		return buffer__opt;
2301 	}
2302 	default:
2303 		WARN_ONCE(true, "unknown dynptr type %d\n", type);
2304 		return NULL;
2305 	}
2306 }
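/* A rough sk_buff-parsing sketch for bpf_dynptr_slice(), assuming the
 * bpf_dynptr_from_skb() kfunc and the __ksym declarations used by the
 * selftests (headers, section name and return codes are illustrative):
 *
 *   SEC("tc")
 *   int parse_eth(struct __sk_buff *skb)
 *   {
 *           struct ethhdr *eth, buf;
 *           struct bpf_dynptr ptr;
 *
 *           if (bpf_dynptr_from_skb(skb, 0, &ptr))
 *                   return TC_ACT_SHOT;
 *           eth = bpf_dynptr_slice(&ptr, 0, &buf, sizeof(buf));
 *           if (!eth)
 *                   return TC_ACT_SHOT;
 *           // eth points either into the skb head or into 'buf'
 *           return eth->h_proto == bpf_htons(ETH_P_IP) ? TC_ACT_OK : TC_ACT_SHOT;
 *   }
 */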
2307 
2308 /**
2309  * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2310  * @ptr: The dynptr whose data slice to retrieve
2311  * @offset: Offset into the dynptr
2312  * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2313  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2314  *               length of the requested slice. This must be a constant.
2315  *
2316  * For non-skb and non-xdp type dynptrs, there is no difference between
2317  * bpf_dynptr_slice and bpf_dynptr_data.
2318  *
2319  * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2320  *
2321  * The returned pointer is writable and may point either directly to the dynptr
2322  * data at the requested offset or to the buffer if a direct data pointer cannot
2323  * be obtained (for example, if the requested slice is in the paged area of an skb
2324  * packet). In the case where the returned pointer is to the buffer, the user
2325  * is responsible for persisting writes through calling bpf_dynptr_write(). This
2326  * usually looks something like this pattern:
2327  *
2328  * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2329  * if (!eth)
2330  *	return TC_ACT_SHOT;
2331  *
2332  * // mutate eth header //
2333  *
2334  * if (eth == buffer)
2335  *	bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0);
2336  *
2337  * Please note that, as in the example above, the user must check that the
2338  * returned pointer is not null before using it.
2339  *
2340  * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2341  * does not change the underlying packet data pointers, so a call to
2342  * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2343  * the bpf program.
2344  *
2345  * Return: NULL if the call failed (e.g. invalid dynptr), otherwise a pointer to a
2346  * writable data slice (either a direct pointer to the data, or a pointer to the
2347  * user-provided buffer with the data copied into it, if a direct pointer could
2348  * not be obtained)
2349  */
2350 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset,
2351 					void *buffer__opt, u32 buffer__szk)
2352 {
2353 	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
2354 		return NULL;
2355 
2356 	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2357 	 *
2358 	 * For skb-type dynptrs, it is safe to write into the returned pointer
2359 	 * if the bpf program allows skb data writes. There are two possibilities
2360 	 * that may occur when calling bpf_dynptr_slice_rdwr:
2361 	 *
2362 	 * 1) The requested slice is in the head of the skb. In this case, the
2363 	 * returned pointer is directly to skb data, and if the skb is cloned, the
2364 	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2365 	 * The pointer can be directly written into.
2366 	 *
2367 	 * 2) Some portion of the requested slice is in the paged buffer area.
2368 	 * In this case, the requested data will be copied out into the buffer
2369 	 * and the returned pointer will be a pointer to the buffer. The skb
2370 	 * will not be pulled. To persist the write, the user will need to call
2371 	 * bpf_dynptr_write(), which will pull the skb and commit the write.
2372 	 *
2373 	 * Similarly for xdp programs, if the requested slice is not across xdp
2374 	 * fragments, then a direct pointer will be returned, otherwise the data
2375 	 * will be copied out into the buffer and the user will need to call
2376 	 * bpf_dynptr_write() to commit changes.
2377 	 */
2378 	return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk);
2379 }
2380 
2381 __bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end)
2382 {
2383 	u32 size;
2384 
2385 	if (!ptr->data || start > end)
2386 		return -EINVAL;
2387 
2388 	size = __bpf_dynptr_size(ptr);
2389 
2390 	if (start > size || end > size)
2391 		return -ERANGE;
2392 
2393 	ptr->offset += start;
2394 	bpf_dynptr_set_size(ptr, end - start);
2395 
2396 	return 0;
2397 }
2398 
2399 __bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr)
2400 {
2401 	return !ptr->data;
2402 }
2403 
2404 __bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr)
2405 {
2406 	if (!ptr->data)
2407 		return false;
2408 
2409 	return __bpf_dynptr_is_rdonly(ptr);
2410 }
2411 
2412 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
2413 {
2414 	if (!ptr->data)
2415 		return -EINVAL;
2416 
2417 	return __bpf_dynptr_size(ptr);
2418 }
2419 
2420 __bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr,
2421 				 struct bpf_dynptr_kern *clone__uninit)
2422 {
2423 	if (!ptr->data) {
2424 		bpf_dynptr_set_null(clone__uninit);
2425 		return -EINVAL;
2426 	}
2427 
2428 	*clone__uninit = *ptr;
2429 
2430 	return 0;
2431 }
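/* Sketch of the clone/adjust/size kfuncs: a clone shares the underlying data
 * with the original dynptr and can be narrowed independently (the offsets
 * below are illustrative):
 *
 *   struct bpf_dynptr clone;
 *
 *   if (bpf_dynptr_clone(&ptr, &clone))
 *           return 0;
 *   // restrict the clone to bytes [4, 8) of the original view
 *   if (bpf_dynptr_adjust(&clone, 4, 8))
 *           return 0;
 *   // bpf_dynptr_size(&clone) is now 4; the original 'ptr' is unchanged
 */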
2432 
2433 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
2434 {
2435 	return obj;
2436 }
2437 
2438 __bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
2439 {
2440 	return obj__ign;
2441 }
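/* A rough sketch of the two cast kfuncs, assuming __ksym declarations and the
 * CO-RE bpf_core_type_id_kernel() macro from libbpf on the BPF side:
 *
 *   void *bpf_cast_to_kern_ctx(void *obj) __ksym;
 *   void *bpf_rdonly_cast(void *obj, __u32 btf_id) __ksym;
 *
 *   SEC("tc")
 *   int cast_example(struct __sk_buff *ctx)
 *   {
 *           struct sk_buff *kskb = bpf_cast_to_kern_ctx(ctx);
 *           struct sock *sk;
 *
 *           sk = bpf_rdonly_cast(kskb->sk, bpf_core_type_id_kernel(struct sock));
 *           // loads through 'sk' are read-only and PROBE_MEM-protected
 *           if (sk)
 *                   bpf_printk("family %d", sk->sk_family);
 *           return 0;
 *   }
 */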
2442 
2443 __bpf_kfunc void bpf_rcu_read_lock(void)
2444 {
2445 	rcu_read_lock();
2446 }
2447 
2448 __bpf_kfunc void bpf_rcu_read_unlock(void)
2449 {
2450 	rcu_read_unlock();
2451 }
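/* Sketch: in a sleepable program, bpf_rcu_read_lock()/bpf_rcu_read_unlock()
 * bracket accesses to RCU-protected pointers so that KF_RCU kfuncs such as
 * bpf_task_acquire() can be called on them:
 *
 *   bpf_rcu_read_lock();
 *   parent = task->real_parent;          // MEM_RCU-tagged pointer
 *   acquired = bpf_task_acquire(parent);
 *   bpf_rcu_read_unlock();
 *   if (acquired)
 *           bpf_task_release(acquired);
 */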
2452 
2453 __diag_pop();
2454 
2455 BTF_SET8_START(generic_btf_ids)
2456 #ifdef CONFIG_KEXEC_CORE
2457 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
2458 #endif
2459 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
2460 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
2461 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL)
2462 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
2463 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
2464 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
2465 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
2466 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2467 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
2468 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
2469 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
2470 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
2471 
2472 #ifdef CONFIG_CGROUPS
2473 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2474 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
2475 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
2476 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
2477 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
2478 #endif
2479 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
2480 BTF_SET8_END(generic_btf_ids)
2481 
2482 static const struct btf_kfunc_id_set generic_kfunc_set = {
2483 	.owner = THIS_MODULE,
2484 	.set   = &generic_btf_ids,
2485 };
2486 
2487 
2488 BTF_ID_LIST(generic_dtor_ids)
2489 BTF_ID(struct, task_struct)
2490 BTF_ID(func, bpf_task_release)
2491 #ifdef CONFIG_CGROUPS
2492 BTF_ID(struct, cgroup)
2493 BTF_ID(func, bpf_cgroup_release)
2494 #endif
2495 
2496 BTF_SET8_START(common_btf_ids)
2497 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
2498 BTF_ID_FLAGS(func, bpf_rdonly_cast)
2499 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
2500 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
2501 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
2502 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
2503 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
2504 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
2505 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
2506 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
2507 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
2508 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
2509 BTF_ID_FLAGS(func, bpf_dynptr_size)
2510 BTF_ID_FLAGS(func, bpf_dynptr_clone)
2511 BTF_SET8_END(common_btf_ids)
2512 
2513 static const struct btf_kfunc_id_set common_kfunc_set = {
2514 	.owner = THIS_MODULE,
2515 	.set   = &common_btf_ids,
2516 };
2517 
2518 static int __init kfunc_init(void)
2519 {
2520 	int ret;
2521 	const struct btf_id_dtor_kfunc generic_dtors[] = {
2522 		{
2523 			.btf_id       = generic_dtor_ids[0],
2524 			.kfunc_btf_id = generic_dtor_ids[1]
2525 		},
2526 #ifdef CONFIG_CGROUPS
2527 		{
2528 			.btf_id       = generic_dtor_ids[2],
2529 			.kfunc_btf_id = generic_dtor_ids[3]
2530 		},
2531 #endif
2532 	};
2533 
2534 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
2535 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
2536 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
2537 	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
2538 						  ARRAY_SIZE(generic_dtors),
2539 						  THIS_MODULE);
2540 	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
2541 }
2542 
2543 late_initcall(kfunc_init);
2544