xref: /openbmc/linux/kernel/trace/bpf_trace.c (revision 5a244f48)
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

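/*
 * Illustrative sketch (not part of the original file): how a kprobe
 * handler might consume the return value. The function name here is
 * hypothetical; the real call site lives in kernel/trace/trace_kprobe.c.
 */
static inline bool example_kprobe_event_wanted(struct bpf_prog *prog,
					       struct pt_regs *regs)
{
	/* 0 means the BPF program filtered the event out. */
	return trace_call_bpf(prog, regs) != 0;
}
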
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

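/*
 * From the BPF program side (an illustrative sketch, assuming the usual
 * helper declaration from samples/bpf), a call matching the arg types
 * above looks roughly like:
 *
 *	u64 val = 0;
 *	bpf_probe_read(&val, sizeof(val), unsafe_kernel_ptr);
 *
 * On failure the destination has already been zeroed, so a caller that
 * ignores the return value never sees stale stack contents.
 */
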
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

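/*
 * Illustrative BPF-side use (a sketch; the target address would come
 * from the traced context): overwrite a buffer in the current task's
 * user memory. The checks above return -EPERM from interrupt context,
 * from a kthread, or when the address space is not a user one.
 *
 *	char msg[] = "hooked";
 *	bpf_probe_write_user(user_buf_ptr, msg, sizeof(msg));
 */
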
static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!\n",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

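/*
 * mod[n] records the length modifier parsed for argument n: 0 for none
 * (int-sized), 1 for "l" (long), 2 for "ll" (long long); %p and %s also
 * bump it once so pointers travel at long width. The macros below use
 * it to pick a cast for each argument so that the va_list consumed by
 * __trace_printk() sees correctly sized values on both 32- and 64-bit
 * architectures.
 */
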
/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(1 /* Fake ip will not be printed. */,		\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

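/*
 * Illustrative BPF-side use (a sketch; only the specifiers listed above
 * the helper are accepted, with at most three arguments and one %s):
 *
 *	char fmt[] = "pid %d comm %s\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, comm_ptr);
 *
 * The output lands in the ftrace ring buffer, readable via
 * /sys/kernel/debug/tracing/trace_pipe.
 */
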
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	u64 value = 0;
	int err;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	err = perf_event_read_local(ee->event, &value);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

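/*
 * Illustrative BPF-side use (a sketch; "counters" is an assumed
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY with one hardware counter per CPU):
 *
 *	s64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *
 * Per the comment above, a small negative result is ambiguous: it may
 * be an errno or a genuine counter value in [-22..-2].
 */
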
static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = raw;
	perf_event_output(event, sd, regs);
	return 0;
}

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

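/*
 * Illustrative BPF-side use (a sketch; "events" is an assumed
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY that user space drains via the perf
 * ring buffer):
 *
 *	struct data_t d = { .pid = pid };
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &d, sizeof(d));
 */
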
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

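/*
 * Note: unlike bpf_perf_event_output() above, this variant is called
 * from other helper implementations (e.g. the networking side) and can
 * chain a second fragment: "meta" is emitted first, followed by
 * ctx_size bytes copied out of "ctx" by the ctx_copy callback, so both
 * appear as one raw sample to user space.
 */
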
BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

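/*
 * Illustrative BPF-side use (a sketch): the returned address is only
 * safe to dereference through bpf_probe_read():
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	int tgid;
 *	bpf_probe_read(&tgid, sizeof(tgid), &task->tgid);
 */
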
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(in_interrupt()))
		return -EINVAL;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func           = bpf_current_task_under_cgroup,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_CONST_MAP_PTR,
	.arg2_type      = ARG_ANYTHING,
};

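/*
 * Illustrative BPF-side use (a sketch; "cgroups" is an assumed
 * BPF_MAP_TYPE_CGROUP_ARRAY populated from user space). The helper
 * returns 1 when current runs in the cgroup at the given slot (or a
 * descendant), 0 when it does not, and a negative errno on failure:
 *
 *	if (bpf_current_task_under_cgroup(&cgroups, 0) != 1)
 *		return 0;
 */
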
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay here: we're probing arbitrary memory
	 * anyway, just as bpf_probe_read() does, and might as well probe
	 * the stack. Memory is therefore explicitly cleared only in the
	 * error case, so that careless users who ignore the return code
	 * don't copy garbage; on success the length of the string is
	 * returned, which can be fed to bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

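/*
 * Illustrative BPF-side pattern following the comment above (a sketch;
 * "events" is an assumed BPF_MAP_TYPE_PERF_EVENT_ARRAY): emit only the
 * bytes actually copied rather than the whole buffer:
 *
 *	char name[64];
 *	int len = bpf_probe_read_str(name, sizeof(name), unsafe_ptr);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      name, len);
 */
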
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure the last 8-byte access
	 * (BPF_DW) to the last 4-byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

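/*
 * Illustrative BPF-side consequence (an x86-64 sketch): a kprobe
 * program may read register fields straight out of its context,
 *
 *	struct pt_regs *regs = ctx;
 *	u64 arg0 = regs->di;	(first function argument on x86-64)
 *
 * while any store to ctx, or a misaligned or out-of-bounds read, is
 * rejected at load time by the checks above.
 */
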
const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to the perf tracepoint buffer where the first 8 bytes
	 * are hidden from the bpf program and contain a pointer to
	 * 'struct pt_regs'. Fetch it from there and call the same
	 * bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call the raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	const int size_sp = FIELD_SIZEOF(struct bpf_perf_event_data,
					 sample_period);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_sp);
		if (!bpf_ctx_narrow_access_ok(off, size, size_sp))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

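/*
 * Illustrative effect of the rewrite above (a sketch in BPF
 * pseudo-assembly): a program's single load of ctx->sample_period,
 *
 *	rX = *(u64 *)(ctx + offsetof(struct bpf_perf_event_data, sample_period))
 *
 * is replaced by two loads that follow the kernel-side layout:
 *
 *	rX = *(void **)(ctx + offsetof(struct bpf_perf_event_data_kern, data))
 *	rX = *(u64 *)(rX + offsetof(struct perf_sample_data, period))
 */
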
const struct bpf_verifier_ops perf_event_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};