xref: /openbmc/linux/tools/lib/bpf/bpf.c (revision b68fc09be48edbc47de1a0f3d42ef8adf6c0ac55)
1 // SPDX-License-Identifier: LGPL-2.1
2 
3 /*
4  * common eBPF ELF operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation;
13  * version 2.1 of the License (not later!)
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with this program; if not,  see <http://www.gnu.org/licenses>
22  */
23 
24 #include <stdlib.h>
25 #include <memory.h>
26 #include <unistd.h>
27 #include <asm/unistd.h>
28 #include <linux/bpf.h>
29 #include "bpf.h"
30 #include "libbpf.h"
31 #include <errno.h>
32 
/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 *
 * The numbers below are the per-architecture bpf(2) syscall numbers;
 * they must match the kernel's arch syscall tables.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif

/* Fallback min(): note it evaluates both arguments more than once, so
 * callers must avoid side effects in the arguments. */
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
56 
/* Convert a user-space pointer into the __u64 representation used by
 * union bpf_attr fields. Going through unsigned long first ensures the
 * value is zero-extended (never sign-extended) on 32-bit targets. */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long) ptr;

	return (__u64) addr;
}
61 
/* Thin wrapper around the raw bpf(2) syscall. Returns the syscall
 * result unchanged: non-negative (often a new fd) on success, -1 with
 * errno set on failure. */
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	long ret = syscall(__NR_bpf, cmd, attr, size);

	return (int) ret;
}
67 
68 int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
69 {
70 	__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
71 	union bpf_attr attr;
72 
73 	memset(&attr, '\0', sizeof(attr));
74 
75 	attr.map_type = create_attr->map_type;
76 	attr.key_size = create_attr->key_size;
77 	attr.value_size = create_attr->value_size;
78 	attr.max_entries = create_attr->max_entries;
79 	attr.map_flags = create_attr->map_flags;
80 	memcpy(attr.map_name, create_attr->name,
81 	       min(name_len, BPF_OBJ_NAME_LEN - 1));
82 	attr.numa_node = create_attr->numa_node;
83 	attr.btf_fd = create_attr->btf_fd;
84 	attr.btf_key_type_id = create_attr->btf_key_type_id;
85 	attr.btf_value_type_id = create_attr->btf_value_type_id;
86 	attr.map_ifindex = create_attr->map_ifindex;
87 	attr.inner_map_fd = create_attr->inner_map_fd;
88 
89 	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
90 }
91 
92 int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
93 			int key_size, int value_size, int max_entries,
94 			__u32 map_flags, int node)
95 {
96 	struct bpf_create_map_attr map_attr = {};
97 
98 	map_attr.name = name;
99 	map_attr.map_type = map_type;
100 	map_attr.map_flags = map_flags;
101 	map_attr.key_size = key_size;
102 	map_attr.value_size = value_size;
103 	map_attr.max_entries = max_entries;
104 	if (node >= 0) {
105 		map_attr.numa_node = node;
106 		map_attr.map_flags |= BPF_F_NUMA_NODE;
107 	}
108 
109 	return bpf_create_map_xattr(&map_attr);
110 }
111 
112 int bpf_create_map(enum bpf_map_type map_type, int key_size,
113 		   int value_size, int max_entries, __u32 map_flags)
114 {
115 	struct bpf_create_map_attr map_attr = {};
116 
117 	map_attr.map_type = map_type;
118 	map_attr.map_flags = map_flags;
119 	map_attr.key_size = key_size;
120 	map_attr.value_size = value_size;
121 	map_attr.max_entries = max_entries;
122 
123 	return bpf_create_map_xattr(&map_attr);
124 }
125 
126 int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
127 			int key_size, int value_size, int max_entries,
128 			__u32 map_flags)
129 {
130 	struct bpf_create_map_attr map_attr = {};
131 
132 	map_attr.name = name;
133 	map_attr.map_type = map_type;
134 	map_attr.map_flags = map_flags;
135 	map_attr.key_size = key_size;
136 	map_attr.value_size = value_size;
137 	map_attr.max_entries = max_entries;
138 
139 	return bpf_create_map_xattr(&map_attr);
140 }
141 
/*
 * bpf_create_map_in_map_node - create a map whose values are other maps.
 *
 * @inner_map_fd supplies the template for the inner maps; the outer
 * map's value size is fixed at 4 (an fd/id slot). A negative @node
 * means "no NUMA preference".
 *
 * Returns a new map fd on success, -1 with errno set on failure.
 */
int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	/* Guard the copy: memcpy with a NULL source is undefined behavior
	 * even when the length is 0 (C11 7.1.4). */
	if (name_len)
		memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
166 
/* Create a map-in-map with no NUMA preference; thin wrapper that
 * delegates to bpf_create_map_in_map_node() with node == -1. */
int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
175 
176 int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
177 			   char *log_buf, size_t log_buf_sz)
178 {
179 	union bpf_attr attr;
180 	__u32 name_len;
181 	int fd;
182 
183 	if (!load_attr)
184 		return -EINVAL;
185 
186 	name_len = load_attr->name ? strlen(load_attr->name) : 0;
187 
188 	bzero(&attr, sizeof(attr));
189 	attr.prog_type = load_attr->prog_type;
190 	attr.expected_attach_type = load_attr->expected_attach_type;
191 	attr.insn_cnt = (__u32)load_attr->insns_cnt;
192 	attr.insns = ptr_to_u64(load_attr->insns);
193 	attr.license = ptr_to_u64(load_attr->license);
194 	attr.log_buf = ptr_to_u64(NULL);
195 	attr.log_size = 0;
196 	attr.log_level = 0;
197 	attr.kern_version = load_attr->kern_version;
198 	attr.prog_ifindex = load_attr->prog_ifindex;
199 	memcpy(attr.prog_name, load_attr->name,
200 	       min(name_len, BPF_OBJ_NAME_LEN - 1));
201 
202 	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
203 	if (fd >= 0 || !log_buf || !log_buf_sz)
204 		return fd;
205 
206 	/* Try again with log */
207 	attr.log_buf = ptr_to_u64(log_buf);
208 	attr.log_size = log_buf_sz;
209 	attr.log_level = 1;
210 	log_buf[0] = 0;
211 	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
212 }
213 
214 int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
215 		     size_t insns_cnt, const char *license,
216 		     __u32 kern_version, char *log_buf,
217 		     size_t log_buf_sz)
218 {
219 	struct bpf_load_program_attr load_attr;
220 
221 	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
222 	load_attr.prog_type = type;
223 	load_attr.expected_attach_type = 0;
224 	load_attr.name = NULL;
225 	load_attr.insns = insns;
226 	load_attr.insns_cnt = insns_cnt;
227 	load_attr.license = license;
228 	load_attr.kern_version = kern_version;
229 
230 	return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
231 }
232 
/*
 * bpf_verify_program - load a program with an explicit verifier log
 * level, mainly for testing the verifier itself.
 *
 * @log_buf must be non-NULL: its first byte is cleared unconditionally
 * before the load. @strict_alignment non-zero sets
 * BPF_F_STRICT_ALIGNMENT.
 *
 * Returns a program fd on success, -1 with errno set on failure.
 */
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, int strict_alignment,
		       const char *license, __u32 kern_version,
		       char *log_buf, size_t log_buf_sz, int log_level)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
254 
/*
 * bpf_map_update_elem - create or update the element for @key in map @fd.
 * @flags: update semantics flags passed through to the kernel.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
268 
/*
 * bpf_map_lookup_elem - copy the value stored for @key in map @fd into
 * the caller-provided @value buffer.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
280 
/*
 * bpf_map_delete_elem - remove the element for @key from map @fd.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
291 
/*
 * bpf_map_get_next_key - iterate map @fd: store into @next_key the key
 * following @key.
 *
 * Returns 0 on success, -1 with errno set on failure (including end of
 * iteration, which the kernel reports via errno).
 */
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
303 
/*
 * bpf_obj_pin - pin the BPF object behind @fd at @pathname (bpffs).
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}
314 
/*
 * bpf_obj_get - open the BPF object pinned at @pathname.
 *
 * Returns a new fd on success, -1 with errno set on failure.
 */
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
324 
/*
 * bpf_prog_attach - attach program @prog_fd to @target_fd for attach
 * point @type, with attach @flags.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.target_fd	   = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = type;
	attr.attach_flags  = flags;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}
338 
/*
 * bpf_prog_detach - detach whatever program is attached to @target_fd
 * at attach point @type (no specific program fd supplied; contrast with
 * bpf_prog_detach2()).
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
349 
/*
 * bpf_prog_detach2 - like bpf_prog_detach(), but additionally passes
 * @prog_fd so the kernel detaches that specific program.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.target_fd	 = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
361 
/*
 * bpf_prog_query - list programs attached to @target_fd at @type.
 * @prog_cnt is in/out: on entry the capacity of @prog_ids, on exit the
 * number of ids the kernel reported. @attach_flags, if non-NULL,
 * receives the attach flags.
 *
 * Note: the out parameters are written even when the syscall fails,
 * mirroring the raw attr contents.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd	= target_fd;
	attr.query.attach_type	= type;
	attr.query.query_flags	= query_flags;
	attr.query.prog_cnt	= *prog_cnt;
	attr.query.prog_ids	= ptr_to_u64(prog_ids);

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;
	*prog_cnt = attr.query.prog_cnt;
	return ret;
}
381 
/*
 * bpf_prog_test_run - run program @prog_fd @repeat times over the input
 * in @data/@size via BPF_PROG_TEST_RUN.
 *
 * @data_out/@size_out, @retval and @duration are optional out
 * parameters; each is written only when its pointer is non-NULL.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);
	attr.test.data_size_in = size;
	attr.test.repeat = repeat;

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;
	return ret;
}
405 
/*
 * bpf_prog_get_next_id - fetch the first program id greater than
 * @start_id into *@next_id. *@next_id is written only on success.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}
420 
/*
 * bpf_map_get_next_id - fetch the first map id greater than @start_id
 * into *@next_id. *@next_id is written only on success.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return err;
}
435 
/*
 * bpf_prog_get_fd_by_id - open an fd for the program with id @id.
 *
 * Returns a new fd on success, -1 with errno set on failure.
 */
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}
445 
/*
 * bpf_map_get_fd_by_id - open an fd for the map with id @id.
 *
 * Returns a new fd on success, -1 with errno set on failure.
 */
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}
455 
/*
 * bpf_btf_get_fd_by_id - open an fd for the BTF object with id @id.
 *
 * Returns a new fd on success, -1 with errno set on failure.
 */
int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
}
465 
/*
 * bpf_obj_get_info_by_fd - fill @info with the kernel's info struct for
 * the BPF object behind @prog_fd.
 * @info_len is in/out: capacity on entry, bytes the kernel filled on
 * successful return (*@info_len is updated only on success).
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return err;
}
482 
/*
 * bpf_raw_tracepoint_open - attach program @prog_fd to the raw
 * tracepoint named @name.
 *
 * Returns a new event fd on success, -1 with errno set on failure.
 */
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	/* memset rather than the legacy bzero(): bzero was removed from
	 * POSIX.1-2008 and is non-portable. */
	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
493 
/* Load a BTF blob via BPF_BTF_LOAD. When @do_log is false the first
 * attempt runs without a log buffer; if that attempt fails and the
 * caller supplied @log_buf/@log_buf_size, the load is retried once with
 * the kernel's BTF verifier log enabled.
 *
 * Returns a BTF fd on success, -1 with errno set on failure. */
int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr = {};
	int fd;

	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

	for (;;) {
		if (do_log && log_buf && log_buf_size) {
			attr.btf_log_level = 1;
			attr.btf_log_size = log_buf_size;
			attr.btf_log_buf = ptr_to_u64(log_buf);
		}

		fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
		if (fd == -1 && !do_log && log_buf && log_buf_size) {
			do_log = true;
			continue;
		}
		break;
	}

	return fd;
}
518 
/* Query which BPF-related object (and where) the fd @fd of process @pid
 * refers to, via BPF_TASK_FD_QUERY. @buf_len is in/out for the name
 * buffer @buf. All out parameters are copied back from the attr
 * unconditionally, even when the syscall fails.
 *
 * Returns 0 on success, -1 with errno set on failure. */
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return err;
}
541