xref: /openbmc/linux/tools/lib/bpf/bpf.c (revision 96d4f267)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * common eBPF ELF operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation;
13  * version 2.1 of the License (not later!)
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with this program; if not,  see <http://www.gnu.org/licenses>
22  */
23 
24 #include <stdlib.h>
25 #include <memory.h>
26 #include <unistd.h>
27 #include <asm/unistd.h>
28 #include <linux/bpf.h>
29 #include "bpf.h"
30 #include "libbpf.h"
31 #include <errno.h>
32 
33 /*
34  * When building perf, unistd.h is overridden. __NR_bpf is
35  * required to be defined explicitly.
36  */
37 #ifndef __NR_bpf
38 # if defined(__i386__)
39 #  define __NR_bpf 357
40 # elif defined(__x86_64__)
41 #  define __NR_bpf 321
42 # elif defined(__aarch64__)
43 #  define __NR_bpf 280
44 # elif defined(__sparc__)
45 #  define __NR_bpf 349
46 # elif defined(__s390__)
47 #  define __NR_bpf 351
48 # else
49 #  error __NR_bpf not defined. libbpf does not support your arch.
50 # endif
51 #endif
52 
#ifndef min
/* NOTE: evaluates x and y more than once; only use with side-effect-free
 * arguments (all call sites in this file comply).
 */
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
56 
/* Convert a userspace pointer into the __u64 form the bpf(2) attr
 * union expects; the intermediate unsigned long avoids sign extension
 * on 32-bit architectures.
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long uptr = (unsigned long) ptr;

	return (__u64) uptr;
}
61 
/* Thin wrapper around the bpf(2) syscall. Returns the raw syscall
 * result: a new fd or 0 on success, -1 with errno set on failure.
 */
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}
67 
68 int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
69 {
70 	__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
71 	union bpf_attr attr;
72 
73 	memset(&attr, '\0', sizeof(attr));
74 
75 	attr.map_type = create_attr->map_type;
76 	attr.key_size = create_attr->key_size;
77 	attr.value_size = create_attr->value_size;
78 	attr.max_entries = create_attr->max_entries;
79 	attr.map_flags = create_attr->map_flags;
80 	memcpy(attr.map_name, create_attr->name,
81 	       min(name_len, BPF_OBJ_NAME_LEN - 1));
82 	attr.numa_node = create_attr->numa_node;
83 	attr.btf_fd = create_attr->btf_fd;
84 	attr.btf_key_type_id = create_attr->btf_key_type_id;
85 	attr.btf_value_type_id = create_attr->btf_value_type_id;
86 	attr.map_ifindex = create_attr->map_ifindex;
87 	attr.inner_map_fd = create_attr->inner_map_fd;
88 
89 	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
90 }
91 
92 int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
93 			int key_size, int value_size, int max_entries,
94 			__u32 map_flags, int node)
95 {
96 	struct bpf_create_map_attr map_attr = {};
97 
98 	map_attr.name = name;
99 	map_attr.map_type = map_type;
100 	map_attr.map_flags = map_flags;
101 	map_attr.key_size = key_size;
102 	map_attr.value_size = value_size;
103 	map_attr.max_entries = max_entries;
104 	if (node >= 0) {
105 		map_attr.numa_node = node;
106 		map_attr.map_flags |= BPF_F_NUMA_NODE;
107 	}
108 
109 	return bpf_create_map_xattr(&map_attr);
110 }
111 
112 int bpf_create_map(enum bpf_map_type map_type, int key_size,
113 		   int value_size, int max_entries, __u32 map_flags)
114 {
115 	struct bpf_create_map_attr map_attr = {};
116 
117 	map_attr.map_type = map_type;
118 	map_attr.map_flags = map_flags;
119 	map_attr.key_size = key_size;
120 	map_attr.value_size = value_size;
121 	map_attr.max_entries = max_entries;
122 
123 	return bpf_create_map_xattr(&map_attr);
124 }
125 
126 int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
127 			int key_size, int value_size, int max_entries,
128 			__u32 map_flags)
129 {
130 	struct bpf_create_map_attr map_attr = {};
131 
132 	map_attr.name = name;
133 	map_attr.map_type = map_type;
134 	map_attr.map_flags = map_flags;
135 	map_attr.key_size = key_size;
136 	map_attr.value_size = value_size;
137 	map_attr.max_entries = max_entries;
138 
139 	return bpf_create_map_xattr(&map_attr);
140 }
141 
/* Create a map-in-map (array/hash of maps) whose values are inner map
 * fds modeled on @inner_map_fd, optionally bound to NUMA node @node
 * (negative @node means no binding). Returns a map fd or -1/errno.
 */
int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
			       int key_size, int inner_map_fd, int max_entries,
			       __u32 map_flags, int node)
{
	__u32 name_len = name ? strlen(name) : 0;
	union bpf_attr attr;

	memset(&attr, '\0', sizeof(attr));

	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = 4;	/* values are 32-bit inner-map fds */
	attr.inner_map_fd = inner_map_fd;
	attr.max_entries = max_entries;
	attr.map_flags = map_flags;
	/* attr is zeroed so map_name stays NUL-terminated; skip the copy
	 * when @name is NULL to avoid memcpy() from a NULL source (UB
	 * even with length 0).
	 */
	if (name_len)
		memcpy(attr.map_name, name,
		       min(name_len, BPF_OBJ_NAME_LEN - 1));

	if (node >= 0) {
		attr.map_flags |= BPF_F_NUMA_NODE;
		attr.numa_node = node;
	}

	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
166 
/* Convenience wrapper around bpf_create_map_in_map_node() with no NUMA
 * node binding (node = -1). Returns a map fd or -1 with errno set.
 */
int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
			  int key_size, int inner_map_fd, int max_entries,
			  __u32 map_flags)
{
	return bpf_create_map_in_map_node(map_type, name, key_size,
					  inner_map_fd, max_entries, map_flags,
					  -1);
}
175 
/* Copy @cnt records of @expected_rec_size bytes each out of @orecord
 * into a freshly malloc'd array of @cnt records of @actual_rec_size
 * bytes, zero-filling the tail of every record. Used to re-submit
 * func/line info whose user record is larger than what the kernel
 * understands. Caller owns (and must free) the returned buffer;
 * returns NULL on allocation failure. Requires
 * actual_rec_size >= expected_rec_size.
 */
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	/* widen before multiplying: the __u32 product could overflow */
	__u64 info_len = (__u64)actual_rec_size * cnt;
	const char *src = orecord;
	char *info, *dst;
	__u32 i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	dst = info;
	for (i = 0; i < cnt; i++) {
		memcpy(dst, src, expected_rec_size);
		memset(dst + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		src += actual_rec_size;
		dst += actual_rec_size;
	}

	return info;
}
200 
201 int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
202 			   char *log_buf, size_t log_buf_sz)
203 {
204 	void *finfo = NULL, *linfo = NULL;
205 	union bpf_attr attr;
206 	__u32 name_len;
207 	int fd;
208 
209 	if (!load_attr)
210 		return -EINVAL;
211 
212 	name_len = load_attr->name ? strlen(load_attr->name) : 0;
213 
214 	bzero(&attr, sizeof(attr));
215 	attr.prog_type = load_attr->prog_type;
216 	attr.expected_attach_type = load_attr->expected_attach_type;
217 	attr.insn_cnt = (__u32)load_attr->insns_cnt;
218 	attr.insns = ptr_to_u64(load_attr->insns);
219 	attr.license = ptr_to_u64(load_attr->license);
220 	attr.log_buf = ptr_to_u64(NULL);
221 	attr.log_size = 0;
222 	attr.log_level = 0;
223 	attr.kern_version = load_attr->kern_version;
224 	attr.prog_ifindex = load_attr->prog_ifindex;
225 	attr.prog_btf_fd = load_attr->prog_btf_fd;
226 	attr.func_info_rec_size = load_attr->func_info_rec_size;
227 	attr.func_info_cnt = load_attr->func_info_cnt;
228 	attr.func_info = ptr_to_u64(load_attr->func_info);
229 	attr.line_info_rec_size = load_attr->line_info_rec_size;
230 	attr.line_info_cnt = load_attr->line_info_cnt;
231 	attr.line_info = ptr_to_u64(load_attr->line_info);
232 	memcpy(attr.prog_name, load_attr->name,
233 	       min(name_len, BPF_OBJ_NAME_LEN - 1));
234 
235 	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
236 	if (fd >= 0)
237 		return fd;
238 
239 	/* After bpf_prog_load, the kernel may modify certain attributes
240 	 * to give user space a hint how to deal with loading failure.
241 	 * Check to see whether we can make some changes and load again.
242 	 */
243 	while (errno == E2BIG && (!finfo || !linfo)) {
244 		if (!finfo && attr.func_info_cnt &&
245 		    attr.func_info_rec_size < load_attr->func_info_rec_size) {
246 			/* try with corrected func info records */
247 			finfo = alloc_zero_tailing_info(load_attr->func_info,
248 							load_attr->func_info_cnt,
249 							load_attr->func_info_rec_size,
250 							attr.func_info_rec_size);
251 			if (!finfo)
252 				goto done;
253 
254 			attr.func_info = ptr_to_u64(finfo);
255 			attr.func_info_rec_size = load_attr->func_info_rec_size;
256 		} else if (!linfo && attr.line_info_cnt &&
257 			   attr.line_info_rec_size <
258 			   load_attr->line_info_rec_size) {
259 			linfo = alloc_zero_tailing_info(load_attr->line_info,
260 							load_attr->line_info_cnt,
261 							load_attr->line_info_rec_size,
262 							attr.line_info_rec_size);
263 			if (!linfo)
264 				goto done;
265 
266 			attr.line_info = ptr_to_u64(linfo);
267 			attr.line_info_rec_size = load_attr->line_info_rec_size;
268 		} else {
269 			break;
270 		}
271 
272 		fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
273 
274 		if (fd >= 0)
275 			goto done;
276 	}
277 
278 	if (!log_buf || !log_buf_sz)
279 		goto done;
280 
281 	/* Try again with log */
282 	attr.log_buf = ptr_to_u64(log_buf);
283 	attr.log_size = log_buf_sz;
284 	attr.log_level = 1;
285 	log_buf[0] = 0;
286 	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
287 done:
288 	free(finfo);
289 	free(linfo);
290 	return fd;
291 }
292 
293 int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
294 		     size_t insns_cnt, const char *license,
295 		     __u32 kern_version, char *log_buf,
296 		     size_t log_buf_sz)
297 {
298 	struct bpf_load_program_attr load_attr;
299 
300 	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
301 	load_attr.prog_type = type;
302 	load_attr.expected_attach_type = 0;
303 	load_attr.name = NULL;
304 	load_attr.insns = insns;
305 	load_attr.insns_cnt = insns_cnt;
306 	load_attr.license = license;
307 	load_attr.kern_version = kern_version;
308 
309 	return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
310 }
311 
/* Load a program with caller-controlled prog_flags and verifier log
 * level; used by tests to exercise the verifier. Returns a program fd
 * or -1 with errno set.
 */
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
		       size_t insns_cnt, __u32 prog_flags, const char *license,
		       __u32 kern_version, char *log_buf, size_t log_buf_sz,
		       int log_level)
{
	union bpf_attr attr;

	bzero(&attr, sizeof(attr));
	attr.prog_type = type;
	attr.insn_cnt = (__u32)insns_cnt;
	attr.insns = ptr_to_u64(insns);
	attr.license = ptr_to_u64(license);
	attr.log_buf = ptr_to_u64(log_buf);
	attr.log_size = log_buf_sz;
	attr.log_level = log_level;
	/* don't dereference log_buf unless the caller actually
	 * provided a buffer (previously crashed on NULL)
	 */
	if (log_buf && log_buf_sz)
		log_buf[0] = 0;
	attr.kern_version = kern_version;
	attr.prog_flags = prog_flags;

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
333 
/* Set or replace the value stored under @key in map @fd, subject to
 * @flags. Returns 0 on success, -1 with errno set on failure.
 */
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.flags = flags;
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
347 
/* Copy the value stored under @key in map @fd into *value.
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.value = ptr_to_u64(value);
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
359 
/* Atomically fetch the value under @key into *value and remove the
 * element from map @fd. Returns 0 on success, -1/errno on failure.
 */
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.value = ptr_to_u64(value);
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}
371 
/* Remove the element stored under @key from map @fd.
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.key = ptr_to_u64(key);
	attr.map_fd = fd;

	return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}
382 
/* Write the key following @key in map @fd's iteration order into
 * *next_key. Returns 0 on success, -1 with errno set on failure
 * (e.g. when @key was the last one).
 */
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.next_key = ptr_to_u64(next_key);
	attr.key = ptr_to_u64(key);

	return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
394 
/* Pin the bpf object referred to by @fd at @pathname (must live on a
 * bpffs mount). Returns 0 on success, -1 with errno set on failure.
 */
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bpf_fd = fd;
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}
405 
/* Open the bpf object pinned at @pathname.
 * Returns a new fd on success, -1 with errno set on failure.
 */
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
415 
/* Attach program @prog_fd to @target_fd (e.g. a cgroup fd) for attach
 * point @type with @flags. Returns 0 on success, -1/errno on failure.
 */
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_bpf_fd = prog_fd;
	attr.target_fd = target_fd;
	attr.attach_flags = flags;
	attr.attach_type = type;

	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}
429 
/* Detach whatever program is attached at @type from @target_fd.
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_type = type;
	attr.target_fd = target_fd;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
440 
/* Like bpf_prog_detach() but additionally identifies the specific
 * program @prog_fd to detach (needed for multi-attach points).
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.target_fd = target_fd;

	return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
452 
/* Query the programs attached to @target_fd at @type. On entry
 * *prog_cnt is the capacity of @prog_ids; on return it holds the
 * kernel-reported count (updated even on failure, mirroring attr).
 * @attach_flags may be NULL if the caller doesn't need the flags.
 * Returns 0 on success, -1 with errno set on failure.
 */
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.query.prog_ids = ptr_to_u64(prog_ids);
	attr.query.prog_cnt = *prog_cnt;
	attr.query.query_flags = query_flags;
	attr.query.attach_type = type;
	attr.query.target_fd = target_fd;

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));

	*prog_cnt = attr.query.prog_cnt;
	if (attach_flags)
		*attach_flags = attr.query.attach_flags;

	return ret;
}
472 
/* Run program @prog_fd @repeat times over @data/@size, collecting the
 * output packet into @data_out and the result/timing into the optional
 * out-params (each may be NULL). Returns 0 on success, -1/errno on
 * failure.
 */
int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
		      void *data_out, __u32 *size_out, __u32 *retval,
		      __u32 *duration)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.test.repeat = repeat;
	attr.test.prog_fd = prog_fd;
	attr.test.data_size_in = size;
	attr.test.data_in = ptr_to_u64(data);
	attr.test.data_out = ptr_to_u64(data_out);

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	if (size_out)
		*size_out = attr.test.data_size_out;
	if (retval)
		*retval = attr.test.retval;
	if (duration)
		*duration = attr.test.duration;

	return ret;
}
496 
497 int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
498 {
499 	union bpf_attr attr;
500 	int ret;
501 
502 	if (!test_attr->data_out && test_attr->data_size_out > 0)
503 		return -EINVAL;
504 
505 	bzero(&attr, sizeof(attr));
506 	attr.test.prog_fd = test_attr->prog_fd;
507 	attr.test.data_in = ptr_to_u64(test_attr->data_in);
508 	attr.test.data_out = ptr_to_u64(test_attr->data_out);
509 	attr.test.data_size_in = test_attr->data_size_in;
510 	attr.test.data_size_out = test_attr->data_size_out;
511 	attr.test.repeat = test_attr->repeat;
512 
513 	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
514 	test_attr->data_size_out = attr.test.data_size_out;
515 	test_attr->retval = attr.test.retval;
516 	test_attr->duration = attr.test.duration;
517 	return ret;
518 }
519 
/* Store the id of the first loaded program whose id is greater than
 * @start_id into *next_id. Returns 0 on success, -1/errno on failure
 * (ENOENT once the id space is exhausted).
 */
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
	if (err)
		return err;

	*next_id = attr.next_id;
	return 0;
}
534 
/* Store the id of the first map whose id is greater than @start_id
 * into *next_id. Returns 0 on success, -1/errno on failure (ENOENT
 * once the id space is exhausted).
 */
int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
	if (err)
		return err;

	*next_id = attr.next_id;
	return 0;
}
549 
/* Open an fd for the loaded program with id @id.
 * Returns a new fd on success, -1 with errno set on failure.
 */
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}
559 
/* Open an fd for the map with id @id.
 * Returns a new fd on success, -1 with errno set on failure.
 */
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
}
569 
/* Open an fd for the BTF object with id @id.
 * Returns a new fd on success, -1 with errno set on failure.
 */
int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
}
579 
/* Fill *info with the kernel's info record for the bpf object behind
 * @prog_fd; *info_len is the buffer size on entry and the length the
 * kernel actually wrote on success. NOTE(review): despite the
 * parameter name this appears usable for any bpf object fd, not just
 * programs — confirm against callers. Returns 0 on success, -1/errno
 * on failure.
 */
int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.info = ptr_to_u64(info);
	attr.info.info_len = *info_len;
	attr.info.bpf_fd = prog_fd;

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (err)
		return err;

	*info_len = attr.info.info_len;
	return 0;
}
596 
/* Attach program @prog_fd to the raw tracepoint named @name.
 * Returns an fd representing the attachment on success, -1/errno on
 * failure.
 */
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.prog_fd = prog_fd;
	attr.raw_tracepoint.name = ptr_to_u64(name);

	return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
607 
/* Load raw BTF data @btf of @btf_size bytes into the kernel. If
 * @do_log is false the first attempt runs without the verifier log;
 * on failure (and only then) a second attempt is made with the log
 * enabled, provided the caller supplied @log_buf/@log_buf_size.
 * Returns a BTF fd on success, -1 with errno set on failure.
 */
int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
		 bool do_log)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.btf = ptr_to_u64(btf);
	attr.btf_size = btf_size;

	for (;;) {
		if (do_log && log_buf && log_buf_size) {
			attr.btf_log_level = 1;
			attr.btf_log_size = log_buf_size;
			attr.btf_log_buf = ptr_to_u64(log_buf);
		}

		fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
		/* retry exactly once, with logging, after a silent failure */
		if (fd != -1 || do_log || !log_buf || !log_buf_size)
			break;
		do_log = true;
	}

	return fd;
}
632 
/* Ask the kernel what bpf object (kprobe/uprobe/tracepoint/etc.) the
 * fd @fd of task @pid refers to. @buf/@buf_len receive a name string;
 * the remaining out-params receive the object's identity. All
 * out-params are written back from attr unconditionally, even when the
 * syscall failed. Returns 0 on success, -1 with errno set on failure.
 */
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.buf_len = *buf_len;
	attr.task_fd_query.buf = ptr_to_u64(buf);

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return err;
}
655