xref: /openbmc/linux/tools/lib/bpf/libbpf_probes.c (revision c8ed9fc9)
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

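/* Return true if @pattern occurs anywhere in @buffer; used below to scan
 * the verifier log captured by probe_load().
 */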
static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}

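/* Return the PCI vendor id of the device backing @ifindex, read from
 * /sys/class/net/<ifname>/device/vendor. That sysfs file contains a hex
 * string such as "0x19ee\n"; strtol() with base 0 handles the "0x"
 * prefix. Returns -1 on any failure.
 */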
static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}

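/* Encode the running kernel release in the (version << 16) +
 * (subversion << 8) + patchlevel layout used by the KERNEL_VERSION()
 * macro; e.g. a "5.4.17" release encodes to
 * (5 << 16) + (4 << 8) + 17 = 0x050411.
 */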
static int get_kernel_version(void)
{
	int version, subversion, patchlevel;
	struct utsname utsn;

	/* Return 0 on failure, and attempt to probe with empty kversion */
	if (uname(&utsn))
		return 0;

	if (sscanf(utsn.release, "%d.%d.%d",
		   &version, &subversion, &patchlevel) != 3)
		return 0;

	return (version << 16) + (subversion << 8) + patchlevel;
}

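/* Load a minimal program of @prog_type and close the fd right away if
 * the load succeeds. There is no return value: callers reset errno
 * before calling and inspect it afterwards, and/or pass a @buf to
 * capture the verifier log, to decide whether the probed feature is
 * supported.
 */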
static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
	struct bpf_load_program_attr xattr = {};
	int fd;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_KPROBE:
		xattr.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
	default:
		break;
	}

	xattr.prog_type = prog_type;
	xattr.insns = insns;
	xattr.insns_cnt = insns_cnt;
	xattr.license = "GPL";
	xattr.prog_ifindex = ifindex;

	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
	if (fd >= 0)
		close(fd);
}

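/* Probe whether programs of @prog_type can be loaded, either on the
 * host kernel or, with @ifindex set, as an offload on that device. A
 * hypothetical caller (illustrative sketch only, not code from this
 * file):
 *
 *	if (bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0))
 *		printf("XDP programs are supported\n");
 */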
bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}

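/* Build a raw BTF image from caller-provided type and string sections
 * and load it. Per the memcpy()s below, the image layout is:
 *
 *	[struct btf_header][type section][string section]
 *	 hdr.hdr_len bytes  hdr.type_len  hdr.str_len
 *
 * Returns the BTF fd from the kernel on success, a negative value on
 * failure.
 */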
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	int btf_fd, btf_len;
	__u8 *raw_btf;

	btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
	raw_btf = malloc(btf_len);
	if (!raw_btf)
		return -ENOMEM;

	memcpy(raw_btf, &hdr, sizeof(hdr));
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);

	free(raw_btf);
	return btf_fd;
}

static int load_sk_storage_btf(void)
{
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
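	/* Offsets into strs[]: 1 = "bpf_spin_lock", 15 = "val", 19 = "cnt",
	 * 23 = "l"; these are the name_off values used in types[] below.
	 */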
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* struct bpf_spin_lock */                      /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */                                /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				     strs, sizeof(strs));
}

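/* Probe whether the kernel (or, with @ifindex set, the device) accepts
 * maps of @map_type, by creating a minimal map of that type and closing
 * it again. A hypothetical caller (illustrative sketch only, not code
 * from this file):
 *
 *	if (bpf_probe_map_type(BPF_MAP_TYPE_RINGBUF, 0))
 *		printf("ring buffer maps are supported\n");
 */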
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	int key_size, value_size, max_entries, map_flags;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	struct bpf_create_map_attr attr = {};
	int fd = -1, btf_fd = -1, fd_inner;

	key_size	= sizeof(__u32);
	value_size	= sizeof(__u32);
	max_entries	= 1;
	map_flags	= 0;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size	= sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size	= sizeof(__u64);
		value_size	= sizeof(__u64);
		map_flags	= BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size	= sizeof(struct bpf_cgroup_storage_key);
		value_size	= sizeof(__u64);
		max_entries	= 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size	= 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_sk_storage_btf();
		if (btf_fd < 0)
			return false;
		break;
	case BPF_MAP_TYPE_RINGBUF:
		key_size = 0;
		value_size = 0;
		max_entries = 4096;
		break;
	case BPF_MAP_TYPE_UNSPEC:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STRUCT_OPS:
	default:
		break;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			return false;

		fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
					  sizeof(__u32), sizeof(__u32), 1, 0);
		if (fd_inner < 0)
			return false;
		fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
					   fd_inner, 1, 0);
		close(fd_inner);
	} else {
		/* Note: No other restriction on map type probes for offload */
		attr.map_type = map_type;
		attr.key_size = key_size;
		attr.value_size = value_size;
		attr.max_entries = max_entries;
		attr.map_flags = map_flags;
		attr.map_ifindex = ifindex;
		if (btf_fd >= 0) {
			attr.btf_fd = btf_fd;
			attr.btf_key_type_id = btf_key_type_id;
			attr.btf_value_type_id = btf_value_type_id;
		}

		fd = bpf_create_map_xattr(&attr);
	}
	if (fd >= 0)
		close(fd);
	if (btf_fd >= 0)
		close(btf_fd);

	return fd >= 0;
}

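/* Probe whether @prog_type programs may call helper @id. The test load
 * is not expected to verify; support is inferred from the verifier log
 * in @buf not containing "invalid func"/"unknown func" (plus
 * device-specific messages for offload). A hypothetical caller
 * (illustrative sketch only, not code from this file):
 *
 *	if (bpf_probe_helper(BPF_FUNC_map_lookup_elem,
 *			     BPF_PROG_TYPE_XDP, 0))
 *		printf("bpf_map_lookup_elem() is available\n");
 */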
bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
		   ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
				!grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}

/*
 * Probe for availability of kernel commit (5.3):
 *
 * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
 */
bool bpf_probe_large_insn_limit(__u32 ifindex)
{
	struct bpf_insn insns[BPF_MAXINSNS + 1];
	int i;

	for (i = 0; i < BPF_MAXINSNS; i++)
		insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

	errno = 0;
	probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
		   ifindex);

	return errno != E2BIG && errno != EINVAL;
}

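/* A hypothetical caller of the probe above (illustrative sketch only):
 *
 *	if (bpf_probe_large_insn_limit(0))
 *		printf("programs larger than BPF_MAXINSNS (4096) are accepted\n");
 */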
357