xref: /openbmc/linux/net/psample/psample.c (revision bc33f5e5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * net/psample/psample.c - Netlink channel for packet sampling
4  * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
5  */
6 
7 #include <linux/types.h>
8 #include <linux/kernel.h>
9 #include <linux/skbuff.h>
10 #include <linux/module.h>
11 #include <linux/timekeeping.h>
12 #include <net/net_namespace.h>
13 #include <net/sock.h>
14 #include <net/netlink.h>
15 #include <net/genetlink.h>
16 #include <net/psample.h>
17 #include <linux/spinlock.h>
18 #include <net/ip_tunnels.h>
19 #include <net/dst_metadata.h>
20 
/* Upper bound on one sampled netlink message (metadata + packet data). */
#define PSAMPLE_MAX_PACKET_SIZE 0xffff

/* All psample groups, across every network namespace.  Protected by
 * psample_groups_lock, taken with BHs disabled.
 */
static LIST_HEAD(psample_groups_list);
static DEFINE_SPINLOCK(psample_groups_lock);
25 
/* Multicast groups: indices into psample_nl_mcgrps[] below.  Order is
 * part of the userspace-visible registration and must not change.
 */
enum psample_nl_multicast_groups {
	PSAMPLE_NL_MCGRP_CONFIG,	/* group create/delete notifications */
	PSAMPLE_NL_MCGRP_SAMPLE,	/* sampled packets */
};
31 
/* Multicast group names exposed to userspace, indexed by
 * enum psample_nl_multicast_groups.
 */
static const struct genl_multicast_group psample_nl_mcgrps[] = {
	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
};
36 
37 static struct genl_family psample_nl_family __ro_after_init;
38 
39 static int psample_group_nl_fill(struct sk_buff *msg,
40 				 struct psample_group *group,
41 				 enum psample_command cmd, u32 portid, u32 seq,
42 				 int flags)
43 {
44 	void *hdr;
45 	int ret;
46 
47 	hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
48 	if (!hdr)
49 		return -EMSGSIZE;
50 
51 	ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
52 	if (ret < 0)
53 		goto error;
54 
55 	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
56 	if (ret < 0)
57 		goto error;
58 
59 	ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
60 	if (ret < 0)
61 		goto error;
62 
63 	genlmsg_end(msg, hdr);
64 	return 0;
65 
66 error:
67 	genlmsg_cancel(msg, hdr);
68 	return -EMSGSIZE;
69 }
70 
/* GET_GROUP dump handler: emit one PSAMPLE_CMD_NEW_GROUP message per
 * group belonging to the requester's network namespace.  cb->args[0]
 * holds the number of entries already dumped so the walk resumes where
 * it left off on the next invocation.
 */
static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
					   struct netlink_callback *cb)
{
	struct psample_group *group;
	int start = cb->args[0];	/* resume point from previous pass */
	int idx = 0;
	int err;

	spin_lock_bh(&psample_groups_lock);
	list_for_each_entry(group, &psample_groups_list, list) {
		if (!net_eq(group->net, sock_net(msg->sk)))
			continue;
		/* Skip entries already emitted in an earlier invocation. */
		if (idx < start) {
			idx++;
			continue;
		}
		err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			break;	/* msg full; resume from idx next time */
		idx++;
	}

	spin_unlock_bh(&psample_groups_lock);
	cb->args[0] = idx;
	return msg->len;
}
99 
/* Generic netlink commands served by this family; GET_GROUP is dump-only. */
static const struct genl_small_ops psample_nl_ops[] = {
	{
		.cmd = PSAMPLE_CMD_GET_GROUP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = psample_nl_cmd_get_group_dumpit,
		/* can be retrieved by unprivileged users */
	}
};
108 
/* The psample generic netlink family.  netnsok: usable from any network
 * namespace; groups themselves are per-namespace.
 */
static struct genl_family psample_nl_family __ro_after_init = {
	.name		= PSAMPLE_GENL_NAME,
	.version	= PSAMPLE_GENL_VERSION,
	.maxattr	= PSAMPLE_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.mcgrps		= psample_nl_mcgrps,
	.small_ops	= psample_nl_ops,
	.n_small_ops	= ARRAY_SIZE(psample_nl_ops),
	.resv_start_op	= PSAMPLE_CMD_GET_GROUP + 1,
	.n_mcgrps	= ARRAY_SIZE(psample_nl_mcgrps),
};
121 
122 static void psample_group_notify(struct psample_group *group,
123 				 enum psample_command cmd)
124 {
125 	struct sk_buff *msg;
126 	int err;
127 
128 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
129 	if (!msg)
130 		return;
131 
132 	err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
133 	if (!err)
134 		genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
135 					PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
136 	else
137 		nlmsg_free(msg);
138 }
139 
/* Allocate a new group, link it on the global list and announce it on
 * the CONFIG multicast group.  Must be called with psample_groups_lock
 * held (hence GFP_ATOMIC).  Returns NULL on allocation failure.
 */
static struct psample_group *psample_group_create(struct net *net,
						  u32 group_num)
{
	struct psample_group *group;

	group = kzalloc(sizeof(*group), GFP_ATOMIC);
	if (!group)
		return NULL;

	group->net = net;
	group->group_num = group_num;
	list_add_tail(&group->list, &psample_groups_list);

	psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
	return group;
}
156 
/* Announce deletion, unlink the group and free it after an RCU grace
 * period (kfree_rcu), so any concurrent RCU readers stay safe.  Must be
 * called with psample_groups_lock held.
 */
static void psample_group_destroy(struct psample_group *group)
{
	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
	list_del(&group->list);
	kfree_rcu(group, rcu);
}
163 
164 static struct psample_group *
165 psample_group_lookup(struct net *net, u32 group_num)
166 {
167 	struct psample_group *group;
168 
169 	list_for_each_entry(group, &psample_groups_list, list)
170 		if ((group->group_num == group_num) && (group->net == net))
171 			return group;
172 	return NULL;
173 }
174 
/* Get a reference on the group numbered @group_num in @net, creating the
 * group if it does not exist yet.  Returns NULL only when creation of a
 * new group fails.  Release with psample_group_put().
 */
struct psample_group *psample_group_get(struct net *net, u32 group_num)
{
	struct psample_group *group;

	spin_lock_bh(&psample_groups_lock);

	group = psample_group_lookup(net, group_num);
	if (!group) {
		group = psample_group_create(net, group_num);
		if (!group)
			goto out;	/* return NULL, refcount untouched */
	}
	group->refcount++;

out:
	spin_unlock_bh(&psample_groups_lock);
	return group;
}
EXPORT_SYMBOL_GPL(psample_group_get);
194 
/* Take an additional reference on an already-held group. */
void psample_group_take(struct psample_group *group)
{
	spin_lock_bh(&psample_groups_lock);
	group->refcount++;
	spin_unlock_bh(&psample_groups_lock);
}
EXPORT_SYMBOL_GPL(psample_group_take);
202 
/* Drop a reference; the group is destroyed (and a DEL_GROUP notification
 * sent) when the last reference goes away.
 */
void psample_group_put(struct psample_group *group)
{
	spin_lock_bh(&psample_groups_lock);

	if (--group->refcount == 0)
		psample_group_destroy(group);

	spin_unlock_bh(&psample_groups_lock);
}
EXPORT_SYMBOL_GPL(psample_group_put);
213 
214 #ifdef CONFIG_INET
/* Emit the tunnel key of @tun_info as PSAMPLE_TUNNEL_KEY_ATTR_*
 * attributes into @skb.  Optional fields (zero addresses, ports, tos)
 * are treated as absent and skipped.  Attribute for attribute this must
 * stay in sync with psample_tunnel_meta_len(), which pre-computes the
 * space these puts consume.  Returns 0 on success or -EMSGSIZE.
 */
static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
			      struct ip_tunnel_info *tun_info)
{
	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
	const void *tun_opts = ip_tunnel_info_opts(tun_info);
	const struct ip_tunnel_key *tun_key = &tun_info->key;
	int tun_opts_len = tun_info->options_len;

	if (tun_key->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
			 PSAMPLE_TUNNEL_KEY_ATTR_PAD))
		return -EMSGSIZE;

	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE))
		return -EMSGSIZE;

	/* Address attributes depend on the tunnel's address family. */
	switch (tun_proto) {
	case AF_INET:
		if (tun_key->u.ipv4.src &&
		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC,
				    tun_key->u.ipv4.src))
			return -EMSGSIZE;
		if (tun_key->u.ipv4.dst &&
		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST,
				    tun_key->u.ipv4.dst))
			return -EMSGSIZE;
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&tun_key->u.ipv6.src) &&
		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC,
				     &tun_key->u.ipv6.src))
			return -EMSGSIZE;
		if (!ipv6_addr_any(&tun_key->u.ipv6.dst) &&
		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST,
				     &tun_key->u.ipv6.dst))
			return -EMSGSIZE;
		break;
	}
	if (tun_key->tos &&
	    nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TOS, tun_key->tos))
		return -EMSGSIZE;
	/* TTL is always emitted, even when zero. */
	if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (tun_key->tp_src &&
	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src))
		return -EMSGSIZE;
	if (tun_key->tp_dst &&
	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	/* Raw option blob; its interpretation is selected by tun_flags. */
	if (tun_opts_len) {
		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
			 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
				 tun_opts_len, tun_opts))
			return -EMSGSIZE;
	}

	return 0;
}
287 
288 static int psample_ip_tun_to_nlattr(struct sk_buff *skb,
289 			    struct ip_tunnel_info *tun_info)
290 {
291 	struct nlattr *nla;
292 	int err;
293 
294 	nla = nla_nest_start_noflag(skb, PSAMPLE_ATTR_TUNNEL);
295 	if (!nla)
296 		return -EMSGSIZE;
297 
298 	err = __psample_ip_tun_to_nlattr(skb, tun_info);
299 	if (err) {
300 		nla_nest_cancel(skb, nla);
301 		return err;
302 	}
303 
304 	nla_nest_end(skb, nla);
305 
306 	return 0;
307 }
308 
/* Compute the netlink attribute space needed for the tunnel metadata of
 * @tun_info.  Every branch here must mirror the corresponding put in
 * __psample_ip_tun_to_nlattr() above, or message allocation will be
 * under- or over-sized.
 */
static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
{
	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
	const struct ip_tunnel_key *tun_key = &tun_info->key;
	int tun_opts_len = tun_info->options_len;
	int sum = nla_total_size(0);	/* PSAMPLE_ATTR_TUNNEL */

	if (tun_key->tun_flags & TUNNEL_KEY)
		sum += nla_total_size_64bit(sizeof(u64));

	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
		sum += nla_total_size(0);

	switch (tun_proto) {
	case AF_INET:
		if (tun_key->u.ipv4.src)
			sum += nla_total_size(sizeof(u32));
		if (tun_key->u.ipv4.dst)
			sum += nla_total_size(sizeof(u32));
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&tun_key->u.ipv6.src))
			sum += nla_total_size(sizeof(struct in6_addr));
		if (!ipv6_addr_any(&tun_key->u.ipv6.dst))
			sum += nla_total_size(sizeof(struct in6_addr));
		break;
	}
	if (tun_key->tos)
		sum += nla_total_size(sizeof(u8));
	sum += nla_total_size(sizeof(u8));	/* TTL */
	if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
		sum += nla_total_size(0);
	if (tun_key->tun_flags & TUNNEL_CSUM)
		sum += nla_total_size(0);
	if (tun_key->tp_src)
		sum += nla_total_size(sizeof(u16));
	if (tun_key->tp_dst)
		sum += nla_total_size(sizeof(u16));
	if (tun_key->tun_flags & TUNNEL_OAM)
		sum += nla_total_size(0);
	if (tun_opts_len) {
		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
			sum += nla_total_size(tun_opts_len);
		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
			sum += nla_total_size(tun_opts_len);
	}

	return sum;
}
358 #endif
359 
/**
 * psample_sample_packet - report a sampled packet over netlink
 * @group: sampling group the packet was sampled on
 * @skb: the sampled packet
 * @sample_rate: rate at which packets are being sampled
 * @md: additional metadata (ifindexes, out-tc/occupancy, latency, ...)
 *
 * Builds a PSAMPLE_CMD_SAMPLE message carrying the metadata and a
 * (possibly truncated) copy of the packet and multicasts it to SAMPLE
 * group listeners in the group's network namespace.  Best-effort: on
 * any failure the sample is dropped with a ratelimited error message.
 */
void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
			   u32 sample_rate, const struct psample_metadata *md)
{
	ktime_t tstamp = ktime_get_real();	/* wall-clock sample time */
	int out_ifindex = md->out_ifindex;
	int in_ifindex = md->in_ifindex;
	u32 trunc_size = md->trunc_size;
#ifdef CONFIG_INET
	struct ip_tunnel_info *tun_info;
#endif
	struct sk_buff *nl_skb;
	int data_len;
	int meta_len;
	void *data;
	int ret;

	/* Space needed for the metadata attributes; optional attributes
	 * are only accounted for when they will actually be emitted.
	 */
	meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
		   (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
		   (md->out_tc_valid ? nla_total_size(sizeof(u16)) : 0) +
		   (md->out_tc_occ_valid ? nla_total_size_64bit(sizeof(u64)) : 0) +
		   (md->latency_valid ? nla_total_size_64bit(sizeof(u64)) : 0) +
		   nla_total_size(sizeof(u32)) +	/* sample_rate */
		   nla_total_size(sizeof(u32)) +	/* orig_size */
		   nla_total_size(sizeof(u32)) +	/* group_num */
		   nla_total_size(sizeof(u32)) +	/* seq */
		   nla_total_size_64bit(sizeof(u64)) +	/* timestamp */
		   nla_total_size(sizeof(u16));		/* protocol */

#ifdef CONFIG_INET
	tun_info = skb_tunnel_info(skb);
	if (tun_info)
		meta_len += psample_tunnel_meta_len(tun_info);
#endif

	/* Truncate the payload so metadata + data fit in one message. */
	data_len = min(skb->len, trunc_size);
	if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
			    - NLA_ALIGNTO;

	nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
	if (unlikely(!nl_skb))
		return;

	data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
			   PSAMPLE_CMD_SAMPLE);
	if (unlikely(!data))
		goto error;

	if (in_ifindex) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
		if (unlikely(ret < 0))
			goto error;
	}

	if (out_ifindex) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
		if (unlikely(ret < 0))
			goto error;
	}

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
	if (unlikely(ret < 0))
		goto error;

	/* Original (pre-truncation) packet length. */
	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
	if (unlikely(ret < 0))
		goto error;

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
	if (unlikely(ret < 0))
		goto error;

	/* NOTE(review): group->seq++ is not under psample_groups_lock, so
	 * concurrent samplers may race on it — presumably acceptable for a
	 * best-effort per-group sequence counter; confirm before relying
	 * on strict monotonicity.
	 */
	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
	if (unlikely(ret < 0))
		goto error;

	if (md->out_tc_valid) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OUT_TC, md->out_tc);
		if (unlikely(ret < 0))
			goto error;
	}

	if (md->out_tc_occ_valid) {
		ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_OUT_TC_OCC,
					md->out_tc_occ, PSAMPLE_ATTR_PAD);
		if (unlikely(ret < 0))
			goto error;
	}

	if (md->latency_valid) {
		ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_LATENCY,
					md->latency, PSAMPLE_ATTR_PAD);
		if (unlikely(ret < 0))
			goto error;
	}

	ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_TIMESTAMP,
				ktime_to_ns(tstamp), PSAMPLE_ATTR_PAD);
	if (unlikely(ret < 0))
		goto error;

	ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_PROTO,
			  be16_to_cpu(skb->protocol));
	if (unlikely(ret < 0))
		goto error;

	if (data_len) {
		/* Hand-rolled nla_put() so the (possibly non-linear) packet
		 * can be copied straight into the attribute payload with
		 * skb_copy_bits().
		 */
		int nla_len = nla_total_size(data_len);
		struct nlattr *nla;

		nla = skb_put(nl_skb, nla_len);
		nla->nla_type = PSAMPLE_ATTR_DATA;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			goto error;
	}

#ifdef CONFIG_INET
	if (tun_info) {
		ret = psample_ip_tun_to_nlattr(nl_skb, tun_info);
		if (unlikely(ret < 0))
			goto error;
	}
#endif

	genlmsg_end(nl_skb, data);
	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
				PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);

	return;
error:
	pr_err_ratelimited("Could not create psample log message\n");
	nlmsg_free(nl_skb);
}
EXPORT_SYMBOL_GPL(psample_sample_packet);
496 
/* Register the psample generic netlink family on module load. */
static int __init psample_module_init(void)
{
	return genl_register_family(&psample_nl_family);
}
501 
/* Unregister the family on module unload. */
static void __exit psample_module_exit(void)
{
	genl_unregister_family(&psample_nl_family);
}
506 
module_init(psample_module_init);
module_exit(psample_module_exit);

/* Module metadata exposed via modinfo. */
MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
MODULE_DESCRIPTION("netlink channel for packet sampling");
MODULE_LICENSE("GPL v2");
513