// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add socket redirect support (ingress mirror/redirect is handled below)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_wrapper.h>

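/* All mirred actions are kept on this global list so that the netdevice
 * notifier below can walk them and drop references to devices that are
 * being unregistered.
 */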
static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

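/* A mirred action can redirect a packet to a device whose datapath invokes
 * mirred again (e.g. two devices redirecting to each other). This per-CPU
 * depth counter caps such nesting; packets that exceed the limit are
 * dropped in tcf_mirred_act().
 */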
#define MIRRED_RECURSION_LIMIT    4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

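/* These control verdicts tell the caller that the skb is no longer theirs,
 * so a redirect taken at ingress may consume the original skb instead of
 * cloning it.
 */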
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static struct tc_action_ops act_mirred_ops;

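/* Control path: parse the netlink attributes, create or update the action,
 * and bind it to the target device. An illustrative setup from user space
 * (interface names are placeholders):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall \
 *           action mirred egress redirect dev eth1
 */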
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *odev, *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		odev = rcu_replace_pointer(m->tcfm_dev, ndev,
					  lockdep_is_held(&m->tcf_lock));
		netdev_put(odev, &m->tcfm_dev_tracker);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

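/* Hand the skb to the target: egress goes through the device's transmit
 * path, ingress re-enters the stack as if the packet had been received on
 * the target device.
 */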
static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else
		err = netif_receive_skb(skb);

	return err;
}

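/* Fast path, entered once per packet that matched a classifier with a
 * mirred action attached. Under RCU we look up the target device, clone
 * the skb unless the original can be consumed, realign skb->data with what
 * the target expects (mac vs. network header), and forward. The fields
 * read with READ_ONCE() may be updated concurrently by tcf_mirred_init().
 */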
TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int rec_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	int m_eaction;
	int mac_len;
	bool at_nh;

	rec_level = __this_cpu_inc_return(mirred_rec_level);
	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_rec_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	at_ingress = skb_at_tc_ingress(skb);
	use_reinsert = at_ingress && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb2);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb2);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb_set_redirected(skb2, skb2->tc_at_ingress);

		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			err = tcf_mirred_forward(want_ingress, skb);
			if (err)
				tcf_action_inc_overlimit_qstats(&m->common);
			__this_cpu_dec(mirred_rec_level);
			return TC_ACT_CONSUMED;
		}
	}

	err = tcf_mirred_forward(want_ingress, skb2);
	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_rec_level);

	return retval;
}

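/* Drivers that offloaded this action report their hardware counters through
 * this callback.
 */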
static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

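/* On NETDEV_UNREGISTER, walk every mirred action and clear its pointer to
 * the disappearing device, so the datapath sees a NULL target instead of a
 * dangling reference.
 */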
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				netdev_put(dev, &m->tcfm_dev_tracker);
				/* Note: no RCU grace period is necessary here,
				 * as net_device objects are already RCU
				 * protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

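/* Used by offload code to take a reference on the current target device;
 * the returned destructor releases it.
 */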
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}

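/* Translate the four mirred variants into the corresponding flow_action
 * entry ids so drivers can program the action into hardware.
 */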
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.offload_act_setup =	tcf_mirred_offload_act_setup,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_mirred_ops.net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &act_mirred_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

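/* The netdevice notifier is registered before the action ops so that any
 * action created once registration succeeds is covered by unregister
 * events; on failure the notifier is torn down again.
 */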
static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);
	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);