// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */
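
/*
 * Illustrative userspace usage (device names are examples only):
 *
 *	# mirror ingress traffic on eth0 to eth1
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall \
 *		action mirred egress mirror dev eth1
 *
 *	# redirect instead of mirroring
 *	tc filter add dev eth0 ingress matchall \
 *		action mirred egress redirect dev eth1
 */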

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

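/*
 * A mirred action can redirect a packet to a device whose datapath
 * contains another mirred action, so the handler below can re-enter
 * itself. The per-CPU counter bounds that nesting depth: once a packet
 * has traversed more than MIRRED_RECURSION_LIMIT mirred hops on this
 * CPU, it is dropped (TC_ACT_SHOT) instead of looping forever.
 */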
#define MIRRED_RECURSION_LIMIT    4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

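/*
 * Reinserting - forwarding the original skb instead of a clone - is
 * only possible when the configured verdict guarantees the caller will
 * not touch the skb afterwards: SHOT, STOLEN, QUEUED and TRAP all mean
 * the packet is consumed or dropped from the caller's point of view.
 */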
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static struct tc_action_ops act_mirred_ops;

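/*
 * Configuration path: parse the netlink attributes, validate the
 * eaction, and either bind to an existing action instance or create a
 * new one. The target device and eaction are updated under tcf_lock;
 * the datapath reads them with READ_ONCE()/RCU, so it never has to
 * take the lock.
 */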
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *odev, *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		odev = rcu_replace_pointer(m->tcfm_dev, ndev,
					  lockdep_is_held(&m->tcf_lock));
		netdev_put(odev, &m->tcfm_dev_tracker);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

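/*
 * Deliver the skb on the selected datapath: packets bound for ingress
 * re-enter the stack via netif_receive_skb(), all others go out
 * through the device's transmit path.
 */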
static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else
		err = netif_receive_skb(skb);

	return err;
}

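/*
 * Per-packet entry point: update stats, enforce the recursion limit,
 * clone the skb unless the original can be handed over (redirect at
 * ingress with a consuming verdict), align skb->data with the header
 * the target expects, then forward.
 */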
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int rec_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	int m_eaction;
	int mac_len;
	bool at_nh;

	rec_level = __this_cpu_inc_return(mirred_rec_level);
	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_rec_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	at_ingress = skb_at_tc_ingress(skb);
	use_reinsert = at_ingress && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb2);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb2);

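	/* The target expects skb->data at the network header if it is an
	 * ingress destination or a device that does not transmit a mac
	 * header (e.g. layer 3 tunnels); otherwise it expects the mac
	 * header. If the current position disagrees, pull or push mac_len
	 * bytes, keeping the checksum consistent via the *_rcsum helpers.
	 */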
	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb_set_redirected(skb2, skb2->tc_at_ingress);

		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			err = tcf_mirred_forward(want_ingress, skb);
			if (err)
				tcf_action_inc_overlimit_qstats(&m->common);
			__this_cpu_dec(mirred_rec_level);
			return TC_ACT_CONSUMED;
		}
	}

	err = tcf_mirred_forward(want_ingress, skb2);
	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_rec_level);

	return retval;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

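/*
 * On NETDEV_UNREGISTER, drop the reference each mirred action holds on
 * the vanishing device and clear tcfm_dev, so the datapath sees the
 * target as gone.
 */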
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				netdev_put(dev, &m->tcfm_dev_tracker);
				/* Note: no RCU grace period is necessary, as
				 * net_device is already RCU protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

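/*
 * Return the current target device with a reference held; callers
 * release it through the destructor handed back alongside it.
 */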
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}

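/*
 * Translate the four mirred variants into their flow_action
 * equivalents for hardware offload. On the bind path the target device
 * is also resolved; on the query path only the action id is needed.
 */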
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.offload_act_setup =	tcf_mirred_offload_act_setup,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_mirred_ops.net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &act_mirred_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);
	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);
545