// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add socket redirect support
 */

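/* Illustrative userspace usage (a hedged sketch, not part of this file:
 * "eth0"/"eth1" are placeholder interface names, and the filter is the
 * classic catch-all u32 match from the tc-mirred(8) examples):
 *
 *	# mirror ingress traffic of eth0 to eth1
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *		action mirred egress mirror dev eth1
 *
 *	# redirect instead: the packet is moved to eth1, not copied
 *	tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *		action mirred egress redirect dev eth1
 */
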
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

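/* A mirred action can redirect a packet to a device whose own TC rules
 * run another mirred action, so the fast path may re-enter this module
 * on the same CPU. The per-CPU counter below bounds that nesting:
 * packets nested deeper than MIRRED_RECURSION_LIMIT are dropped, which
 * breaks redirect loops.
 */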
#define MIRRED_RECURSION_LIMIT    4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

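/* True for TC return codes that tell the caller it no longer owns the
 * skb (dropped, stolen, queued or trapped): in that case a redirect may
 * reuse the original skb instead of cloning it, see the use_reinsert
 * handling in tcf_mirred_act().
 */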
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	if (dev)
		dev_put(dev);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

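/* The ->init() callback: parse the netlink attributes, then either
 * create a new action instance or, when ovr is set, update the existing
 * one identified by parm->index. Parameter updates run under tcf_lock so
 * they appear atomic to the RCU-protected fast path.
 */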
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	struct net_device *dev;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		dev = dev_get_by_index(net, parm->ifindex);
		if (!dev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(dev);
		dev = rcu_replace_pointer(m->tcfm_dev, dev,
					  lockdep_is_held(&m->tcf_lock));
		if (dev)
			dev_put(dev);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);

		tcf_idr_insert(tn, *a);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

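/* The ->act() callback: the per-packet fast path. It runs inside an RCU
 * read-side section with BHs disabled, so the target device and the
 * action parameters are read via rcu_dereference_bh()/READ_ONCE()
 * instead of taking tcf_lock.
 */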
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int rec_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	int m_eaction;
	int mac_len;
	bool at_nh;

	rec_level = __this_cpu_inc_return(mirred_rec_level);
	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_rec_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

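	/* The clone may carry skb->data at the MAC header while the target
	 * expects it at the network header, or vice versa (e.g. when
	 * redirecting between an L2 and an L3 device); realign skb->data
	 * before handing the packet over.
	 */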
	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb2->tc_redirected = 1;
		skb2->tc_from_ingress = skb2->tc_at_ingress;
		if (skb2->tc_from_ingress)
			skb2->tstamp = 0;
		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			res->ingress = want_ingress;
			if (skb_tc_reinsert(skb, res))
				tcf_action_inc_overlimit_qstats(&m->common);
			__this_cpu_dec(mirred_rec_level);
			return TC_ACT_CONSUMED;
		}
	}

	if (!want_ingress)
		err = dev_queue_xmit(skb2);
	else
		err = netif_receive_skb(skb2);

	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_rec_level);

	return retval;
}

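/* The ->stats_update() callback: drivers fold counters gathered by
 * offloaded hardware into the software stats through this hook.
 */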
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
			     u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, false, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

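/* The ->dump() callback: serialize the action parameters and timestamps
 * into a netlink message for userspace.
 */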
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_idr_search(tn, a, index);
}

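/* Netdev notifier: when a target device unregisters, drop our reference
 * and clear tcfm_dev so the fast path sees a NULL target instead of a
 * dangling pointer.
 */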
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				dev_put(dev);
				/* Note: no RCU grace period is necessary
				 * here, as net_device is already RCU
				 * protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

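/* The ->get_dev() callback: lets the offload path take a reference on
 * the target device; the destructor handed back through @destructor
 * releases that reference.
 */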
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.walk		=	tcf_mirred_walker,
	.lookup		=	tcf_mirred_search,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &mirred_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);