/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add socket redirect support
 *
 */

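/*
 * Example usage (illustrative only; interface names are placeholders):
 *
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: matchall \
 *		action mirred egress mirror dev eth1
 *
 * mirrors every packet arriving on eth0 out of eth1's egress path.
 */
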
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

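/* true for redirect actions, which steal the packet, as opposed to mirror
 * actions, which operate on a clone.
 */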
static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

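/* true when the configured eaction delivers to the target device's ingress
 * path rather than its egress (xmit) path.
 */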
static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

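/* The original skb can be handed back to the caller for reinsertion only
 * when the filter verdict already tells the caller that the skb has been
 * consumed.
 */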
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

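/* Fetch the target device; callers must hold tcf_lock. */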
static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

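/* ->cleanup() callback: runs when the last reference to the action is
 * dropped, so tcfm_dev can be read without taking tcf_lock.
 */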
static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	if (dev)
		dev_put(dev);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

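/* ->init() callback: create a new mirred action or update an existing one
 * from the TCA_MIRRED_PARMS netlink attribute, taking a reference on the
 * target device.
 */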
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	struct net_device *dev;
	bool exists = false;
	int ret, err;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, parm->index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, parm->index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_mirred_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		dev = dev_get_by_index(net, parm->ifindex);
		if (!dev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(dev);
		rcu_swap_protected(m->tcfm_dev, dev,
				   lockdep_is_held(&m->tcf_lock));
		if (dev)
			dev_put(dev);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);

		tcf_idr_insert(tn, *a);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

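/* ->act() callback: runs per packet under RCU; clones the skb unless the
 * verdict lets the original be reinserted, fixes up the MAC header when
 * crossing the ingress/egress boundary, then transmits or re-receives the
 * result on the target device.
 */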
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	int m_eaction;
	int mac_len;

	tcf_lastuse_update(&m->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* The clone could be avoided only for the ingress and clsact callers;
	 * since the clsact caller can't easily be detected, skip the clone
	 * only for ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	/* If the action's target direction differs from the filter's
	 * direction, and the device expects a mac header on xmit, then
	 * mac push/pull is needed.
	 */
	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
	if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
		if (!skb_at_tc_ingress(skb)) {
			/* caught at egress, act ingress: pull mac */
			mac_len = skb_network_header(skb) - skb_mac_header(skb);
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* caught at ingress, act egress: push mac */
			skb_push_rcsum(skb2, skb->mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb2->tc_redirected = 1;
		skb2->tc_from_ingress = skb2->tc_at_ingress;
		if (skb2->tc_from_ingress)
			skb2->tstamp = 0;
		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			res->ingress = want_ingress;
			res->qstats = this_cpu_ptr(m->common.cpu_qstats);
			return TC_ACT_REINSERT;
		}
	}

	if (!want_ingress)
		err = dev_queue_xmit(skb2);
	else
		err = netif_receive_skb(skb2);

	if (err) {
out:
		qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}

	return retval;
}

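/* ->stats_update() callback: folds in counters reported by the hardware
 * offload path.
 */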
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
			     u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
	if (hw)
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
				   bytes, packets);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

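/* ->dump() callback: serialize the action's configuration back to userspace
 * over netlink.
 */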
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

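/* Boilerplate delegating walk and lookup to the generic per-netns action
 * IDR infrastructure.
 */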
static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_idr_search(tn, a, index);
}

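/* On NETDEV_UNREGISTER, drop the reference held by any mirred instance
 * targeting the device; subsequent packets then hit the "target device is
 * gone" path in tcf_mirred_act().
 */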
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				dev_put(dev);
				/* Note: no RCU grace period is necessary, as
				 * the net_device is already RCU protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

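/* ->get_dev()/->put_dev(): used by the hardware offload path to obtain and
 * release a referenced pointer to the target device.
 */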
static struct net_device *tcf_mirred_get_dev(const struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void tcf_mirred_put_dev(struct net_device *dev)
{
	dev_put(dev);
}

static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.walk		=	tcf_mirred_walker,
	.lookup		=	tcf_mirred_search,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
	.put_dev	=	tcf_mirred_put_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tc_action_net_init(tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &mirred_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

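/* Register the netdevice notifier before the action so that every mirred
 * instance is tracked from the moment it can be created; back the notifier
 * out if action registration fails.
 */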
static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);