xref: /openbmc/linux/net/sched/act_ife.c (revision 6c8c1406)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 *		Refer to:
 *		draft-ietf-forces-interfelfb-03
 *		and
 *		netdev01 paper:
 *		"Distributing Linux Traffic Control Classifier-Action
 *		Subsystem"
 *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 * copyright Jamal Hadi Salim (2015)
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>
#include <net/ife.h>

static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;

static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
	[TCA_IFE_DMAC] = { .len = ETH_ALEN},
	[TCA_IFE_SMAC] = { .len = ETH_ALEN},
	[TCA_IFE_TYPE] = { .type = NLA_U16},
};

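/* Encode a u16 metadatum as an IFE TLV at skbdata. A value configured on
 * the action (mi->metaval, the "use" case) takes precedence over the
 * runtime value in metaval. Returns the number of bytes consumed, or 0
 * if there is nothing to encode.
 */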
int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u16 edata = 0;

	if (mi->metaval)
		edata = *(u16 *)mi->metaval;
	else if (metaval)
		edata = metaval;

	if (!edata) /* will not encode */
		return 0;

	edata = htons(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u16);

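/* Netlink dump helper: emit the u32 metadatum as an attribute, or an
 * empty attribute when no static value is configured (the "allow" case).
 */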
int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u32);

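/* check_presence helpers: return the on-wire size this metadatum will
 * occupy (type + length + padded value), or 0 if it will not be encoded.
 */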
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+V == 2+2+4 */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);

int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+(V) == 2+2+(2+2bytepad) */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u16);

int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u32 edata = metaval;

	if (mi->metaval)
		edata = *(u32 *)mi->metaval;
	else if (metaval)
		edata = metaval;

	if (!edata) /* will not encode */
		return 0;

	edata = htonl(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u32);

int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);

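/* alloc helpers: duplicate the netlink-supplied value so the action keeps
 * its own copy in mi->metaval.
 */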
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
	if (!mi->metaval)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);

int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
	if (!mi->metaval)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);

void ife_release_meta_gen(struct tcf_meta_info *mi)
{
	kfree(mi->metaval);
}
EXPORT_SYMBOL_GPL(ife_release_meta_gen);

int ife_validate_meta_u32(void *val, int len)
{
	if (len == sizeof(u32))
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u32);

int ife_validate_meta_u16(void *val, int len)
{
	/* length will not include padding */
	if (len == sizeof(u16))
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u16);

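/* Registry of metadata ops provided by the ife-meta-* modules, protected
 * by ife_mod_lock.
 */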
static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);

static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
	struct tcf_meta_ops *o;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		if (o->metaid == metaid) {
			if (!try_module_get(o->owner))
				o = NULL;
			read_unlock(&ife_mod_lock);
			return o;
		}
	}
	read_unlock(&ife_mod_lock);

	return NULL;
}

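/* Register a metadata ops structure. All mandatory callbacks must be
 * present and the metaid/name must not already be registered; a missing
 * release callback defaults to ife_release_meta_gen().
 */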
int register_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;

	if (!mops->metaid || !mops->metatype || !mops->name ||
	    !mops->check_presence || !mops->encode || !mops->decode ||
	    !mops->get || !mops->alloc)
		return -EINVAL;

	write_lock(&ife_mod_lock);

	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid ||
		    (strcmp(mops->name, m->name) == 0)) {
			write_unlock(&ife_mod_lock);
			return -EEXIST;
		}
	}

	if (!mops->release)
		mops->release = ife_release_meta_gen;

	list_add_tail(&mops->list, &ifeoplist);
	write_unlock(&ife_mod_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(register_ife_op);

int unregister_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;
	int err = -ENOENT;

	write_lock(&ife_mod_lock);
	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid) {
			list_del(&mops->list);
			err = 0;
			break;
		}
	}
	write_unlock(&ife_mod_lock);

	return err;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);

static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
{
	int ret = 0;
	/* XXX: unfortunately we can't use nla_policy at this point
	 * because a length of 0 is valid in the "allow" case, while
	 * "use" semantics do enforce a proper length. nla_policy could
	 * have been used, but it is hard to apply it just for that.
	 */
	if (ops->validate)
		return ops->validate(val, len);

	if (ops->metatype == NLA_U32)
		ret = ife_validate_meta_u32(val, len);
	else if (ops->metatype == NLA_U16)
		ret = ife_validate_meta_u16(val, len);

	return ret;
}

#ifdef CONFIG_MODULES
static const char *ife_meta_id2name(u32 metaid)
{
	switch (metaid) {
	case IFE_META_SKBMARK:
		return "skbmark";
	case IFE_META_PRIO:
		return "skbprio";
	case IFE_META_TCINDEX:
		return "tcindex";
	default:
		return "unknown";
	}
}
#endif

/* Called when adding new meta information: find (and, if necessary,
 * load) the ops for this metaid and validate the supplied value.
 */
static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
{
	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret = 0;

	if (!ops) {
		ret = -ENOENT;
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("ife-meta-%s", ife_meta_id2name(metaid));
		if (rtnl_held)
			rtnl_lock();
		ops = find_ife_oplist(metaid);
#endif
	}

	if (ops) {
		ret = 0;
		if (len)
			ret = ife_validate_metatype(ops, val, len);

		module_put(ops->owner);
	}

	return ret;
}

/* Called when adding new meta information: allocate a tcf_meta_info
 * entry for the metadatum and link it into the action's metalist.
 */
static int __add_metainfo(const struct tcf_meta_ops *ops,
			  struct tcf_ife_info *ife, u32 metaid, void *metaval,
			  int len, bool atomic, bool exists)
{
	struct tcf_meta_info *mi = NULL;
	int ret = 0;

	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!mi)
		return -ENOMEM;

	mi->metaid = metaid;
	mi->ops = ops;
	if (len > 0) {
		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
		if (ret != 0) {
			kfree(mi);
			return ret;
		}
	}

	if (exists)
		spin_lock_bh(&ife->tcf_lock);
	list_add_tail(&mi->metalist, &ife->metalist);
	if (exists)
		spin_unlock_bh(&ife->tcf_lock);

	return ret;
}

static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
				    struct tcf_ife_info *ife, u32 metaid,
				    bool exists)
{
	int ret;

	if (!try_module_get(ops->owner))
		return -ENOENT;
	ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
	if (ret)
		module_put(ops->owner);
	return ret;
}

static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
			int len, bool exists)
{
	const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret;

	if (!ops)
		return -ENOENT;
	ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
	if (ret)
		/* put back what find_ife_oplist took */
		module_put(ops->owner);
	return ret;
}

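/* No allow-list was supplied: try to install every registered metadatum.
 * Succeeds if at least one was installed.
 */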
static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
{
	struct tcf_meta_ops *o;
	int rc = 0;
	int installed = 0;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
		if (rc == 0)
			installed += 1;
	}
	read_unlock(&ife_mod_lock);

	if (installed)
		return 0;
	else
		return -EINVAL;
}

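/* Dump the configured metadata list as a TCA_IFE_METALST nest; the nest
 * is trimmed away again if nothing was encoded.
 */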
static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e;
	struct nlattr *nest;
	unsigned char *b = skb_tail_pointer(skb);
	int total_encoded = 0;

	/* can only happen on decode */
	if (list_empty(&ife->metalist))
		return 0;

	nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
	if (!nest)
		goto out_nlmsg_trim;

	list_for_each_entry(e, &ife->metalist, metalist) {
		if (!e->ops->get(skb, e))
			total_encoded += 1;
	}

	if (!total_encoded)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	return 0;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

/* under ife->tcf_lock */
static void _tcf_ife_cleanup(struct tc_action *a)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_meta_info *e, *n;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		list_del(&e->metalist);
		if (e->metaval) {
			if (e->ops->release)
				e->ops->release(e);
			else
				kfree(e->metaval);
		}
		module_put(e->ops->owner);
		kfree(e);
	}
}

static void tcf_ife_cleanup(struct tc_action *a)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;

	spin_lock_bh(&ife->tcf_lock);
	_tcf_ife_cleanup(a);
	spin_unlock_bh(&ife->tcf_lock);

	p = rcu_dereference_protected(ife->params, 1);
	if (p)
		kfree_rcu(p, rcu);
}

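/* First pass over the metadata attributes: make sure the ops for each
 * metaid can be loaded (possibly via request_module(), which may need
 * the RTNL lock dropped) and that the supplied values are valid.
 */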
439 {
440 	int i;
441 
442 	for (i = 1; i < max_metacnt; i++) {
443 		if (tb[i]) {
444 			void *val = nla_data(tb[i]);
445 			int len = nla_len(tb[i]);
446 			int rc;
447 
448 			rc = load_metaops_and_vet(i, val, len, rtnl_held);
449 			if (rc != 0)
450 				return rc;
451 		}
452 	}
453 
454 	return 0;
455 }
456 
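/* Second pass: allocate the metadata entries and attach them to the
 * action's metalist.
 */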
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
			     bool exists, bool rtnl_held)
{
	int len = 0;
	int rc = 0;
	int i = 0;
	void *val;

	for (i = 1; i < max_metacnt; i++) {
		if (tb[i]) {
			val = nla_data(tb[i]);
			len = nla_len(tb[i]);

			rc = add_metainfo(ife, i, val, len, exists);
			if (rc)
				return rc;
		}
	}

	return rc;
}

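/* Create or update an IFE action instance from the netlink attributes. */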
static int tcf_ife_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a,
			struct tcf_proto *tp, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_IFE_MAX + 1];
	struct nlattr *tb2[IFE_META_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_ife_params *p;
	struct tcf_ife_info *ife;
	u16 ife_type = ETH_P_IFE;
	struct tc_ife *parm;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	bool exists = false;
	int ret = 0;
	u32 index;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_IFE_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_IFE_PARMS]);

	/* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
	 * they cannot run at the same time. Reject all other flag values,
	 * which are not supported right now.
	 */
	if (parm->flags & ~IFE_ENCODE)
		return -EINVAL;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	if (tb[TCA_IFE_METALST]) {
		err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
						  tb[TCA_IFE_METALST], NULL,
						  NULL);
		if (err) {
			kfree(p);
			return err;
		}
		err = load_metalist(tb2, !(flags & TCA_ACT_FLAGS_NO_RTNL));
		if (err) {
			kfree(p);
			return err;
		}
	}

	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0) {
		kfree(p);
		return err;
	}
	exists = err;
	if (exists && bind) {
		kfree(p);
		return 0;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
				     bind, true, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			kfree(p);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		kfree(p);
		return -EEXIST;
	}

	ife = to_ife(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&ife->metalist);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p->flags = parm->flags;

	if (parm->flags & IFE_ENCODE) {
		if (tb[TCA_IFE_TYPE])
			ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
		if (tb[TCA_IFE_DMAC])
			daddr = nla_data(tb[TCA_IFE_DMAC]);
		if (tb[TCA_IFE_SMAC])
			saddr = nla_data(tb[TCA_IFE_SMAC]);
	}

	if (parm->flags & IFE_ENCODE) {
		if (daddr)
			ether_addr_copy(p->eth_dst, daddr);
		else
			eth_zero_addr(p->eth_dst);

		if (saddr)
			ether_addr_copy(p->eth_src, saddr);
		else
			eth_zero_addr(p->eth_src);

		p->eth_type = ife_type;
	}

	if (tb[TCA_IFE_METALST]) {
		err = populate_metalist(ife, tb2, exists,
					!(flags & TCA_ACT_FLAGS_NO_RTNL));
		if (err)
			goto metadata_parse_err;
	} else {
		/* If no metadata allow list, or allow-all, was passed,
		 * then add as many supported metadata as we can. At least
		 * one must be installed or we bail out.
		 */
		err = use_all_metadata(ife, exists);
		if (err)
			goto metadata_parse_err;
	}

	if (exists)
		spin_lock_bh(&ife->tcf_lock);
	/* protected by tcf_lock when modifying existing action */
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	p = rcu_replace_pointer(ife->params, p, 1);

	if (exists)
		spin_unlock_bh(&ife->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (p)
		kfree_rcu(p, rcu);

	return ret;
metadata_parse_err:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	kfree(p);
	tcf_idr_release(*a, bind);
	return err;
}

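/* Dump the action configuration; params are read under tcf_lock. */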
static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;
	struct tc_ife opt = {
		.index = ife->tcf_index,
		.refcnt = refcount_read(&ife->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&ife->tcf_lock);
	opt.action = ife->tcf_action;
	p = rcu_dereference_protected(ife->params,
				      lockdep_is_held(&ife->tcf_lock));
	opt.flags = p->flags;

	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &ife->tcf_tm);
	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
		goto nla_put_failure;

	if (!is_zero_ether_addr(p->eth_dst)) {
		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
			goto nla_put_failure;
	}

	if (!is_zero_ether_addr(p->eth_src)) {
		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
			goto nla_put_failure;
	}

	if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
		goto nla_put_failure;

	if (dump_metalist(skb, ife)) {
		/* ignore failure to dump metalist */
		pr_info("Failed to dump metalist\n");
	}

	spin_unlock_bh(&ife->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ife->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

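/* Look up the metaid in the action's metalist and, if found, let its ops
 * decode the received value into the skb.
 */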
static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
			      u16 metaid, u16 mlen, void *mdata)
{
	struct tcf_meta_info *e;

	/* XXX: use hash to speed up */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (metaid == e->metaid) {
			if (e->ops) {
				/* We check for decode presence already */
				return e->ops->decode(skb, mdata, mlen);
			}
		}
	}

	return -ENOENT;
}

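/* Decode path: strip the IFE header, walk its TLVs and hand each known
 * metadatum to its decode op, then restore the skb protocol.
 */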
static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	u8 *ifehdr_end;
	u8 *tlv_data;
	u16 metalen;

	bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	tlv_data = ife_decode(skb, &metalen);
	if (unlikely(!tlv_data)) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	ifehdr_end = tlv_data + metalen;
	for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
		u8 *curr_data;
		u16 mtype;
		u16 dlen;

		curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
						&dlen, NULL);
		if (!curr_data) {
			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
			return TC_ACT_SHOT;
		}

		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
			/* abuse overlimits to count when we receive metadata
			 * but don't have an ops for it
			 */
			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
					    mtype, dlen);
			qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		}
	}

	if (WARN_ON(tlv_data != ifehdr_end)) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);
	skb_reset_network_header(skb);

	return action;
}

/* XXX: check if we can do this at install time instead of in the
 * current send data path
 */
static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e, *n;
	int tot_run_sz = 0, run_sz = 0;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		if (e->ops->check_presence) {
			run_sz = e->ops->check_presence(skb, e);
			tot_run_sz += run_sz;
		}
	}

	return tot_run_sz;
}

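/* Encode path: prepend an IFE header plus metadata TLVs and rewrite the
 * outer ethernet header (addresses and ethertype) from the action params.
 */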
static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res, struct tcf_ife_params *p)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	struct ethhdr *oethh;	/* outer ether header */
	struct tcf_meta_info *e;
	/*
	   OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
	   where ORIGDATA = original ethernet header ...
	 */
	u16 metalen = ife_get_sz(skb, ife);
	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
	unsigned int skboff = 0;
	int new_len = skb->len + hdrm;
	bool exceed_mtu = false;
	void *ife_meta;
	int err = 0;

	if (!skb_at_tc_ingress(skb)) {
		if (new_len > skb->dev->mtu)
			exceed_mtu = true;
	}

	bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (!metalen) {		/* no metadata to send */
		/* abuse overlimits to count when we allow a packet
		 * with no metadata
		 */
		qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return action;
	}
	/* could be a bad policy setup or MTU config,
	 * so let's be conservative. */
	if ((action == TC_ACT_SHOT) || exceed_mtu) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	ife_meta = ife_encode(skb, metalen);

	spin_lock(&ife->tcf_lock);

	/* XXX: we don't have a clever way of telling encode to
	 * not repeat some of the computations that are done by
	 * ops->check_presence...
	 */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (e->ops->encode) {
			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
					     e);
		}
		if (err < 0) {
			/* too corrupt to keep around if overwritten */
			spin_unlock(&ife->tcf_lock);
			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
			return TC_ACT_SHOT;
		}
		skboff += err;
	}
	spin_unlock(&ife->tcf_lock);
	oethh = (struct ethhdr *)skb->data;

	if (!is_zero_ether_addr(p->eth_src))
		ether_addr_copy(oethh->h_source, p->eth_src);
	if (!is_zero_ether_addr(p->eth_dst))
		ether_addr_copy(oethh->h_dest, p->eth_dst);
	oethh->h_proto = htons(p->eth_type);

	if (skb_at_tc_ingress(skb))
		skb_pull(skb, skb->dev->hard_header_len);

	return action;
}

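/* Action entry point: dispatch to the encode or decode path based on the
 * flags in the RCU-protected parameters.
 */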
static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;
	int ret;

	p = rcu_dereference_bh(ife->params);
	if (p->flags & IFE_ENCODE) {
		ret = tcf_ife_encode(skb, a, res, p);
		return ret;
	}

	return tcf_ife_decode(skb, a, res);
}

static struct tc_action_ops act_ife_ops = {
	.kind = "ife",
	.id = TCA_ID_IFE,
	.owner = THIS_MODULE,
	.act = tcf_ife_act,
	.dump = tcf_ife_dump,
	.cleanup = tcf_ife_cleanup,
	.init = tcf_ife_init,
	.size = sizeof(struct tcf_ife_info),
};

static __net_init int ife_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id);

	return tc_action_net_init(net, tn, &act_ife_ops);
}

static void __net_exit ife_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_ife_ops.net_id);
}

static struct pernet_operations ife_net_ops = {
	.init = ife_init_net,
	.exit_batch = ife_exit_net,
	.id   = &act_ife_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init ife_init_module(void)
{
	return tcf_register_action(&act_ife_ops, &ife_net_ops);
}

static void __exit ife_cleanup_module(void)
{
	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
}

module_init(ife_init_module);
module_exit(ife_cleanup_module);

MODULE_AUTHOR("Jamal Hadi Salim(2015)");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");