/* xref: /openbmc/linux/net/sched/act_ife.c (revision b732539e) */
/*
 * net/sched/act_ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 *		Refer to:
 *		draft-ietf-forces-interfelfb-03
 *		and
 *		netdev01 paper:
 *		"Distributing Linux Traffic Control Classifier-Action
 *		Subsystem"
 *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * copyright Jamal Hadi Salim (2015)
 *
 */
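
/* Example usage (illustrative only; exact syntax depends on the tc-ife(8)
 * man page and the iproute2 version in use):
 *
 *   encode - carry the skb mark as IFE metadata and set the dst MAC:
 *	tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match ip dst 192.168.1.2/32 flowid 1:1 \
 *		action ife encode allow mark dst 02:15:15:15:15:15
 *
 *   decode - strip the IFE header and restore the metadata on receive
 *   (assuming the default ETH_P_IFE ethertype, 0xed3e):
 *	tc filter add dev eth0 parent ffff: protocol 0xed3e u32 \
 *		match u32 0 0 flowid 1:1 \
 *		action ife decode reclassify
 *
 * If no "allow"/"use" list is given, encode installs every registered
 * metadata type (see use_all_metadata() below).
 */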

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>
#include <net/ife.h>

static unsigned int ife_net_id;
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;

static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
	[TCA_IFE_DMAC] = { .len = ETH_ALEN},
	[TCA_IFE_SMAC] = { .len = ETH_ALEN},
	[TCA_IFE_TYPE] = { .type = NLA_U16},
};

int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u16 edata = 0;

	if (mi->metaval)
		edata = *(u16 *)mi->metaval;
	else if (metaval)
		edata = metaval;

	if (!edata) /* will not encode */
		return 0;

	edata = htons(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u16);

int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u32);

int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+V == 2+2+4 */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);

int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+(V) == 2+2+(2+2bytepad) */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u16);
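
/* The sizes returned by the check helpers above are on-wire TLV sizes:
 * a 16-bit type plus a 16-bit length, followed by the value padded to a
 * 32-bit boundary. A rough worked example (illustration only): a u32
 * metadatum costs 2 + 2 + 4 = 8 bytes, and a u16 metadatum costs
 * 2 + 2 + (2 + 2 bytes of padding) = 8 bytes as well.
 */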

int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u32 edata = metaval;

	if (mi->metaval)
		edata = *(u32 *)mi->metaval;
	else if (metaval)
		edata = metaval;

	if (!edata) /* will not encode */
		return 0;

	edata = htonl(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u32);

int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);

int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
	if (!mi->metaval)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);

int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
	if (!mi->metaval)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);

void ife_release_meta_gen(struct tcf_meta_info *mi)
{
	kfree(mi->metaval);
}
EXPORT_SYMBOL_GPL(ife_release_meta_gen);

int ife_validate_meta_u32(void *val, int len)
{
	if (len == sizeof(u32))
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u32);

int ife_validate_meta_u16(void *val, int len)
{
	/* length will not include padding */
	if (len == sizeof(u16))
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u16);

static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);

static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
	struct tcf_meta_ops *o;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		if (o->metaid == metaid) {
			if (!try_module_get(o->owner))
				o = NULL;
			read_unlock(&ife_mod_lock);
			return o;
		}
	}
	read_unlock(&ife_mod_lock);

	return NULL;
}

int register_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;

	if (!mops->metaid || !mops->metatype || !mops->name ||
	    !mops->check_presence || !mops->encode || !mops->decode ||
	    !mops->get || !mops->alloc)
		return -EINVAL;

	write_lock(&ife_mod_lock);

	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid ||
		    (strcmp(mops->name, m->name) == 0)) {
			write_unlock(&ife_mod_lock);
			return -EEXIST;
		}
	}

	if (!mops->release)
		mops->release = ife_release_meta_gen;

	list_add_tail(&mops->list, &ifeoplist);
	write_unlock(&ife_mod_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(register_ife_op);

int unregister_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;
	int err = -ENOENT;

	write_lock(&ife_mod_lock);
	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid) {
			list_del(&mops->list);
			err = 0;
			break;
		}
	}
	write_unlock(&ife_mod_lock);

	return err;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);
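
/* A metadata module (for example one of the act_meta_* modules) is expected
 * to call register_ife_op() from its module_init() with a tcf_meta_ops that
 * fills in at least metaid, metatype, name, check_presence, encode, decode,
 * get and alloc, and to call unregister_ife_op() from its module_exit().
 * This is a sketch of the contract as enforced above, not a full reference.
 */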

static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
{
	int ret = 0;
	/* XXX: unfortunately we can't use nla_policy at this point
	 * because a length of 0 is valid in the case of "allow".
	 * "use" semantics do enforce a proper length, and nla_policy
	 * could have been used for that, but it is hard to use it
	 * for just that one case.
	 */
	if (ops->validate)
		return ops->validate(val, len);

	if (ops->metatype == NLA_U32)
		ret = ife_validate_meta_u32(val, len);
	else if (ops->metatype == NLA_U16)
		ret = ife_validate_meta_u16(val, len);

	return ret;
}

#ifdef CONFIG_MODULES
static const char *ife_meta_id2name(u32 metaid)
{
	switch (metaid) {
	case IFE_META_SKBMARK:
		return "skbmark";
	case IFE_META_PRIO:
		return "skbprio";
	case IFE_META_TCINDEX:
		return "tcindex";
	default:
		return "unknown";
	}
}
#endif
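
/* The names above feed request_module("ife-meta-%s", ...) below, so a
 * metadata module that wants to be autoloaded is assumed to advertise a
 * matching "ife-meta-<name>" module alias (the MODULE_ALIAS_IFE_META()
 * helper exists for this purpose). See the individual act_meta_* modules
 * for the authoritative aliases.
 */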

/* called when adding new meta information
 * under ife->tcf_lock for existing action
 */
static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
				void *val, int len, bool exists)
{
	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret = 0;

	if (!ops) {
		ret = -ENOENT;
#ifdef CONFIG_MODULES
		if (exists)
			spin_unlock_bh(&ife->tcf_lock);
		rtnl_unlock();
		request_module("ife-meta-%s", ife_meta_id2name(metaid));
		rtnl_lock();
		if (exists)
			spin_lock_bh(&ife->tcf_lock);
		ops = find_ife_oplist(metaid);
#endif
	}

	if (ops) {
		ret = 0;
		if (len)
			ret = ife_validate_metatype(ops, val, len);

		module_put(ops->owner);
	}

	return ret;
}

/* called when adding new meta information
 * under ife->tcf_lock for existing action
 */
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
			int len, bool atomic)
{
	struct tcf_meta_info *mi = NULL;
	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret = 0;

	if (!ops)
		return -ENOENT;

	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!mi) {
		/* put back what find_ife_oplist took */
		module_put(ops->owner);
		return -ENOMEM;
	}

	mi->metaid = metaid;
	mi->ops = ops;
	if (len > 0) {
		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
		if (ret != 0) {
			kfree(mi);
			module_put(ops->owner);
			return ret;
		}
	}

	list_add_tail(&mi->metalist, &ife->metalist);

	return ret;
}

static int use_all_metadata(struct tcf_ife_info *ife)
{
	struct tcf_meta_ops *o;
	int rc = 0;
	int installed = 0;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		rc = add_metainfo(ife, o->metaid, NULL, 0, true);
		if (rc == 0)
			installed += 1;
	}
	read_unlock(&ife_mod_lock);

	if (installed)
		return 0;
	else
		return -EINVAL;
}

static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e;
	struct nlattr *nest;
	unsigned char *b = skb_tail_pointer(skb);
	int total_encoded = 0;

	/* can only happen on decode */
	if (list_empty(&ife->metalist))
		return 0;

	nest = nla_nest_start(skb, TCA_IFE_METALST);
	if (!nest)
		goto out_nlmsg_trim;

	list_for_each_entry(e, &ife->metalist, metalist) {
		if (!e->ops->get(skb, e))
			total_encoded += 1;
	}

	if (!total_encoded)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	return 0;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

/* under ife->tcf_lock */
static void _tcf_ife_cleanup(struct tc_action *a)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_meta_info *e, *n;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		module_put(e->ops->owner);
		list_del(&e->metalist);
		if (e->metaval) {
			if (e->ops->release)
				e->ops->release(e);
			else
				kfree(e->metaval);
		}
		kfree(e);
	}
}

static void tcf_ife_cleanup(struct tc_action *a)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;

	spin_lock_bh(&ife->tcf_lock);
	_tcf_ife_cleanup(a);
	spin_unlock_bh(&ife->tcf_lock);

	p = rcu_dereference_protected(ife->params, 1);
	kfree_rcu(p, rcu);
}

/* under ife->tcf_lock for existing action */
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
			     bool exists)
{
	int len = 0;
	int rc = 0;
	int i = 0;
	void *val;

	for (i = 1; i < max_metacnt; i++) {
		if (tb[i]) {
			val = nla_data(tb[i]);
			len = nla_len(tb[i]);

			rc = load_metaops_and_vet(ife, i, val, len, exists);
			if (rc != 0)
				return rc;

			rc = add_metainfo(ife, i, val, len, exists);
			if (rc)
				return rc;
		}
	}

	return rc;
}

static int tcf_ife_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a,
			int ovr, int bind, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);
	struct nlattr *tb[TCA_IFE_MAX + 1];
	struct nlattr *tb2[IFE_META_MAX + 1];
	struct tcf_ife_params *p, *p_old;
	struct tcf_ife_info *ife;
	u16 ife_type = ETH_P_IFE;
	struct tc_ife *parm;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	bool exists = false;
	int ret = 0;
	int err;

	err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_IFE_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_IFE_PARMS]);

	/* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
	 * they cannot run at the same time. Reject all other flag values,
	 * which are not supported right now.
	 */
	if (parm->flags & ~IFE_ENCODE)
		return -EINVAL;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	exists = tcf_idr_check(tn, parm->index, a, bind);
	if (exists && bind) {
		kfree(p);
		return 0;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
				     bind, true);
		if (ret) {
			kfree(p);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else {
		tcf_idr_release(*a, bind);
		if (!ovr) {
			kfree(p);
			return -EEXIST;
		}
	}

	ife = to_ife(*a);
	p->flags = parm->flags;

	if (parm->flags & IFE_ENCODE) {
		if (tb[TCA_IFE_TYPE])
			ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
		if (tb[TCA_IFE_DMAC])
			daddr = nla_data(tb[TCA_IFE_DMAC]);
		if (tb[TCA_IFE_SMAC])
			saddr = nla_data(tb[TCA_IFE_SMAC]);
	}

	ife->tcf_action = parm->action;

	if (parm->flags & IFE_ENCODE) {
		if (daddr)
			ether_addr_copy(p->eth_dst, daddr);
		else
			eth_zero_addr(p->eth_dst);

		if (saddr)
			ether_addr_copy(p->eth_src, saddr);
		else
			eth_zero_addr(p->eth_src);

		p->eth_type = ife_type;
	}

	if (exists)
		spin_lock_bh(&ife->tcf_lock);

	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&ife->metalist);

	if (tb[TCA_IFE_METALST]) {
		err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
				       NULL, NULL);
		if (err) {
metadata_parse_err:
			if (exists)
				tcf_idr_release(*a, bind);
			if (ret == ACT_P_CREATED)
				_tcf_ife_cleanup(*a);

			if (exists)
				spin_unlock_bh(&ife->tcf_lock);
			kfree(p);
			return err;
		}

		err = populate_metalist(ife, tb2, exists);
		if (err)
			goto metadata_parse_err;

	} else {
		/* if no metadata allow list was passed, or allow-all was
		 * requested, add as many supported metadata as we can.
		 * At least one had better be installable or we bail out.
		 */
		err = use_all_metadata(ife);
		if (err) {
			if (ret == ACT_P_CREATED)
				_tcf_ife_cleanup(*a);

			if (exists)
				spin_unlock_bh(&ife->tcf_lock);
			kfree(p);
			return err;
		}
	}

	if (exists)
		spin_unlock_bh(&ife->tcf_lock);

	p_old = rtnl_dereference(ife->params);
	rcu_assign_pointer(ife->params, p);
	if (p_old)
		kfree_rcu(p_old, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
}

static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p = rtnl_dereference(ife->params);
	struct tc_ife opt = {
		.index = ife->tcf_index,
		.refcnt = ife->tcf_refcnt - ref,
		.bindcnt = ife->tcf_bindcnt - bind,
		.action = ife->tcf_action,
		.flags = p->flags,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &ife->tcf_tm);
	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
		goto nla_put_failure;

	if (!is_zero_ether_addr(p->eth_dst)) {
		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
			goto nla_put_failure;
	}

	if (!is_zero_ether_addr(p->eth_src)) {
		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
			goto nla_put_failure;
	}

	if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
		goto nla_put_failure;

	if (dump_metalist(skb, ife)) {
		/* ignore failure to dump metalist */
		pr_info("Failed to dump metalist\n");
	}

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
			      u16 metaid, u16 mlen, void *mdata)
{
	struct tcf_meta_info *e;

	/* XXX: use hash to speed up */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (metaid == e->metaid) {
			if (e->ops) {
				/* We check for decode presence already */
				return e->ops->decode(skb, mdata, mlen);
			}
		}
	}

	return 0;
}

static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	u8 *ifehdr_end;
	u8 *tlv_data;
	u16 metalen;

	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	tlv_data = ife_decode(skb, &metalen);
	if (unlikely(!tlv_data)) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	ifehdr_end = tlv_data + metalen;
	for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
		u8 *curr_data;
		u16 mtype;
		u16 dlen;

		curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL);

		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
			/* abuse overlimits to count when we receive metadata
			 * but don't have ops for it
			 */
			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
					    mtype, dlen);
			qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		}
	}

	if (WARN_ON(tlv_data != ifehdr_end)) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);
	skb_reset_network_header(skb);

	return action;
}

/* XXX: check if we can do this at install time instead of on the
 * send data path
 */
static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e, *n;
	int tot_run_sz = 0, run_sz = 0;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		if (e->ops->check_presence) {
			run_sz = e->ops->check_presence(skb, e);
			tot_run_sz += run_sz;
		}
	}

	return tot_run_sz;
}

static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res, struct tcf_ife_params *p)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	struct ethhdr *oethh;	/* outer ether header */
	struct tcf_meta_info *e;
	/*
	 * OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
	 * where ORIGDATA = original ethernet header ...
	 */
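	/* Rough worked example (an illustration, not normative): with one
	 * u32 metadatum, ife_get_sz() returns metalen = 2 + 2 + 4 = 8, and
	 * the needed headroom grows by metalen + the outer Ethernet header
	 * (hard_header_len, typically 14) + IFE_METAHDRLEN for the total
	 * metadata length field, which is what hdrm computes below.
	 */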
	u16 metalen = ife_get_sz(skb, ife);
	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
	unsigned int skboff = 0;
	int new_len = skb->len + hdrm;
	bool exceed_mtu = false;
	void *ife_meta;
	int err = 0;

	if (!skb_at_tc_ingress(skb)) {
		if (new_len > skb->dev->mtu)
			exceed_mtu = true;
	}

	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (!metalen) {		/* no metadata to send */
		/* abuse overlimits to count when we allow packet
		 * with no metadata
		 */
		qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return action;
	}
	/* could be a stupid policy setup or MTU config,
	 * so let's be conservative..
	 */
	if ((action == TC_ACT_SHOT) || exceed_mtu) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	ife_meta = ife_encode(skb, metalen);

	spin_lock(&ife->tcf_lock);

	/* XXX: we don't have a clever way of telling encode to
	 * not repeat some of the computations that are done by
	 * ops->check_presence...
	 */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (e->ops->encode) {
			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
					     e);
		}
		if (err < 0) {
			/* too corrupt to keep around if overwritten */
			spin_unlock(&ife->tcf_lock);
			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
			return TC_ACT_SHOT;
		}
		skboff += err;
	}
	spin_unlock(&ife->tcf_lock);
	oethh = (struct ethhdr *)skb->data;

	if (!is_zero_ether_addr(p->eth_src))
		ether_addr_copy(oethh->h_source, p->eth_src);
	if (!is_zero_ether_addr(p->eth_dst))
		ether_addr_copy(oethh->h_dest, p->eth_dst);
	oethh->h_proto = htons(p->eth_type);

	if (skb_at_tc_ingress(skb))
		skb_pull(skb, skb->dev->hard_header_len);

	return action;
}

static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;
	int ret;

	rcu_read_lock();
	p = rcu_dereference(ife->params);
	if (p->flags & IFE_ENCODE) {
		ret = tcf_ife_encode(skb, a, res, p);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	return tcf_ife_decode(skb, a, res);
}

static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_ife_ops = {
	.kind = "ife",
	.type = TCA_ACT_IFE,
	.owner = THIS_MODULE,
	.act = tcf_ife_act,
	.dump = tcf_ife_dump,
	.cleanup = tcf_ife_cleanup,
	.init = tcf_ife_init,
	.walk = tcf_ife_walker,
	.lookup = tcf_ife_search,
	.size = sizeof(struct tcf_ife_info),
};

static __net_init int ife_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tc_action_net_init(tn, &act_ife_ops);
}

static void __net_exit ife_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ife_net_id);
}

static struct pernet_operations ife_net_ops = {
	.init = ife_init_net,
	.exit_batch = ife_exit_net,
	.id   = &ife_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init ife_init_module(void)
{
	return tcf_register_action(&act_ife_ops, &ife_net_ops);
}

static void __exit ife_cleanup_module(void)
{
	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
}

module_init(ife_init_module);
module_exit(ife_cleanup_module);

MODULE_AUTHOR("Jamal Hadi Salim (2015)");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");