xref: /openbmc/linux/net/sched/act_csum.c (revision b409074e6693bcdaa7abbee2a035f22a9eabda53)
1 /*
2  * Checksum updating actions
3  *
4  * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License as published by the Free
8  * Software Foundation; either version 2 of the License, or (at your option)
9  * any later version.
10  *
11  */
12 
13 #include <linux/types.h>
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/spinlock.h>
18 
19 #include <linux/netlink.h>
20 #include <net/netlink.h>
21 #include <linux/rtnetlink.h>
22 
23 #include <linux/skbuff.h>
24 
25 #include <net/ip.h>
26 #include <net/ipv6.h>
27 #include <net/icmp.h>
28 #include <linux/icmpv6.h>
29 #include <linux/igmp.h>
30 #include <net/tcp.h>
31 #include <net/udp.h>
32 #include <net/ip6_checksum.h>
33 #include <net/sctp/checksum.h>
34 
35 #include <net/act_api.h>
36 
37 #include <linux/tc_act/tc_csum.h>
38 #include <net/tc_act/tc_csum.h>
39 
/* Netlink attribute policy: TCA_CSUM_PARMS must be exactly a struct tc_csum. */
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};
43 
/* Per-netns id used with net_generic() to find this action's tc_action_net. */
static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;
46 
/* Create a new csum action or update/bind an existing one.
 *
 * Parses TCA_CSUM_PARMS, looks the action up by index in the per-netns
 * IDR, and publishes a freshly allocated tcf_csum_params via RCU so the
 * datapath (tcf_csum) never observes a half-updated configuration.
 *
 * Returns ACT_P_CREATED when a new action was allocated, 0 on a
 * successful bind/override, or a negative errno.
 */
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_old, *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	if (!tcf_idr_check(tn, parm->index, a, bind)) {
		/* No action with this index yet: allocate one. */
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_csum_ops, bind, true);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind)/* dont override defaults */
			return 0;
		/* Drop the reference taken by tcf_idr_check before deciding
		 * whether to override the existing action.
		 */
		tcf_idr_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	p = to_tcf_csum(*a);
	ASSERT_RTNL();

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		/* Undo the creation above; an overridden action is left as-is. */
		if (ret == ACT_P_CREATED)
			tcf_idr_release(*a, bind);
		return -ENOMEM;
	}
	params_old = rtnl_dereference(p->params);

	params_new->action = parm->action;
	params_new->update_flags = parm->update_flags;
	/* Publish atomically: RCU readers see either old or new params. */
	rcu_assign_pointer(p->params, params_new);
	if (params_old)
		kfree_rcu(params_old, rcu);

	/* Only make a brand-new action visible once fully configured. */
	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
}
106 
107 /**
108  * tcf_csum_skb_nextlayer - Get next layer pointer
109  * @skb: sk_buff to use
110  * @ihl: previous summed headers length
111  * @ipl: complete packet length
112  * @jhl: next header length
113  *
114  * Check the expected next layer availability in the specified sk_buff.
115  * Return the next layer pointer if pass, NULL otherwise.
116  */
117 static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
118 				    unsigned int ihl, unsigned int ipl,
119 				    unsigned int jhl)
120 {
121 	int ntkoff = skb_network_offset(skb);
122 	int hl = ihl + jhl;
123 
124 	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
125 	    skb_try_make_writable(skb, hl + ntkoff))
126 		return NULL;
127 	else
128 		return (void *)(skb_network_header(skb) + ihl);
129 }
130 
131 static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
132 			      unsigned int ipl)
133 {
134 	struct icmphdr *icmph;
135 
136 	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
137 	if (icmph == NULL)
138 		return 0;
139 
140 	icmph->checksum = 0;
141 	skb->csum = csum_partial(icmph, ipl - ihl, 0);
142 	icmph->checksum = csum_fold(skb->csum);
143 
144 	skb->ip_summed = CHECKSUM_NONE;
145 
146 	return 1;
147 }
148 
149 static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
150 			      unsigned int ihl, unsigned int ipl)
151 {
152 	struct igmphdr *igmph;
153 
154 	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
155 	if (igmph == NULL)
156 		return 0;
157 
158 	igmph->csum = 0;
159 	skb->csum = csum_partial(igmph, ipl - ihl, 0);
160 	igmph->csum = csum_fold(skb->csum);
161 
162 	skb->ip_summed = CHECKSUM_NONE;
163 
164 	return 1;
165 }
166 
167 static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
168 			      unsigned int ipl)
169 {
170 	struct icmp6hdr *icmp6h;
171 	const struct ipv6hdr *ip6h;
172 
173 	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
174 	if (icmp6h == NULL)
175 		return 0;
176 
177 	ip6h = ipv6_hdr(skb);
178 	icmp6h->icmp6_cksum = 0;
179 	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
180 	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
181 					      ipl - ihl, IPPROTO_ICMPV6,
182 					      skb->csum);
183 
184 	skb->ip_summed = CHECKSUM_NONE;
185 
186 	return 1;
187 }
188 
189 static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
190 			     unsigned int ipl)
191 {
192 	struct tcphdr *tcph;
193 	const struct iphdr *iph;
194 
195 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
196 		return 1;
197 
198 	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
199 	if (tcph == NULL)
200 		return 0;
201 
202 	iph = ip_hdr(skb);
203 	tcph->check = 0;
204 	skb->csum = csum_partial(tcph, ipl - ihl, 0);
205 	tcph->check = tcp_v4_check(ipl - ihl,
206 				   iph->saddr, iph->daddr, skb->csum);
207 
208 	skb->ip_summed = CHECKSUM_NONE;
209 
210 	return 1;
211 }
212 
213 static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
214 			     unsigned int ipl)
215 {
216 	struct tcphdr *tcph;
217 	const struct ipv6hdr *ip6h;
218 
219 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
220 		return 1;
221 
222 	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
223 	if (tcph == NULL)
224 		return 0;
225 
226 	ip6h = ipv6_hdr(skb);
227 	tcph->check = 0;
228 	skb->csum = csum_partial(tcph, ipl - ihl, 0);
229 	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
230 				      ipl - ihl, IPPROTO_TCP,
231 				      skb->csum);
232 
233 	skb->ip_summed = CHECKSUM_NONE;
234 
235 	return 1;
236 }
237 
/* Recompute the UDP or UDP-Lite checksum for an IPv4 packet.
 * @udplite: non-zero selects UDP-Lite coverage semantics for udph->len.
 *
 * Returns 1 on success or when the packet is deliberately left alone
 * (GSO, zero-checksum UDP, or obscure length fields); 0 only when the
 * headers cannot be pulled or made writable.
 */
static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	/* UDP GSO packets get their checksum filled at segmentation time. */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms, Don't use
	 * udph->len to get the real length without any protocol check,
	 * UDPLITE uses udph->len for another thing,
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	/* Plain UDP with check == 0 means "no checksum": leave it alone. */
	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			/* For UDP-Lite, udph->len is the checksum coverage;
			 * 0 means the whole datagram is covered.
			 */
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		/* RFC 768: a computed checksum of zero is sent as all-ones. */
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/* NOTE: ip_summed is intentionally not touched on the ignore path. */
ignore_obscure_skb:
	return 1;
}
293 
/* Recompute the UDP or UDP-Lite checksum for an IPv6 packet.
 * @udplite: non-zero selects UDP-Lite coverage semantics for udph->len.
 *
 * Unlike the IPv4 variant, the checksum is always recomputed here: a
 * zero UDP checksum is not legal over IPv6 (RFC 2460).
 *
 * Returns 1 on success or when the packet is deliberately left alone
 * (GSO, obscure length fields); 0 only when the headers cannot be
 * pulled or made writable.
 */
static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	/* UDP GSO packets get their checksum filled at segmentation time. */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms, Don't use
	 * udph->len to get the real length without any protocol check,
	 * UDPLITE uses udph->len for another thing,
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		/* For UDP-Lite, udph->len is the checksum coverage;
		 * 0 means the whole datagram is covered.
		 */
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	/* A computed checksum of zero is transmitted as all-ones. */
	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

	/* NOTE: ip_summed is intentionally not touched on the ignore path. */
ignore_obscure_skb:
	return 1;
}
348 
349 static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
350 			 unsigned int ipl)
351 {
352 	struct sctphdr *sctph;
353 
354 	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
355 		return 1;
356 
357 	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
358 	if (!sctph)
359 		return 0;
360 
361 	sctph->checksum = sctp_compute_cksum(skb,
362 					     skb_network_offset(skb) + ihl);
363 	skb->ip_summed = CHECKSUM_NONE;
364 	skb->csum_not_inet = 0;
365 
366 	return 1;
367 }
368 
369 static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
370 {
371 	const struct iphdr *iph;
372 	int ntkoff;
373 
374 	ntkoff = skb_network_offset(skb);
375 
376 	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
377 		goto fail;
378 
379 	iph = ip_hdr(skb);
380 
381 	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
382 	case IPPROTO_ICMP:
383 		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
384 			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
385 						ntohs(iph->tot_len)))
386 				goto fail;
387 		break;
388 	case IPPROTO_IGMP:
389 		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
390 			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
391 						ntohs(iph->tot_len)))
392 				goto fail;
393 		break;
394 	case IPPROTO_TCP:
395 		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
396 			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
397 					       ntohs(iph->tot_len)))
398 				goto fail;
399 		break;
400 	case IPPROTO_UDP:
401 		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
402 			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
403 					       ntohs(iph->tot_len), 0))
404 				goto fail;
405 		break;
406 	case IPPROTO_UDPLITE:
407 		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
408 			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
409 					       ntohs(iph->tot_len), 1))
410 				goto fail;
411 		break;
412 	case IPPROTO_SCTP:
413 		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
414 		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
415 			goto fail;
416 		break;
417 	}
418 
419 	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
420 		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
421 			goto fail;
422 
423 		ip_send_check(ip_hdr(skb));
424 	}
425 
426 	return 1;
427 
428 fail:
429 	return 0;
430 }
431 
/* Scan the TLV options of a hop-by-hop extension header for a jumbogram
 * payload-length option (IPV6_TLV_JUMBO).
 * @ip6xh: start of the extension header
 * @ixhl: total length of the extension header in bytes
 * @pl: in/out payload length; overwritten with the jumbo length if found
 *
 * Returns 0 only when a jumbo option is present but malformed; 1 in all
 * other cases (no jumbo option, or the scan stopped at an obscure TLV).
 */
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	/* Options start right after the fixed 2-byte extension header. */
	off = sizeof(*ip6xh);
	len = ixhl - off;

	/* len > 1 guarantees both the type byte and the length byte exist. */
	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			/* Pad1 is a single byte with no length field. */
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			/* RFC 2675: 4-byte payload, 4n+2 alignment. */
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}
467 
/* Update the checksums selected by @update_flags on an IPv6 packet.
 *
 * Walks the extension-header chain until a transport protocol is found,
 * then dispatches to the matching per-protocol helper.  Fragments and
 * unknown next headers are left untouched (treated as success).
 *
 * Returns 1 on success/ignore, 0 when a header pull fails or a
 * requested checksum update could not be applied.
 */
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			/* Cannot checksum a fragment's partial payload. */
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			/* Pull the fixed part first to learn the full
			 * extension-header length, then pull that too.
			 */
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			/* Re-derive the pointer: pskb_may_pull may have
			 * reallocated the header memory.
			 */
			ip6xh = (void *)(skb_network_header(skb) + hl);
			/* Hop-by-hop options may carry a jumbogram length
			 * that overrides payload_len.
			 */
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}
550 
551 static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
552 		    struct tcf_result *res)
553 {
554 	struct tcf_csum *p = to_tcf_csum(a);
555 	struct tcf_csum_params *params;
556 	u32 update_flags;
557 	int action;
558 
559 	rcu_read_lock();
560 	params = rcu_dereference(p->params);
561 
562 	tcf_lastuse_update(&p->tcf_tm);
563 	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
564 
565 	action = params->action;
566 	if (unlikely(action == TC_ACT_SHOT))
567 		goto drop_stats;
568 
569 	update_flags = params->update_flags;
570 	switch (tc_skb_protocol(skb)) {
571 	case cpu_to_be16(ETH_P_IP):
572 		if (!tcf_csum_ipv4(skb, update_flags))
573 			goto drop;
574 		break;
575 	case cpu_to_be16(ETH_P_IPV6):
576 		if (!tcf_csum_ipv6(skb, update_flags))
577 			goto drop;
578 		break;
579 	}
580 
581 unlock:
582 	rcu_read_unlock();
583 	return action;
584 
585 drop:
586 	action = TC_ACT_SHOT;
587 
588 drop_stats:
589 	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
590 	goto unlock;
591 }
592 
593 static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
594 			 int ref)
595 {
596 	unsigned char *b = skb_tail_pointer(skb);
597 	struct tcf_csum *p = to_tcf_csum(a);
598 	struct tcf_csum_params *params;
599 	struct tc_csum opt = {
600 		.index   = p->tcf_index,
601 		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
602 		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
603 	};
604 	struct tcf_t t;
605 
606 	params = rtnl_dereference(p->params);
607 	opt.action = params->action;
608 	opt.update_flags = params->update_flags;
609 
610 	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
611 		goto nla_put_failure;
612 
613 	tcf_tm_dump(&t, &p->tcf_tm);
614 	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
615 		goto nla_put_failure;
616 
617 	return skb->len;
618 
619 nla_put_failure:
620 	nlmsg_trim(skb, b);
621 	return -1;
622 }
623 
624 static void tcf_csum_cleanup(struct tc_action *a)
625 {
626 	struct tcf_csum *p = to_tcf_csum(a);
627 	struct tcf_csum_params *params;
628 
629 	params = rcu_dereference_protected(p->params, 1);
630 	if (params)
631 		kfree_rcu(params, rcu);
632 }
633 
634 static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
635 			   struct netlink_callback *cb, int type,
636 			   const struct tc_action_ops *ops,
637 			   struct netlink_ext_ack *extack)
638 {
639 	struct tc_action_net *tn = net_generic(net, csum_net_id);
640 
641 	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
642 }
643 
644 static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index,
645 			   struct netlink_ext_ack *extack)
646 {
647 	struct tc_action_net *tn = net_generic(net, csum_net_id);
648 
649 	return tcf_idr_search(tn, a, index);
650 }
651 
/* Dump-size estimate: one TCA_CSUM_PARMS attribute holding struct tc_csum. */
static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}
656 
657 static int tcf_csum_delete(struct net *net, u32 index)
658 {
659 	struct tc_action_net *tn = net_generic(net, csum_net_id);
660 
661 	return tcf_idr_delete_index(tn, index);
662 }
663 
/* Ops table wiring this action's callbacks into the tc action framework. */
static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.type		= TCA_ACT_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size  = tcf_csum_get_fill_size,
	.delete		= tcf_csum_delete,
	.size		= sizeof(struct tcf_csum),
};
678 
679 static __net_init int csum_init_net(struct net *net)
680 {
681 	struct tc_action_net *tn = net_generic(net, csum_net_id);
682 
683 	return tc_action_net_init(tn, &act_csum_ops);
684 }
685 
/* Per-netns exit (batched): tear down the action tables of @net_list. */
static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}
690 
/* Per-netns lifecycle hooks; .id/.size let the core allocate our state. */
static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};
697 
/* Module metadata. */
MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");
700 
/* Module load: register the action ops and its per-netns operations. */
static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}
705 
/* Module unload: unregister the action ops and per-netns operations. */
static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}
710 
/* Standard module entry/exit hooks. */
module_init(csum_init_module);
module_exit(csum_cleanup_module);
713