/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

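/*
 * Overview: this module implements the tc "csum" action, which recomputes
 * the checksums named in its update_flags bitmask on each matched packet.
 * It is typically chained after an action that rewrites headers (such as
 * pedit) so the mangled packet leaves with valid checksums.
 *
 * Illustrative usage via iproute2 (device name and match are examples
 * only, not mandated by this module):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *           match ip dport 7 0xffff \
 *           action csum udp
 *
 * Here "udp" maps to TCA_CSUM_UPDATE_FLAG_UDP in parm->update_flags.
 */
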
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

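/* Parse and validate the netlink attributes, then create a new action
 * instance or update the existing one found by parm->index. The update
 * flags live in a separate tcf_csum_params object that is swapped in with
 * rcu_assign_pointer() so the datapath (tcf_csum) never sees a
 * half-updated configuration.
 */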
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_old, *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (!err) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_csum_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	p = to_tcf_csum(*a);
	ASSERT_RTNL();

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		tcf_idr_release(*a, bind);
		return -ENOMEM;
	}
	params_old = rtnl_dereference(p->params);

	p->tcf_action = parm->action;
	params_new->update_flags = parm->update_flags;
	rcu_assign_pointer(p->params, params_new);
	if (params_old)
		kfree_rcu(params_old, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: accumulated length of the headers already summed
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff
 * and can be written to. Return a pointer to the next layer if the checks
 * pass, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

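/* The per-protocol helpers below all follow the same pattern: locate the
 * transport header with tcf_csum_skb_nextlayer(), zero the checksum field,
 * recompute the sum over the payload with csum_partial() (folding in the
 * pseudo-header where the protocol requires one), and write it back.
 * Each returns 1 on success or when the packet is deliberately left alone
 * (e.g. GSO packets, which are fixed up at segmentation time), and 0 when
 * the header could not be pulled or made writable.
 */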
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

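/* UDP and UDP-Lite share one helper, selected by the udplite flag. Two
 * quirks are handled here: a UDP checksum of zero means "no checksum", so
 * a computed value of zero must be folded to CSUM_MANGLED_0 (0xffff), and
 * GSO UDP packets are skipped since the stack fixes them up when the
 * packet is segmented.
 */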
static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check first:
	 * UDPLITE uses udph->len for the checksum coverage instead.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check first:
	 * UDPLITE uses udph->len for the checksum coverage instead.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

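/* SCTP uses a CRC32c over the whole packet rather than the Internet
 * checksum; sctp_compute_cksum() handles that, and skb->csum_not_inet is
 * cleared since the CRC no longer needs to be resolved by offload.
 */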
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

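/* Dispatch on the IPv4 protocol field and recompute the requested
 * checksums. Non-first fragments (nonzero fragment offset) carry no
 * transport header, so the switch is fed protocol 0 and they pass through
 * untouched; the IPv4 header checksum itself is refreshed last via
 * ip_send_check().
 */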
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

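/* Walk the TLV-encoded options inside a hop-by-hop header looking for a
 * jumbogram (RFC 2675) payload length, which overrides the 16-bit
 * payload_len from the fixed IPv6 header. Returns 0 only for a malformed
 * jumbo option; unknown options merely end the scan.
 */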
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

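/* IPv6 equivalent of tcf_csum_ipv4(): skip over the extension-header
 * chain (bailing out on fragments, whose payload cannot be summed in
 * isolation) until a transport protocol this action knows how to handle
 * is reached.
 */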
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

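/* The per-packet entry point. Runs under RCU; the parameters are read
 * through rcu_dereference() so a concurrent tcf_csum_init() can replace
 * them safely. A packet whose checksums could not be updated is dropped
 * (TC_ACT_SHOT) and counted in the drop statistics.
 */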
static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
		    struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	u32 update_flags;
	int action;

	rcu_read_lock();
	params = rcu_dereference(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop_stats;

	update_flags = params->update_flags;
	switch (tc_skb_protocol(skb)) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

unlock:
	rcu_read_unlock();
	return action;

drop:
	action = TC_ACT_SHOT;

drop_stats:
	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
	goto unlock;
}

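/* Fill a netlink message with this action's configuration so userspace
 * (e.g. "tc actions ls action csum") can display it.
 */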
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
		.action  = p->tcf_action,
	};
	struct tcf_t t;

	params = rtnl_dereference(p->params);
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static int tcf_csum_delete(struct net *net, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_delete_index(tn, index);
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.type		= TCA_ACT_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size  = tcf_csum_get_fill_size,
	.delete		= tcf_csum_delete,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);