xref: /openbmc/linux/net/core/lwt_bpf.c (revision 3557b3fd)
/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

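/* lwt_bpf: attach BPF programs to routes via lightweight tunnel (LWT)
 * encap state and run them at the input, output and xmit hooks of the
 * IPv4/IPv6 stack.
 */
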
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
#include <net/gre.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>

struct bpf_lwt_prog {
	struct bpf_prog *prog;
	char *name;
};

struct bpf_lwt {
	struct bpf_lwt_prog in;
	struct bpf_lwt_prog out;
	struct bpf_lwt_prog xmit;
	int family;
};

#define MAX_PROG_NAME 256

static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct bpf_lwt *)lwt->data;
}

#define NO_REDIRECT false
#define CAN_REDIRECT true

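/* Run a single LWT BPF program on @skb and map its return code to the
 * action taken: BPF_OK and BPF_LWT_REROUTE are passed through, BPF_REDIRECT
 * is honoured only when @can_redirect is set (xmit hook), and BPF_DROP or
 * any unknown code frees the skb and returns an errno.
 */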
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
		       struct dst_entry *dst, bool can_redirect)
{
	int ret;

	/* Preempt disable is needed to protect per-cpu redirect_info between
	 * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
	 * access to maps strictly require a rcu_read_lock() for protection;
	 * mixing with the BH RCU lock doesn't work.
	 */
	preempt_disable();
	bpf_compute_data_pointers(skb);
	ret = bpf_prog_run_save_cb(lwt->prog, skb);

	switch (ret) {
	case BPF_OK:
	case BPF_LWT_REROUTE:
		break;

	case BPF_REDIRECT:
		if (unlikely(!can_redirect)) {
			pr_warn_once("Illegal redirect return code in prog %s\n",
				     lwt->name ? : "<unknown>");
			ret = BPF_OK;
		} else {
			skb_reset_mac_header(skb);
			ret = skb_do_redirect(skb);
			if (ret == 0)
				ret = BPF_REDIRECT;
		}
		break;

	case BPF_DROP:
		kfree_skb(skb);
		ret = -EPERM;
		break;

	default:
		pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
		kfree_skb(skb);
		ret = -EINVAL;
		break;
	}

	preempt_enable();

	return ret;
}

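/* BPF_LWT_REROUTE on input: the program rewrote the headers, so redo the
 * route lookup for the (possibly changed) protocol and hand the skb to
 * dst_input() on the fresh dst.
 */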
static int bpf_lwt_input_reroute(struct sk_buff *skb)
{
	int err = -EINVAL;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, skb_dst(skb)->dev);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		err = ipv6_stub->ipv6_route_input(skb);
	} else {
		err = -EAFNOSUPPORT;
	}

	if (err)
		goto err;
	return dst_input(skb);

err:
	kfree_skb(skb);
	return err;
}

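/* lwtunnel input hook: run the "in" program (redirects are not allowed
 * here), then either reroute the skb or continue with the original dst
 * input handler.
 */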
static int bpf_input(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->in.prog) {
		ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
		if (ret == BPF_LWT_REROUTE)
			return bpf_lwt_input_reroute(skb);
	}

	if (unlikely(!dst->lwtstate->orig_input)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_input(skb);
}

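/* lwtunnel output hook: run the "out" program (redirects are not allowed
 * here), then continue with the original dst output handler.
 */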
static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->out.prog) {
		ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
	}

	if (unlikely(!dst->lwtstate->orig_output)) {
		pr_warn_once("orig_output not set on dst for prog %s\n",
			     bpf->out.name);
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_output(net, sk, skb);
}

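/* Make sure there is still enough headroom for the device's link-layer
 * header after the xmit program possibly pushed an encap header.
 */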
static int xmit_check_hhlen(struct sk_buff *skb)
{
	int hh_len = skb_dst(skb)->dev->hard_header_len;

	if (skb_headroom(skb) < hh_len) {
		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

		if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	return 0;
}

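/* BPF_LWT_REROUTE on xmit: the program pushed a new IP header (see
 * bpf_lwt_push_ip_encap()), so do a full output route lookup on the outer
 * header and re-enter dst_output() with the new dst.
 */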
static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
{
	struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
	int oif = l3mdev ? l3mdev->ifindex : 0;
	struct dst_entry *dst = NULL;
	int err = -EAFNOSUPPORT;
	struct sock *sk;
	struct net *net;
	bool ipv4;

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv4 = false;
	else
		goto err;

	sk = sk_to_full_sk(skb->sk);
	if (sk) {
		if (sk->sk_bound_dev_if)
			oif = sk->sk_bound_dev_if;
		net = sock_net(sk);
	} else {
		net = dev_net(skb_dst(skb)->dev);
	}

	if (ipv4) {
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {};
		struct rtable *rt;

		fl4.flowi4_oif = oif;
		fl4.flowi4_mark = skb->mark;
		fl4.flowi4_uid = sock_net_uid(net, sk);
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
		fl4.flowi4_proto = iph->protocol;
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;

		rt = ip_route_output_key(net, &fl4);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			goto err;
		}
		dst = &rt->dst;
	} else {
		struct ipv6hdr *iph6 = ipv6_hdr(skb);
		struct flowi6 fl6 = {};

		fl6.flowi6_oif = oif;
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_uid = sock_net_uid(net, sk);
		fl6.flowlabel = ip6_flowinfo(iph6);
		fl6.flowi6_proto = iph6->nexthdr;
		fl6.daddr = iph6->daddr;
		fl6.saddr = iph6->saddr;

		err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6);
		if (unlikely(err))
			goto err;
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto err;
		}
	}
	if (unlikely(dst->error)) {
		err = dst->error;
		dst_release(dst);
		goto err;
	}

	/* Although skb header was reserved in bpf_lwt_push_ip_encap(), it
	 * was done for the previous dst, so we are doing it here again, in
	 * case the new dst needs much more space. The call below is a noop
	 * if there is enough header space in skb.
	 */
	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
	if (unlikely(err))
		goto err;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(err))
		return err;

	/* ip[6]_finish_output2 understands LWTUNNEL_XMIT_DONE */
	return LWTUNNEL_XMIT_DONE;

err:
	kfree_skb(skb);
	return err;
}

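/* lwtunnel xmit hook: run the "xmit" program with redirects allowed.
 * BPF_OK continues down the original path (after sanity checks),
 * BPF_REDIRECT means the program already transmitted the skb, and
 * BPF_LWT_REROUTE triggers a new route lookup for the rewritten packet.
 */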
static int bpf_xmit(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->xmit.prog) {
		__be16 proto = skb->protocol;
		int ret;

		ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
		switch (ret) {
		case BPF_OK:
			/* If the header changed, e.g. via bpf_lwt_push_encap,
			 * BPF_LWT_REROUTE below should have been used if the
			 * protocol was also changed.
			 */
			if (skb->protocol != proto) {
				kfree_skb(skb);
				return -EINVAL;
			}
			/* If the header was expanded, headroom might be too
			 * small for the L2 header to come; expand as needed.
			 */
			ret = xmit_check_hhlen(skb);
			if (unlikely(ret))
				return ret;

			return LWTUNNEL_XMIT_CONTINUE;
		case BPF_REDIRECT:
			return LWTUNNEL_XMIT_DONE;
		case BPF_LWT_REROUTE:
			return bpf_lwt_xmit_reroute(skb);
		default:
			return ret;
		}
	}

	return LWTUNNEL_XMIT_CONTINUE;
}

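/* Teardown: drop the bpf_prog references and free the program names
 * duplicated at parse time when the lwtunnel state goes away.
 */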
static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
	if (prog->prog)
		bpf_prog_put(prog->prog);

	kfree(prog->name);
}

static void bpf_destroy_state(struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	bpf_lwt_prog_destroy(&bpf->in);
	bpf_lwt_prog_destroy(&bpf->out);
	bpf_lwt_prog_destroy(&bpf->xmit);
}

static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
	[LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
	[LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
				.len = MAX_PROG_NAME },
};

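/* Parse one nested LWT_BPF_{IN,OUT,XMIT} attribute: duplicate the
 * user-supplied program name and take a reference on the BPF program fd,
 * checking that it is of the expected program type.
 */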
static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
			  enum bpf_prog_type type)
{
	struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
	struct bpf_prog *p;
	int ret;
	u32 fd;

	ret = nla_parse_nested(tb, LWT_BPF_PROG_MAX, attr, bpf_prog_policy,
			       NULL);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
		return -EINVAL;

	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
	if (!prog->name)
		return -ENOMEM;

	fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
	p = bpf_prog_get_type(fd, type);
	if (IS_ERR(p))
		return PTR_ERR(p);

	prog->prog = p;

	return 0;
}

static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
	[LWT_BPF_IN]		= { .type = NLA_NESTED, },
	[LWT_BPF_OUT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT_HEADROOM]	= { .type = NLA_U32 },
};

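/* Build the lwtunnel state for a route using BPF encap. At least one of
 * the in/out/xmit programs must be supplied. From user space this is
 * typically configured through iproute2, along the lines of (illustrative
 * only; object file and section names are placeholders):
 *
 *   ip route add 192.0.2.0/24 encap bpf xmit obj lwt_prog.o section xmit \
 *       headroom 32 dev eth0
 */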
static int bpf_build_state(struct nlattr *nla,
			   unsigned int family, const void *cfg,
			   struct lwtunnel_state **ts,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWT_BPF_MAX + 1];
	struct lwtunnel_state *newts;
	struct bpf_lwt *bpf;
	int ret;

	if (family != AF_INET && family != AF_INET6)
		return -EAFNOSUPPORT;

	ret = nla_parse_nested(tb, LWT_BPF_MAX, nla, bpf_nl_policy, extack);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
		return -EINVAL;

	newts = lwtunnel_state_alloc(sizeof(*bpf));
	if (!newts)
		return -ENOMEM;

	newts->type = LWTUNNEL_ENCAP_BPF;
	bpf = bpf_lwt_lwtunnel(newts);

	if (tb[LWT_BPF_IN]) {
		newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
				     BPF_PROG_TYPE_LWT_IN);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_OUT]) {
		newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
				     BPF_PROG_TYPE_LWT_OUT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT]) {
		newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
				     BPF_PROG_TYPE_LWT_XMIT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT_HEADROOM]) {
		u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);

		if (headroom > LWT_BPF_MAX_HEADROOM) {
			ret = -ERANGE;
			goto errout;
		}

		newts->headroom = headroom;
	}

	bpf->family = family;
	*ts = newts;

	return 0;

errout:
	bpf_destroy_state(newts);
	kfree(newts);
	return ret;
}

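/* Dump one program back to user space as a nested attribute; only the
 * name recorded at configuration time is reported.
 */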
static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
			     struct bpf_lwt_prog *prog)
{
	struct nlattr *nest;

	if (!prog->prog)
		return 0;

	nest = nla_nest_start(skb, attr);
	if (!nest)
		return -EMSGSIZE;

	if (prog->name &&
	    nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
		return -EMSGSIZE;

	return nla_nest_end(skb, nest);
}

static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
		return -EMSGSIZE;

	return 0;
}

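/* Worst-case netlink size of the dumped state: one nest plus a program
 * name for each of the in/out/xmit slots.
 */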
static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	int nest_len = nla_total_size(sizeof(struct nlattr)) +
		       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
		       0;

	return nest_len + /* LWT_BPF_IN */
	       nest_len + /* LWT_BPF_OUT */
	       nest_len + /* LWT_BPF_XMIT */
	       0;
}

static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
	/* FIXME:
	 * The LWT state is currently rebuilt for delete requests, which
	 * results in a new bpf_prog instance. Comparing names for now.
	 */
	if (!a->name && !b->name)
		return 0;

	if (!a->name || !b->name)
		return 1;

	return strcmp(a->name, b->name);
}

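/* Returns zero when both states reference the same programs (compared by
 * name, see bpf_lwt_prog_cmp()), nonzero otherwise.
 */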
static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
	struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);

	return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
	       bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
	       bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
}

static const struct lwtunnel_encap_ops bpf_encap_ops = {
	.build_state	= bpf_build_state,
	.destroy_state	= bpf_destroy_state,
	.input		= bpf_input,
	.output		= bpf_output,
	.xmit		= bpf_xmit,
	.fill_encap	= bpf_fill_encap_info,
	.get_encap_size = bpf_encap_nlsize,
	.cmp_encap	= bpf_encap_cmp,
	.owner		= THIS_MODULE,
};

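/* Adjust GSO metadata after an encap header of @encap_len bytes has been
 * pushed: add the tunnel GSO type (plus SKB_GSO_DODGY), reduce gso_size by
 * the encap length and clear gso_segs so it gets recomputed.
 */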
static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
			   int encap_len)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	gso_type |= SKB_GSO_DODGY;
	shinfo->gso_type |= gso_type;
	skb_decrease_gso_size(shinfo, encap_len);
	shinfo->gso_segs = 0;
	return 0;
}

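/* Pick the right tunnel GSO type for a GSO skb that just had an outer
 * IP/IPv6 (optionally plus GRE or UDP) header pushed in front of it.
 * Only plain TCP GSO packets are supported here.
 */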
static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
{
	int next_hdr_offset;
	void *next_hdr;
	__u8 protocol;

	/* SCTP and UDP_L4 gso need more nuanced handling than what
	 * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
	 * So at the moment only TCP GSO packets are let through.
	 */
	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		return -ENOTSUPP;

	if (ipv4) {
		protocol = ip_hdr(skb)->protocol;
		next_hdr_offset = sizeof(struct iphdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	} else {
		protocol = ipv6_hdr(skb)->nexthdr;
		next_hdr_offset = sizeof(struct ipv6hdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	}

	switch (protocol) {
	case IPPROTO_GRE:
		next_hdr_offset += sizeof(struct gre_base_hdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
			return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_GRE, encap_len);

	case IPPROTO_UDP:
		next_hdr_offset += sizeof(struct udphdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct udphdr *)next_hdr)->check)
			return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);

	case IPPROTO_IP:
	case IPPROTO_IPV6:
		if (ipv4)
			return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
		else
			return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);

	default:
		return -EPROTONOSUPPORT;
	}
}

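/* Backend for the bpf_lwt_push_encap() helper with BPF_LWT_ENCAP_IP:
 * validate and push a program-supplied outer IPv4/IPv6 header onto the
 * skb, fix up the inner header offsets, checksum and skb->protocol, and
 * adjust GSO metadata if needed. The program is then expected to return
 * BPF_LWT_REROUTE so the stack re-routes the packet on the new outer
 * header.
 */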
int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
{
	struct iphdr *iph;
	bool ipv4;
	int err;

	if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
		return -EINVAL;

	/* validate protocol and length */
	iph = (struct iphdr *)hdr;
	if (iph->version == 4) {
		ipv4 = true;
		if (unlikely(len < iph->ihl * 4))
			return -EINVAL;
	} else if (iph->version == 6) {
		ipv4 = false;
		if (unlikely(len < sizeof(struct ipv6hdr)))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	if (ingress)
		err = skb_cow_head(skb, len + skb->mac_len);
	else
		err = skb_cow_head(skb,
				   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
	if (unlikely(err))
		return err;

	/* push the encap headers and fix pointers */
	skb_reset_inner_headers(skb);
	skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
	skb_set_inner_protocol(skb, skb->protocol);
	skb->encapsulation = 1;
	skb_push(skb, len);
	if (ingress)
		skb_postpush_rcsum(skb, iph, len);
	skb_reset_network_header(skb);
	memcpy(skb_network_header(skb), hdr, len);
	bpf_compute_data_pointers(skb);
	skb_clear_hash(skb);

	if (ipv4) {
		skb->protocol = htons(ETH_P_IP);
		iph = ip_hdr(skb);

		if (!iph->check)
			iph->check = ip_fast_csum((unsigned char *)iph,
						  iph->ihl);
	} else {
		skb->protocol = htons(ETH_P_IPV6);
	}

	if (skb_is_gso(skb))
		return handle_gso_encap(skb, ipv4, len);

	return 0;
}

static int __init bpf_lwt_init(void)
{
	return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
}

subsys_initcall(bpf_lwt_init)