// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

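/* Tear down the flow and bail out when a TCP FIN or RST is seen, so the
 * packet falls back to the classic forwarding path and connection tracking
 * can handle the connection shutdown.
 */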
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

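/* TCP and UDP checksums cover a pseudo-header that includes the IP
 * addresses, so rewriting an address also requires patching the layer 4
 * checksum. A UDP checksum of zero means "no checksum" and is left alone;
 * if the incremental update yields zero, it is folded to CSUM_MANGLED_0.
 */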
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

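/* Rewrite the IPv4 source (SNAT) or destination (DNAT) address using the
 * opposite tuple of the flow, then fix up the IP header checksum and the
 * layer 4 checksum incrementally.
 */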
static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

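/* Apply SNAT and/or DNAT to an IPv4 packet: ports first, then addresses,
 * according to the flags set on the flow entry.
 */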
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

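/* Record the encapsulation (802.1Q VLAN tags and/or a PPPoE session) the
 * packet arrived with, so the lookup key matches the flow entry that was
 * created for this encapsulated path.
 */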
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

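/* Build the IPv4 lookup tuple from the packet. Fragments, packets carrying
 * IP options, non-TCP/UDP protocols and packets whose TTL is about to
 * expire are not handled by the fast path; returning -1 makes the caller
 * accept them into the regular stack.
 */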
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple, u32 *hdrsize,
			    u32 offset)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += offset;

	switch (iph->protocol) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr	= iph->saddr;
	tuple->dst_v4.s_addr	= iph->daddr;
	tuple->src_port		= ports->source;
	tuple->dst_port		= ports->dest;
	tuple->l3proto		= AF_INET;
	tuple->l4proto		= iph->protocol;
	tuple->iifidx		= dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

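/* Transmit path for flows bound to an xfrm transformation: attach the
 * cached route as a noref dst and hand the packet to dst_output().
 */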
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

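/* Read the PPP protocol field that follows the PPPoE session header and
 * map it to the corresponding Ethernet protocol, or 0 if it is neither
 * IPv4 nor IPv6.
 */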
static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
{
	__be16 proto;

	proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			     sizeof(struct pppoe_hdr)));
	switch (proto) {
	case htons(PPP_IP):
		return htons(ETH_P_IP);
	case htons(PPP_IPV6):
		return htons(ETH_P_IPV6);
	}

	return 0;
}

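/* Check whether a VLAN or PPPoE encapsulated packet carries the expected
 * inner protocol and, if so, account for the extra header length in
 * *offset.
 */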
static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

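/* Strip the encapsulation recorded in the tuple before forwarding: clear a
 * hardware-accelerated VLAN tag, or pull the VLAN/PPPoE header from the
 * packet and reset the network header accordingly.
 */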
static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

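/* Direct transmission: build the Ethernet header from the addresses cached
 * in the flow tuple and queue the packet on the output device, bypassing
 * the neighbour layer.
 */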
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

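/* IPv4 fast path hook: look up the packet in the flowtable and, on a hit,
 * apply NAT, decrement the TTL and transmit it directly, returning
 * NF_STOLEN. Packets that do not match, exceed the path MTU or carry a TCP
 * FIN/RST fall back to the classic forwarding path with NF_ACCEPT.
 */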
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	u32 hdrsize, offset = 0;
	unsigned int thoff, mtu;
	struct rtable *rt;
	struct iphdr *iph;
	__be32 nexthop;
	int ret;

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4) + offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

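/* Rewrite the IPv6 source (SNAT) or destination (DNAT) address from the
 * opposite tuple of the flow. IPv6 has no header checksum, so only the
 * layer 4 checksum needs fixing up.
 */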
static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

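/* Apply SNAT and/or DNAT to an IPv6 packet: ports first, then addresses,
 * according to the flags set on the flow entry.
 */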
static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

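/* Build the IPv6 lookup tuple from the packet, the counterpart of
 * nf_flow_tuple_ip(). Only TCP and UDP as the immediate next header are
 * handled; packets whose hop limit is about to expire are left to the
 * regular stack.
 */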
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple, u32 *hdrsize,
			      u32 offset)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	thoff = sizeof(*ip6h) + offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6		= ip6h->saddr;
	tuple->dst_v6		= ip6h->daddr;
	tuple->src_port		= ports->source;
	tuple->dst_port		= ports->dest;
	tuple->l3proto		= AF_INET6;
	tuple->l4proto		= ip6h->nexthdr;
	tuple->iifidx		= dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

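/* IPv6 fast path hook, the counterpart of nf_flow_offload_ip_hook(): on a
 * flowtable hit, apply NAT, decrement the hop limit and transmit the
 * packet directly, returning NF_STOLEN; otherwise return NF_ACCEPT.
 */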
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	unsigned int thoff, mtu;
	u32 hdrsize, offset = 0;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;
	int ret;

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	thoff = sizeof(*ip6h) + offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);