// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

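/*
 * Tear the flow down when a TCP FIN or RST is seen so that the remaining
 * packets of the connection go back through the standard path; other
 * protocols always pass this check.
 */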
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

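/*
 * Patch the TCP checksum after an IPv4 address rewrite; the address is
 * part of the pseudo-header, while the address field itself is rewritten
 * by the caller.
 */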
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

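/*
 * UDP counterpart of the above. A zero UDP checksum means "no checksum"
 * on IPv4, so an all-zero result of the update must be folded to
 * CSUM_MANGLED_0.
 */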
static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

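/*
 * Source NAT: in the original direction, rewrite the source address to
 * the destination of the reply tuple; in the reply direction, rewrite
 * the destination address back. Both the IP header checksum and the
 * layer 4 checksum are updated.
 */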
static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

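/*
 * Destination NAT, the mirror image of nf_flow_snat_ip(): rewrite the
 * destination address in the original direction and the source address
 * in the reply direction.
 */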
static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

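/*
 * Apply the NAT bits recorded in the flow: ports first (the port helpers
 * live in nf_flow_table_core.c), then addresses.
 */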
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  unsigned int thoff, enum flow_offload_tuple_dir dir,
			  struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

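/*
 * Record the packet's encapsulation in the lookup tuple: a VLAN tag held
 * in the skb metadata fills the first slot, a VLAN or PPPoE session
 * header still in front of the network header fills the next one.
 */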
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_network_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

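/*
 * Per-packet parsing context: the ingress device, the offset of the IP
 * header behind any encapsulation, and the layer 4 header size that must
 * be pulled into the linear area.
 */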
struct nf_flowtable_ctx {
	const struct net_device	*in;
	u32			offset;
	u32			hdrsize;
};

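/*
 * Fill a lookup tuple from an IPv4 packet. Fragments, packets with IP
 * options, unsupported layer 4 protocols and packets with TTL <= 1 stay
 * on the standard path. pskb_may_pull() may reallocate the header, hence
 * the reload of iph before the addresses are copied out.
 */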
static int nf_flow_tuple_ip(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	u8 ipproto;

	if (!pskb_may_pull(skb, sizeof(*iph) + ctx->offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += ctx->offset;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		ctx->hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		ctx->hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		ctx->hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
		return -1;

	switch (ipproto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port		= ports->source;
		tuple->dst_port		= ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);

	tuple->src_v4.s_addr	= iph->saddr;
	tuple->dst_v4.s_addr	= iph->daddr;
	tuple->l3proto		= AF_INET;
	tuple->l4proto		= ipproto;
	tuple->iifidx		= ctx->in->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

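/*
 * Revalidate the cached route; only the NEIGH and XFRM xmit types carry
 * a dst entry.
 */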
static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
		return true;

	return dst_check(tuple->dst_cache, tuple->dst_cookie);
}

static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

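/*
 * Return true if the given protocol sits behind a VLAN or PPPoE session
 * header, adding the encapsulation length to *offset.
 */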
static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;
	__be16 inner_proto;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb, &inner_proto) &&
		    inner_proto == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

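/*
 * Strip the encapsulation recorded in the tuple: clear a metadata VLAN
 * tag, or pull VLAN/PPPoE headers off the packet, resetting the network
 * header as we go.
 */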
static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = __nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

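/*
 * FLOW_OFFLOAD_XMIT_DIRECT transmission: rebuild the Ethernet header
 * from the cached addresses and queue the packet on the cached egress
 * device, bypassing the routing layer entirely.
 */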
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

static struct flow_offload_tuple_rhash *
nf_flow_offload_lookup(struct nf_flowtable_ctx *ctx,
		       struct nf_flowtable *flow_table, struct sk_buff *skb)
{
	struct flow_offload_tuple tuple = {};

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &ctx->offset))
		return NULL;

	if (nf_flow_tuple_ip(ctx, skb, &tuple) < 0)
		return NULL;

	return flow_offload_lookup(flow_table, &tuple);
}

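/*
 * The IPv4 fast path proper. Returns 1 if the packet was handled and can
 * be transmitted, 0 to fall back to the standard path, and -1 to drop.
 * All checks run before any modification: MTU, TCP state, route
 * validity. Only then is the flow refreshed and the packet rewritten
 * (encap pop, NAT, TTL decrement, accounting).
 */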
static int nf_flow_offload_forward(struct nf_flowtable_ctx *ctx,
				   struct nf_flowtable *flow_table,
				   struct flow_offload_tuple_rhash *tuplehash,
				   struct sk_buff *skb)
{
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	unsigned int thoff, mtu;
	struct iphdr *iph;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return 0;

	iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset);
	thoff = (iph->ihl * 4) + ctx->offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return 0;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return 0;
	}

	if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
		return -1;

	flow_offload_refresh(flow_table, flow, false);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= ctx->offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	return 1;
}

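/*
 * Netfilter hook for IPv4 flowtables: look the packet up, run the fast
 * path, then transmit according to the cached xmit type (xfrm output,
 * neighbour output, or direct xmit to the cached device).
 */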
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	enum flow_offload_tuple_dir dir;
	struct nf_flowtable_ctx ctx = {
		.in	= state->in,
	};
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	__be32 nexthop;
	int ret;

	tuplehash = nf_flow_offload_lookup(&ctx, flow_table, skb);
	if (!tuplehash)
		return NF_ACCEPT;

	ret = nf_flow_offload_forward(&ctx, flow_table, tuplehash, skb);
	if (ret < 0)
		return NF_DROP;
	else if (ret == 0)
		return NF_ACCEPT;

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
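/*
 * Illustrative only, not part of this file: the hook above is typically
 * wired into a flowtable type along the lines of nf_flow_table_inet.c.
 * A minimal sketch (the exact field set varies between kernel versions):
 *
 *	static struct nf_flowtable_type flowtable_ipv4 = {
 *		.family	= NFPROTO_IPV4,
 *		.init	= nf_flow_table_init,
 *		.free	= nf_flow_table_free,
 *		.hook	= nf_flow_offload_ip_hook,
 *		.owner	= THIS_MODULE,
 *	};
 *
 * The IPv6 helpers below mirror their IPv4 counterparts above.
 */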

static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

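/*
 * IPv6 counterpart of nf_flow_tuple_ip(). Extension headers are not
 * parsed: only packets whose nexthdr is directly TCP, UDP or GRE can
 * match a flow.
 */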
static int nf_flow_tuple_ipv6(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	u8 nexthdr;

	thoff = sizeof(*ip6h) + ctx->offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		ctx->hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		ctx->hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		ctx->hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + ctx->hdrsize))
		return -1;

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port		= ports->source;
		tuple->dst_port		= ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);

	tuple->src_v6		= ip6h->saddr;
	tuple->dst_v6		= ip6h->daddr;
	tuple->l3proto		= AF_INET6;
	tuple->l4proto		= nexthdr;
	tuple->iifidx		= ctx->in->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

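/*
 * IPv6 twin of nf_flow_offload_forward(): same sequence of checks and
 * rewrites, with the hop limit taking the place of the TTL and no IP
 * header checksum to fix up.
 */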
static int nf_flow_offload_ipv6_forward(struct nf_flowtable_ctx *ctx,
					struct nf_flowtable *flow_table,
					struct flow_offload_tuple_rhash *tuplehash,
					struct sk_buff *skb)
{
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	unsigned int thoff, mtu;
	struct ipv6hdr *ip6h;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return 0;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset);
	thoff = sizeof(*ip6h) + ctx->offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return 0;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		flow_offload_teardown(flow);
		return 0;
	}

	if (skb_try_make_writable(skb, thoff + ctx->hdrsize))
		return -1;

	flow_offload_refresh(flow_table, flow, false);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	return 1;
}

static struct flow_offload_tuple_rhash *
nf_flow_offload_ipv6_lookup(struct nf_flowtable_ctx *ctx,
			    struct nf_flowtable *flow_table,
			    struct sk_buff *skb)
{
	struct flow_offload_tuple tuple = {};

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &ctx->offset))
		return NULL;

	if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0)
		return NULL;

	return flow_offload_lookup(flow_table, &tuple);
}

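/*
 * Netfilter hook for IPv6 flowtables, structured exactly like
 * nf_flow_offload_ip_hook() above.
 */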
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	enum flow_offload_tuple_dir dir;
	struct nf_flowtable_ctx ctx = {
		.in	= state->in,
	};
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rt6_info *rt;
	int ret;

	tuplehash = nf_flow_offload_ipv6_lookup(&ctx, flow_table, skb);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	ret = nf_flow_offload_ipv6_forward(&ctx, flow_table, tuplehash, skb);
	if (ret < 0)
		return NF_DROP;
	else if (ret == 0)
		return NF_ACCEPT;

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);