xref: /openbmc/linux/net/netfilter/ipvs/ip_vs_xmit.c (revision d0b73b48)
1 /*
2  * ip_vs_xmit.c: various packet transmitters for IPVS
3  *
4  * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
5  *              Julian Anastasov <ja@ssi.bg>
6  *
7  *              This program is free software; you can redistribute it and/or
8  *              modify it under the terms of the GNU General Public License
9  *              as published by the Free Software Foundation; either version
10  *              2 of the License, or (at your option) any later version.
11  *
12  * Changes:
13  *
14  * Description of forwarding methods:
15  * - all transmitters are called from LOCAL_IN (remote clients) and
16  * LOCAL_OUT (local clients), but for ICMP they can also be called from FORWARD
17  * - not all connections have a destination server; for example,
18  * connections on the backup server when fwmark is used
19  * - bypass connections use the daddr from the packet
20  * LOCAL_OUT rules:
21  * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
22  * - skb->pkt_type is not set yet
23  * - the only place where we can see skb->sk != NULL
24  */
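/*
 * Note on return values, as used by the transmitters below: NF_STOLEN
 * means the skb has been consumed (queued for output or freed), while
 * NF_ACCEPT hands the skb back to the caller, typically for local delivery.
 */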
25 
26 #define KMSG_COMPONENT "IPVS"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/tcp.h>                  /* for tcphdr */
32 #include <net/ip.h>
33 #include <net/tcp.h>                    /* for csum_tcpudp_magic */
34 #include <net/udp.h>
35 #include <net/icmp.h>                   /* for icmp_send */
36 #include <net/route.h>                  /* for ip_route_output */
37 #include <net/ipv6.h>
38 #include <net/ip6_route.h>
39 #include <net/addrconf.h>
40 #include <linux/icmpv6.h>
41 #include <linux/netfilter.h>
42 #include <linux/netfilter_ipv4.h>
43 
44 #include <net/ip_vs.h>
45 
46 enum {
47 	IP_VS_RT_MODE_LOCAL	= 1, /* Allow local dest */
48 	IP_VS_RT_MODE_NON_LOCAL	= 2, /* Allow non-local dest */
49 	IP_VS_RT_MODE_RDR	= 4, /* Allow redirect from remote daddr to
50 				      * local
51 				      */
52 	IP_VS_RT_MODE_CONNECT	= 8, /* Always bind route to saddr */
53 	IP_VS_RT_MODE_KNOWN_NH	= 16,/* Route via remote addr */
54 };
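/* Callers combine these flags per forwarding method; ip_vs_nat_xmit(),
 * for example, passes IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
 * IP_VS_RT_MODE_RDR to __ip_vs_get_out_rt().
 */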
55 
56 /*
57  *      Destination cache to speed up outgoing route lookup
58  */
59 static inline void
60 __ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst,
61 		u32 dst_cookie)
62 {
63 	struct dst_entry *old_dst;
64 
65 	old_dst = dest->dst_cache;
66 	dest->dst_cache = dst;
67 	dest->dst_rtos = rtos;
68 	dest->dst_cookie = dst_cookie;
69 	dst_release(old_dst);
70 }
71 
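/* Return the cached route with a reference held if it is still usable,
 * otherwise drop the stale entry and return NULL.
 */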
72 static inline struct dst_entry *
73 __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
74 {
75 	struct dst_entry *dst = dest->dst_cache;
76 
77 	if (!dst)
78 		return NULL;
79 	if ((dst->obsolete || rtos != dest->dst_rtos) &&
80 	    dst->ops->check(dst, dest->dst_cookie) == NULL) {
81 		dest->dst_cache = NULL;
82 		dst_release(dst);
83 		return NULL;
84 	}
85 	dst_hold(dst);
86 	return dst;
87 }
88 
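/* Return true when the packet (or, if it was defragmented by conntrack,
 * its largest fragment) does not fit into the given MTU.
 */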
89 static inline bool
90 __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
91 {
92 	if (IP6CB(skb)->frag_max_size) {
93 		/* frag_max_size tells us that this packet has been
94 		 * defragmented by the netfilter IPv6 conntrack module.
95 		 */
96 		if (IP6CB(skb)->frag_max_size > mtu)
97 			return true; /* largest fragment violates MTU */
98 	}
99 	else if (skb->len > mtu && !skb_is_gso(skb)) {
100 		return true; /* Packet size violates MTU */
101 	}
102 	return false;
103 }
104 
105 /* Get route to daddr, update *saddr, optionally bind route to saddr */
106 static struct rtable *do_output_route4(struct net *net, __be32 daddr,
107 				       u32 rtos, int rt_mode, __be32 *saddr)
108 {
109 	struct flowi4 fl4;
110 	struct rtable *rt;
111 	int loop = 0;
112 
113 	memset(&fl4, 0, sizeof(fl4));
114 	fl4.daddr = daddr;
115 	fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
116 	fl4.flowi4_tos = rtos;
117 	fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
118 			   FLOWI_FLAG_KNOWN_NH : 0;
119 
120 retry:
121 	rt = ip_route_output_key(net, &fl4);
122 	if (IS_ERR(rt)) {
123 		/* Invalid saddr ? */
124 		if (PTR_ERR(rt) == -EINVAL && *saddr &&
125 		    rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
126 			*saddr = 0;
127 			flowi4_update_output(&fl4, 0, rtos, daddr, 0);
128 			goto retry;
129 		}
130 		IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
131 		return NULL;
132 	} else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
133 		ip_rt_put(rt);
134 		*saddr = fl4.saddr;
135 		flowi4_update_output(&fl4, 0, rtos, daddr, fl4.saddr);
136 		loop++;
137 		goto retry;
138 	}
139 	*saddr = fl4.saddr;
140 	return rt;
141 }
142 
143 /* Get route to destination or remote server */
144 static struct rtable *
145 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
146 		   __be32 daddr, u32 rtos, int rt_mode, __be32 *ret_saddr)
147 {
148 	struct net *net = dev_net(skb_dst(skb)->dev);
149 	struct rtable *rt;			/* Route to the other host */
150 	struct rtable *ort;			/* Original route */
151 	int local;
152 
153 	if (dest) {
154 		spin_lock(&dest->dst_lock);
155 		if (!(rt = (struct rtable *)
156 		      __ip_vs_dst_check(dest, rtos))) {
157 			rt = do_output_route4(net, dest->addr.ip, rtos,
158 					      rt_mode, &dest->dst_saddr.ip);
159 			if (!rt) {
160 				spin_unlock(&dest->dst_lock);
161 				return NULL;
162 			}
163 			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
164 			IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, "
165 				  "rtos=%X\n",
166 				  &dest->addr.ip, &dest->dst_saddr.ip,
167 				  atomic_read(&rt->dst.__refcnt), rtos);
168 		}
169 		daddr = dest->addr.ip;
170 		if (ret_saddr)
171 			*ret_saddr = dest->dst_saddr.ip;
172 		spin_unlock(&dest->dst_lock);
173 	} else {
174 		__be32 saddr = htonl(INADDR_ANY);
175 
176 		/* For such unconfigured destinations, avoid many route lookups
177 		 * for performance reasons, since we do not remember saddr
178 		 */
179 		rt_mode &= ~IP_VS_RT_MODE_CONNECT;
180 		rt = do_output_route4(net, daddr, rtos, rt_mode, &saddr);
181 		if (!rt)
182 			return NULL;
183 		if (ret_saddr)
184 			*ret_saddr = saddr;
185 	}
186 
187 	local = rt->rt_flags & RTCF_LOCAL;
188 	if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
189 	      rt_mode)) {
190 		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
191 			     (rt->rt_flags & RTCF_LOCAL) ?
192 			     "local":"non-local", &daddr);
193 		ip_rt_put(rt);
194 		return NULL;
195 	}
196 	if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
197 	    !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
198 		IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
199 			     "requires NAT method, dest: %pI4\n",
200 			     &ip_hdr(skb)->daddr, &daddr);
201 		ip_rt_put(rt);
202 		return NULL;
203 	}
204 	if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
205 		IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
206 			     "to non-local address, dest: %pI4\n",
207 			     &ip_hdr(skb)->saddr, &daddr);
208 		ip_rt_put(rt);
209 		return NULL;
210 	}
211 
212 	return rt;
213 }
214 
215 /* Reroute packet to local IPv4 stack after DNAT */
216 static int
217 __ip_vs_reroute_locally(struct sk_buff *skb)
218 {
219 	struct rtable *rt = skb_rtable(skb);
220 	struct net_device *dev = rt->dst.dev;
221 	struct net *net = dev_net(dev);
222 	struct iphdr *iph = ip_hdr(skb);
223 
224 	if (rt_is_input_route(rt)) {
225 		unsigned long orefdst = skb->_skb_refdst;
226 
227 		if (ip_route_input(skb, iph->daddr, iph->saddr,
228 				   iph->tos, skb->dev))
229 			return 0;
230 		refdst_drop(orefdst);
231 	} else {
232 		struct flowi4 fl4 = {
233 			.daddr = iph->daddr,
234 			.saddr = iph->saddr,
235 			.flowi4_tos = RT_TOS(iph->tos),
236 			.flowi4_mark = skb->mark,
237 		};
238 
239 		rt = ip_route_output_key(net, &fl4);
240 		if (IS_ERR(rt))
241 			return 0;
242 		if (!(rt->rt_flags & RTCF_LOCAL)) {
243 			ip_rt_put(rt);
244 			return 0;
245 		}
246 		/* Drop old route. */
247 		skb_dst_drop(skb);
248 		skb_dst_set(skb, &rt->dst);
249 	}
250 	return 1;
251 }
252 
253 #ifdef CONFIG_IP_VS_IPV6
254 
255 static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
256 {
257 	return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
258 }
259 
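/* Look up an IPv6 route to daddr; optionally pick a source address into
 * *ret_saddr and, when do_xfrm is set, resolve the xfrm policy as well.
 */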
260 static struct dst_entry *
261 __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
262 			struct in6_addr *ret_saddr, int do_xfrm)
263 {
264 	struct dst_entry *dst;
265 	struct flowi6 fl6 = {
266 		.daddr = *daddr,
267 	};
268 
269 	dst = ip6_route_output(net, NULL, &fl6);
270 	if (dst->error)
271 		goto out_err;
272 	if (!ret_saddr)
273 		return dst;
274 	if (ipv6_addr_any(&fl6.saddr) &&
275 	    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
276 			       &fl6.daddr, 0, &fl6.saddr) < 0)
277 		goto out_err;
278 	if (do_xfrm) {
279 		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
280 		if (IS_ERR(dst)) {
281 			dst = NULL;
282 			goto out_err;
283 		}
284 	}
285 	*ret_saddr = fl6.saddr;
286 	return dst;
287 
288 out_err:
289 	dst_release(dst);
290 	IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
291 	return NULL;
292 }
293 
294 /*
295  * Get route to destination or remote server
296  */
297 static struct rt6_info *
298 __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
299 		      struct in6_addr *daddr, struct in6_addr *ret_saddr,
300 		      int do_xfrm, int rt_mode)
301 {
302 	struct net *net = dev_net(skb_dst(skb)->dev);
303 	struct rt6_info *rt;			/* Route to the other host */
304 	struct rt6_info *ort;			/* Original route */
305 	struct dst_entry *dst;
306 	int local;
307 
308 	if (dest) {
309 		spin_lock(&dest->dst_lock);
310 		rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0);
311 		if (!rt) {
312 			u32 cookie;
313 
314 			dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
315 						      &dest->dst_saddr.in6,
316 						      do_xfrm);
317 			if (!dst) {
318 				spin_unlock(&dest->dst_lock);
319 				return NULL;
320 			}
321 			rt = (struct rt6_info *) dst;
322 			cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
323 			__ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie);
324 			IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
325 				  &dest->addr.in6, &dest->dst_saddr.in6,
326 				  atomic_read(&rt->dst.__refcnt));
327 		}
328 		if (ret_saddr)
329 			*ret_saddr = dest->dst_saddr.in6;
330 		spin_unlock(&dest->dst_lock);
331 	} else {
332 		dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
333 		if (!dst)
334 			return NULL;
335 		rt = (struct rt6_info *) dst;
336 	}
337 
338 	local = __ip_vs_is_local_route6(rt);
339 	if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
340 	      rt_mode)) {
341 		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n",
342 			     local ? "local":"non-local", daddr);
343 		dst_release(&rt->dst);
344 		return NULL;
345 	}
346 	if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
347 	    !((ort = (struct rt6_info *) skb_dst(skb)) &&
348 	      __ip_vs_is_local_route6(ort))) {
349 		IP_VS_DBG_RL("Redirect from non-local address %pI6c to local "
350 			     "requires NAT method, dest: %pI6c\n",
351 			     &ipv6_hdr(skb)->daddr, daddr);
352 		dst_release(&rt->dst);
353 		return NULL;
354 	}
355 	if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
356 		     ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
357 				    IPV6_ADDR_LOOPBACK)) {
358 		IP_VS_DBG_RL("Stopping traffic from loopback address %pI6c "
359 			     "to non-local address, dest: %pI6c\n",
360 			     &ipv6_hdr(skb)->saddr, daddr);
361 		dst_release(&rt->dst);
362 		return NULL;
363 	}
364 
365 	return rt;
366 }
367 #endif
368 
369 
370 /*
371  *	Release dest->dst_cache before a dest is removed
372  */
373 void
374 ip_vs_dst_reset(struct ip_vs_dest *dest)
375 {
376 	struct dst_entry *old_dst;
377 
378 	old_dst = dest->dst_cache;
379 	dest->dst_cache = NULL;
380 	dst_release(old_dst);
381 	dest->dst_saddr.ip = 0;
382 }
383 
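/* Prepare the skb for tunnel transmission: mark it as owned by IPVS,
 * confirm the conntrack entry when the connection uses netfilter
 * conntrack and, on NF_ACCEPT, drop the conntrack reference and set up
 * the checksum state for forwarding. Evaluates to the netfilter verdict.
 */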
384 #define IP_VS_XMIT_TUNNEL(skb, cp)				\
385 ({								\
386 	int __ret = NF_ACCEPT;					\
387 								\
388 	(skb)->ipvs_property = 1;				\
389 	if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT))		\
390 		__ret = ip_vs_confirm_conntrack(skb);		\
391 	if (__ret == NF_ACCEPT) {				\
392 		nf_reset(skb);					\
393 		skb_forward_csum(skb);				\
394 	}							\
395 	__ret;							\
396 })
397 
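/* Transmit a NAT-ed packet: mark the skb as owned by IPVS, either mark
 * it untracked or update the conntrack tuple depending on
 * IP_VS_CONN_F_NFCT, return NF_ACCEPT for local destinations and
 * otherwise re-inject the skb at LOCAL_OUT towards the attached route.
 */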
398 #define IP_VS_XMIT_NAT(pf, skb, cp, local)		\
399 do {							\
400 	(skb)->ipvs_property = 1;			\
401 	if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT)))	\
402 		ip_vs_notrack(skb);			\
403 	else						\
404 		ip_vs_update_conntrack(skb, cp, 1);	\
405 	if (local)					\
406 		return NF_ACCEPT;			\
407 	skb_forward_csum(skb);				\
408 	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
409 		skb_dst(skb)->dev, dst_output);		\
410 } while (0)
411 
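/* Like IP_VS_XMIT_NAT, but without touching the conntrack tuple; used
 * by the non-NAT forwarding methods.
 */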
412 #define IP_VS_XMIT(pf, skb, cp, local)			\
413 do {							\
414 	(skb)->ipvs_property = 1;			\
415 	if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT)))	\
416 		ip_vs_notrack(skb);			\
417 	if (local)					\
418 		return NF_ACCEPT;			\
419 	skb_forward_csum(skb);				\
420 	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
421 		skb_dst(skb)->dev, dst_output);		\
422 } while (0)
423 
424 
425 /*
426  *      NULL transmitter (do nothing except return NF_ACCEPT)
427  */
428 int
429 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
430 		struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
431 {
432 	/* we do not touch skb and do not need pskb ptr */
433 	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
434 }
435 
436 
437 /*
438  *      Bypass transmitter
439  *      Let packets bypass the destination when the destination is not
440  *      available; it may only be used in a transparent cache cluster.
441  */
442 int
443 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
444 		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
445 {
446 	struct rtable *rt;			/* Route to the other host */
447 	struct iphdr  *iph = ip_hdr(skb);
448 	int    mtu;
449 
450 	EnterFunction(10);
451 
452 	if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
453 				      IP_VS_RT_MODE_NON_LOCAL, NULL)))
454 		goto tx_error_icmp;
455 
456 	/* MTU checking */
457 	mtu = dst_mtu(&rt->dst);
458 	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
459 	    !skb_is_gso(skb)) {
460 		ip_rt_put(rt);
461 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
462 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
463 		goto tx_error;
464 	}
465 
466 	/*
467 	 * Call ip_send_check because we are not sure it is called
468 	 * after ip_defrag. Is copy-on-write needed?
469 	 */
470 	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
471 		ip_rt_put(rt);
472 		return NF_STOLEN;
473 	}
474 	ip_send_check(ip_hdr(skb));
475 
476 	/* drop old route */
477 	skb_dst_drop(skb);
478 	skb_dst_set(skb, &rt->dst);
479 
480 	/* Another hack: avoid icmp_send in ip_fragment */
481 	skb->local_df = 1;
482 
483 	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
484 
485 	LeaveFunction(10);
486 	return NF_STOLEN;
487 
488  tx_error_icmp:
489 	dst_link_failure(skb);
490  tx_error:
491 	kfree_skb(skb);
492 	LeaveFunction(10);
493 	return NF_STOLEN;
494 }
495 
496 #ifdef CONFIG_IP_VS_IPV6
497 int
498 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
499 		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
500 {
501 	struct rt6_info *rt;			/* Route to the other host */
502 	int    mtu;
503 
504 	EnterFunction(10);
505 
506 	rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr.in6, NULL, 0,
507 				   IP_VS_RT_MODE_NON_LOCAL);
508 	if (!rt)
509 		goto tx_error_icmp;
510 
511 	/* MTU checking */
512 	mtu = dst_mtu(&rt->dst);
513 	if (__mtu_check_toobig_v6(skb, mtu)) {
514 		if (!skb->dev) {
515 			struct net *net = dev_net(skb_dst(skb)->dev);
516 
517 			skb->dev = net->loopback_dev;
518 		}
519 		/* only send ICMP too big on first fragment */
520 		if (!iph->fragoffs)
521 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
522 		dst_release(&rt->dst);
523 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
524 		goto tx_error;
525 	}
526 
527 	/*
528 	 * There is no IP header checksum to fix for IPv6; just make sure
529 	 * the skb is not shared before we change its route.
530 	 */
531 	skb = skb_share_check(skb, GFP_ATOMIC);
532 	if (unlikely(skb == NULL)) {
533 		dst_release(&rt->dst);
534 		return NF_STOLEN;
535 	}
536 
537 	/* drop old route */
538 	skb_dst_drop(skb);
539 	skb_dst_set(skb, &rt->dst);
540 
541 	/* Another hack: avoid icmp_send in ip_fragment */
542 	skb->local_df = 1;
543 
544 	IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
545 
546 	LeaveFunction(10);
547 	return NF_STOLEN;
548 
549  tx_error_icmp:
550 	dst_link_failure(skb);
551  tx_error:
552 	kfree_skb(skb);
553 	LeaveFunction(10);
554 	return NF_STOLEN;
555 }
556 #endif
557 
558 /*
559  *      NAT transmitter (only for outside-to-inside NAT forwarding)
560  *      Not used for related ICMP
561  */
562 int
563 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
564 	       struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
565 {
566 	struct rtable *rt;		/* Route to the other host */
567 	int mtu;
568 	struct iphdr *iph = ip_hdr(skb);
569 	int local;
570 
571 	EnterFunction(10);
572 
573 	/* check if it is a connection of no-client-port */
574 	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
575 		__be16 _pt, *p;
576 		p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
577 		if (p == NULL)
578 			goto tx_error;
579 		ip_vs_conn_fill_cport(cp, *p);
580 		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
581 	}
582 
583 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
584 				      RT_TOS(iph->tos),
585 				      IP_VS_RT_MODE_LOCAL |
586 					IP_VS_RT_MODE_NON_LOCAL |
587 					IP_VS_RT_MODE_RDR, NULL)))
588 		goto tx_error_icmp;
589 	local = rt->rt_flags & RTCF_LOCAL;
590 	/*
591 	 * Avoid duplicate tuple in reply direction for NAT traffic
592 	 * to local address when connection is sync-ed
593 	 */
594 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
595 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
596 		enum ip_conntrack_info ctinfo;
597 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
598 
599 		if (ct && !nf_ct_is_untracked(ct)) {
600 			IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
601 					 "ip_vs_nat_xmit(): "
602 					 "stopping DNAT to local address");
603 			goto tx_error_put;
604 		}
605 	}
606 #endif
607 
608 	/* From world but DNAT to loopback address? */
609 	if (local && ipv4_is_loopback(cp->daddr.ip) &&
610 	    rt_is_input_route(skb_rtable(skb))) {
611 		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
612 				 "stopping DNAT to loopback address");
613 		goto tx_error_put;
614 	}
615 
616 	/* MTU checking */
617 	mtu = dst_mtu(&rt->dst);
618 	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
619 	    !skb_is_gso(skb)) {
620 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
621 		IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
622 				 "ip_vs_nat_xmit(): frag needed for");
623 		goto tx_error_put;
624 	}
625 
626 	/* copy-on-write the packet before mangling it */
627 	if (!skb_make_writable(skb, sizeof(struct iphdr)))
628 		goto tx_error_put;
629 
630 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
631 		goto tx_error_put;
632 
633 	/* mangle the packet */
634 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
635 		goto tx_error_put;
636 	ip_hdr(skb)->daddr = cp->daddr.ip;
637 	ip_send_check(ip_hdr(skb));
638 
639 	if (!local) {
640 		/* drop old route */
641 		skb_dst_drop(skb);
642 		skb_dst_set(skb, &rt->dst);
643 	} else {
644 		ip_rt_put(rt);
645 		/*
646 		 * Some IPv4 replies get their local address from routes,
647 		 * not from the iph; since we DNAT after routing,
648 		 * we need this second input/output route.
649 		 */
650 		if (!__ip_vs_reroute_locally(skb))
651 			goto tx_error;
652 	}
653 
654 	IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");
655 
656 	/* FIXME: when an application helper enlarges the packet and the length
657 	   becomes larger than the MTU of the outgoing device, there will
658 	   still be an MTU problem. */
659 
660 	/* Another hack: avoid icmp_send in ip_fragment */
661 	skb->local_df = 1;
662 
663 	IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
664 
665 	LeaveFunction(10);
666 	return NF_STOLEN;
667 
668   tx_error_icmp:
669 	dst_link_failure(skb);
670   tx_error:
671 	kfree_skb(skb);
672 	LeaveFunction(10);
673 	return NF_STOLEN;
674   tx_error_put:
675 	ip_rt_put(rt);
676 	goto tx_error;
677 }
678 
679 #ifdef CONFIG_IP_VS_IPV6
680 int
681 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
682 		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
683 {
684 	struct rt6_info *rt;		/* Route to the other host */
685 	int mtu;
686 	int local;
687 
688 	EnterFunction(10);
689 
690 	/* check if it is a connection of no-client-port */
691 	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !iph->fragoffs)) {
692 		__be16 _pt, *p;
693 		p = skb_header_pointer(skb, iph->len, sizeof(_pt), &_pt);
694 		if (p == NULL)
695 			goto tx_error;
696 		ip_vs_conn_fill_cport(cp, *p);
697 		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
698 	}
699 
700 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
701 					 0, (IP_VS_RT_MODE_LOCAL |
702 					     IP_VS_RT_MODE_NON_LOCAL |
703 					     IP_VS_RT_MODE_RDR))))
704 		goto tx_error_icmp;
705 	local = __ip_vs_is_local_route6(rt);
706 	/*
707 	 * Avoid duplicate tuple in reply direction for NAT traffic
708 	 * to local address when connection is sync-ed
709 	 */
710 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
711 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
712 		enum ip_conntrack_info ctinfo;
713 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
714 
715 		if (ct && !nf_ct_is_untracked(ct)) {
716 			IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
717 					 "ip_vs_nat_xmit_v6(): "
718 					 "stopping DNAT to local address");
719 			goto tx_error_put;
720 		}
721 	}
722 #endif
723 
724 	/* From world but DNAT to loopback address? */
725 	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
726 	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
727 		IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
728 				 "ip_vs_nat_xmit_v6(): "
729 				 "stopping DNAT to loopback address");
730 		goto tx_error_put;
731 	}
732 
733 	/* MTU checking */
734 	mtu = dst_mtu(&rt->dst);
735 	if (__mtu_check_toobig_v6(skb, mtu)) {
736 		if (!skb->dev) {
737 			struct net *net = dev_net(skb_dst(skb)->dev);
738 
739 			skb->dev = net->loopback_dev;
740 		}
741 		/* only send ICMP too big on first fragment */
742 		if (!iph->fragoffs)
743 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
744 		IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
745 				 "ip_vs_nat_xmit_v6(): frag needed for");
746 		goto tx_error_put;
747 	}
748 
749 	/* copy-on-write the packet before mangling it */
750 	if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
751 		goto tx_error_put;
752 
753 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
754 		goto tx_error_put;
755 
756 	/* mangle the packet */
757 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, iph))
758 		goto tx_error;
759 	ipv6_hdr(skb)->daddr = cp->daddr.in6;
760 
761 	if (!local || !skb->dev) {
762 		/* drop the old route when skb is not shared */
763 		skb_dst_drop(skb);
764 		skb_dst_set(skb, &rt->dst);
765 	} else {
766 		/* destined to loopback, do we need to change route? */
767 		dst_release(&rt->dst);
768 	}
769 
770 	IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");
771 
772 	/* FIXME: when an application helper enlarges the packet and the length
773 	   becomes larger than the MTU of the outgoing device, there will
774 	   still be an MTU problem. */
775 
776 	/* Another hack: avoid icmp_send in ip_fragment */
777 	skb->local_df = 1;
778 
779 	IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
780 
781 	LeaveFunction(10);
782 	return NF_STOLEN;
783 
784 tx_error_icmp:
785 	dst_link_failure(skb);
786 tx_error:
787 	LeaveFunction(10);
788 	kfree_skb(skb);
789 	return NF_STOLEN;
790 tx_error_put:
791 	dst_release(&rt->dst);
792 	goto tx_error;
793 }
794 #endif
795 
796 
797 /*
798  *   IP Tunneling transmitter
799  *
800  *   This function encapsulates the packet in a new IP packet whose
801  *   destination is set to cp->daddr. Most of the code in this function
802  *   is taken from ipip.c.
803  *
804  *   It is used in a VS/TUN cluster. The load balancer selects a real
805  *   server from the cluster based on a scheduling algorithm,
806  *   encapsulates the request packet and forwards it to the selected
807  *   server. For example, all real servers are configured with
808  *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
809  *   the encapsulated packet, it decapsulates the packet, processes
810  *   the request and returns the response packets directly to the client
811  *   without passing through the load balancer. This can greatly increase
812  *   the scalability of the virtual server.
813  *
814  *   Used for ANY protocol
815  */
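/*
 * As a hedged illustration (not part of this file): a real server in a
 * VS/TUN cluster typically has the VIP bound to its IPIP device and
 * reverse-path filtering relaxed so that decapsulated packets are
 * accepted, e.g.:
 *
 *   ip addr add <Virtual IP Address>/32 dev tunl0
 *   ip link set tunl0 up
 *   sysctl -w net.ipv4.conf.tunl0.rp_filter=0
 *
 * The exact interface and sysctl settings depend on the deployment.
 */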
816 int
817 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
818 		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
819 {
820 	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
821 	struct rtable *rt;			/* Route to the other host */
822 	__be32 saddr;				/* Source for tunnel */
823 	struct net_device *tdev;		/* Device to other host */
824 	struct iphdr  *old_iph = ip_hdr(skb);
825 	u8     tos = old_iph->tos;
826 	__be16 df;
827 	struct iphdr  *iph;			/* Our new IP header */
828 	unsigned int max_headroom;		/* The extra header space needed */
829 	int    mtu;
830 	int ret;
831 
832 	EnterFunction(10);
833 
834 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
835 				      RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
836 						   IP_VS_RT_MODE_NON_LOCAL |
837 						   IP_VS_RT_MODE_CONNECT,
838 						   &saddr)))
839 		goto tx_error_icmp;
840 	if (rt->rt_flags & RTCF_LOCAL) {
841 		ip_rt_put(rt);
842 		IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
843 	}
844 
845 	tdev = rt->dst.dev;
846 
847 	mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
848 	if (mtu < 68) {
849 		IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
850 		goto tx_error_put;
851 	}
852 	if (rt_is_output_route(skb_rtable(skb)))
853 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
854 
855 	/* Copy DF, reset fragment offset and MF */
856 	df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
857 
858 	if (df && mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
859 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
860 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
861 		goto tx_error_put;
862 	}
863 
864 	/*
865 	 * Okay, now see if we can stuff it in the buffer as-is.
866 	 */
867 	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
868 
869 	if (skb_headroom(skb) < max_headroom
870 	    || skb_cloned(skb) || skb_shared(skb)) {
871 		struct sk_buff *new_skb =
872 			skb_realloc_headroom(skb, max_headroom);
873 		if (!new_skb) {
874 			ip_rt_put(rt);
875 			kfree_skb(skb);
876 			IP_VS_ERR_RL("%s(): no memory\n", __func__);
877 			return NF_STOLEN;
878 		}
879 		consume_skb(skb);
880 		skb = new_skb;
881 		old_iph = ip_hdr(skb);
882 	}
883 
884 	skb->transport_header = skb->network_header;
885 
886 	/* fix old IP header checksum */
887 	ip_send_check(old_iph);
888 
889 	skb_push(skb, sizeof(struct iphdr));
890 	skb_reset_network_header(skb);
891 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
892 
893 	/* drop old route */
894 	skb_dst_drop(skb);
895 	skb_dst_set(skb, &rt->dst);
896 
897 	/*
898 	 *	Push down and install the IPIP header.
899 	 */
900 	iph			=	ip_hdr(skb);
901 	iph->version		=	4;
902 	iph->ihl		=	sizeof(struct iphdr)>>2;
903 	iph->frag_off		=	df;
904 	iph->protocol		=	IPPROTO_IPIP;
905 	iph->tos		=	tos;
906 	iph->daddr		=	cp->daddr.ip;
907 	iph->saddr		=	saddr;
908 	iph->ttl		=	old_iph->ttl;
909 	ip_select_ident(iph, &rt->dst, NULL);
910 
911 	/* Another hack: avoid icmp_send in ip_fragment */
912 	skb->local_df = 1;
913 
914 	ret = IP_VS_XMIT_TUNNEL(skb, cp);
915 	if (ret == NF_ACCEPT)
916 		ip_local_out(skb);
917 	else if (ret == NF_DROP)
918 		kfree_skb(skb);
919 
920 	LeaveFunction(10);
921 
922 	return NF_STOLEN;
923 
924   tx_error_icmp:
925 	dst_link_failure(skb);
926   tx_error:
927 	kfree_skb(skb);
928 	LeaveFunction(10);
929 	return NF_STOLEN;
930 tx_error_put:
931 	ip_rt_put(rt);
932 	goto tx_error;
933 }
934 
935 #ifdef CONFIG_IP_VS_IPV6
936 int
937 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
938 		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
939 {
940 	struct rt6_info *rt;		/* Route to the other host */
941 	struct in6_addr saddr;		/* Source for tunnel */
942 	struct net_device *tdev;	/* Device to other host */
943 	struct ipv6hdr  *old_iph = ipv6_hdr(skb);
944 	struct ipv6hdr  *iph;		/* Our new IP header */
945 	unsigned int max_headroom;	/* The extra header space needed */
946 	int    mtu;
947 	int ret;
948 
949 	EnterFunction(10);
950 
951 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
952 					 &saddr, 1, (IP_VS_RT_MODE_LOCAL |
953 						     IP_VS_RT_MODE_NON_LOCAL))))
954 		goto tx_error_icmp;
955 	if (__ip_vs_is_local_route6(rt)) {
956 		dst_release(&rt->dst);
957 		IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
958 	}
959 
960 	tdev = rt->dst.dev;
961 
962 	mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
963 	if (mtu < IPV6_MIN_MTU) {
964 		IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
965 			     IPV6_MIN_MTU);
966 		goto tx_error_put;
967 	}
968 	if (skb_dst(skb))
969 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
970 
971 	/* MTU checking: Notice that 'mtu' has been adjusted beforehand */
972 	if (__mtu_check_toobig_v6(skb, mtu)) {
973 		if (!skb->dev) {
974 			struct net *net = dev_net(skb_dst(skb)->dev);
975 
976 			skb->dev = net->loopback_dev;
977 		}
978 		/* only send ICMP too big on first fragment */
979 		if (!ipvsh->fragoffs)
980 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
981 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
982 		goto tx_error_put;
983 	}
984 
985 	/*
986 	 * Okay, now see if we can stuff it in the buffer as-is.
987 	 */
988 	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
989 
990 	if (skb_headroom(skb) < max_headroom
991 	    || skb_cloned(skb) || skb_shared(skb)) {
992 		struct sk_buff *new_skb =
993 			skb_realloc_headroom(skb, max_headroom);
994 		if (!new_skb) {
995 			dst_release(&rt->dst);
996 			kfree_skb(skb);
997 			IP_VS_ERR_RL("%s(): no memory\n", __func__);
998 			return NF_STOLEN;
999 		}
1000 		consume_skb(skb);
1001 		skb = new_skb;
1002 		old_iph = ipv6_hdr(skb);
1003 	}
1004 
1005 	skb->transport_header = skb->network_header;
1006 
1007 	skb_push(skb, sizeof(struct ipv6hdr));
1008 	skb_reset_network_header(skb);
1009 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1010 
1011 	/* drop old route */
1012 	skb_dst_drop(skb);
1013 	skb_dst_set(skb, &rt->dst);
1014 
1015 	/*
1016 	 *	Push down and install the IPv6-in-IPv6 header.
1017 	 */
1018 	iph			=	ipv6_hdr(skb);
1019 	iph->version		=	6;
1020 	iph->nexthdr		=	IPPROTO_IPV6;
1021 	iph->payload_len	=	old_iph->payload_len;
1022 	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
1023 	iph->priority		=	old_iph->priority;
1024 	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
1025 	iph->daddr = cp->daddr.in6;
1026 	iph->saddr = saddr;
1027 	iph->hop_limit		=	old_iph->hop_limit;
1028 
1029 	/* Another hack: avoid icmp_send in ip_fragment */
1030 	skb->local_df = 1;
1031 
1032 	ret = IP_VS_XMIT_TUNNEL(skb, cp);
1033 	if (ret == NF_ACCEPT)
1034 		ip6_local_out(skb);
1035 	else if (ret == NF_DROP)
1036 		kfree_skb(skb);
1037 
1038 	LeaveFunction(10);
1039 
1040 	return NF_STOLEN;
1041 
1042 tx_error_icmp:
1043 	dst_link_failure(skb);
1044 tx_error:
1045 	kfree_skb(skb);
1046 	LeaveFunction(10);
1047 	return NF_STOLEN;
1048 tx_error_put:
1049 	dst_release(&rt->dst);
1050 	goto tx_error;
1051 }
1052 #endif
1053 
1054 
1055 /*
1056  *      Direct Routing transmitter
1057  *      Used for ANY protocol
1058  */
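/*
 * Hedged note (not part of this file): with VS/DR the packet is sent
 * unmodified to the real server as the next hop, so the real server is
 * commonly configured to hold the VIP on a non-ARPing interface (e.g. lo)
 * with arp_ignore/arp_announce tuned so it does not answer ARP for the
 * VIP on the shared network.
 */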
1059 int
1060 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1061 	      struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1062 {
1063 	struct rtable *rt;			/* Route to the other host */
1064 	struct iphdr  *iph = ip_hdr(skb);
1065 	int    mtu;
1066 
1067 	EnterFunction(10);
1068 
1069 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1070 				      RT_TOS(iph->tos),
1071 				      IP_VS_RT_MODE_LOCAL |
1072 				      IP_VS_RT_MODE_NON_LOCAL |
1073 				      IP_VS_RT_MODE_KNOWN_NH, NULL)))
1074 		goto tx_error_icmp;
1075 	if (rt->rt_flags & RTCF_LOCAL) {
1076 		ip_rt_put(rt);
1077 		IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
1078 	}
1079 
1080 	/* MTU checking */
1081 	mtu = dst_mtu(&rt->dst);
1082 	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
1083 	    !skb_is_gso(skb)) {
1084 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1085 		ip_rt_put(rt);
1086 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1087 		goto tx_error;
1088 	}
1089 
1090 	/*
1091 	 * Call ip_send_check because we are not sure it is called
1092 	 * after ip_defrag. Is copy-on-write needed?
1093 	 */
1094 	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
1095 		ip_rt_put(rt);
1096 		return NF_STOLEN;
1097 	}
1098 	ip_send_check(ip_hdr(skb));
1099 
1100 	/* drop old route */
1101 	skb_dst_drop(skb);
1102 	skb_dst_set(skb, &rt->dst);
1103 
1104 	/* Another hack: avoid icmp_send in ip_fragment */
1105 	skb->local_df = 1;
1106 
1107 	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
1108 
1109 	LeaveFunction(10);
1110 	return NF_STOLEN;
1111 
1112   tx_error_icmp:
1113 	dst_link_failure(skb);
1114   tx_error:
1115 	kfree_skb(skb);
1116 	LeaveFunction(10);
1117 	return NF_STOLEN;
1118 }
1119 
1120 #ifdef CONFIG_IP_VS_IPV6
1121 int
1122 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1123 		 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
1124 {
1125 	struct rt6_info *rt;			/* Route to the other host */
1126 	int    mtu;
1127 
1128 	EnterFunction(10);
1129 
1130 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1131 					 0, (IP_VS_RT_MODE_LOCAL |
1132 					     IP_VS_RT_MODE_NON_LOCAL))))
1133 		goto tx_error_icmp;
1134 	if (__ip_vs_is_local_route6(rt)) {
1135 		dst_release(&rt->dst);
1136 		IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
1137 	}
1138 
1139 	/* MTU checking */
1140 	mtu = dst_mtu(&rt->dst);
1141 	if (__mtu_check_toobig_v6(skb, mtu)) {
1142 		if (!skb->dev) {
1143 			struct net *net = dev_net(skb_dst(skb)->dev);
1144 
1145 			skb->dev = net->loopback_dev;
1146 		}
1147 		/* only send ICMP too big on first fragment */
1148 		if (!iph->fragoffs)
1149 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1150 		dst_release(&rt->dst);
1151 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1152 		goto tx_error;
1153 	}
1154 
1155 	/*
1156 	 * There is no IP header checksum to fix for IPv6; just make sure
1157 	 * the skb is not shared before we change its route.
1158 	 */
1159 	skb = skb_share_check(skb, GFP_ATOMIC);
1160 	if (unlikely(skb == NULL)) {
1161 		dst_release(&rt->dst);
1162 		return NF_STOLEN;
1163 	}
1164 
1165 	/* drop old route */
1166 	skb_dst_drop(skb);
1167 	skb_dst_set(skb, &rt->dst);
1168 
1169 	/* Another hack: avoid icmp_send in ip_fragment */
1170 	skb->local_df = 1;
1171 
1172 	IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
1173 
1174 	LeaveFunction(10);
1175 	return NF_STOLEN;
1176 
1177 tx_error_icmp:
1178 	dst_link_failure(skb);
1179 tx_error:
1180 	kfree_skb(skb);
1181 	LeaveFunction(10);
1182 	return NF_STOLEN;
1183 }
1184 #endif
1185 
1186 
1187 /*
1188  *	ICMP packet transmitter
1189  *	called by the ip_vs_in_icmp
1190  */
1191 int
1192 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1193 		struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1194 		struct ip_vs_iphdr *iph)
1195 {
1196 	struct rtable	*rt;	/* Route to the other host */
1197 	int mtu;
1198 	int rc;
1199 	int local;
1200 	int rt_mode;
1201 
1202 	EnterFunction(10);
1203 
1204 	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1205 	   forwarded directly here, because there is no need to
1206 	   translate address/port back */
1207 	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1208 		if (cp->packet_xmit)
1209 			rc = cp->packet_xmit(skb, cp, pp, iph);
1210 		else
1211 			rc = NF_ACCEPT;
1212 		/* do not touch skb anymore */
1213 		atomic_inc(&cp->in_pkts);
1214 		goto out;
1215 	}
1216 
1217 	/*
1218 	 * mangle and send the packet here (only for VS/NAT)
1219 	 */
1220 
1221 	/* LOCALNODE from FORWARD hook is not supported */
1222 	rt_mode = (hooknum != NF_INET_FORWARD) ?
1223 		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1224 		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1225 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1226 				      RT_TOS(ip_hdr(skb)->tos),
1227 				      rt_mode, NULL)))
1228 		goto tx_error_icmp;
1229 	local = rt->rt_flags & RTCF_LOCAL;
1230 
1231 	/*
1232 	 * Avoid duplicate tuple in reply direction for NAT traffic
1233 	 * to local address when connection is sync-ed
1234 	 */
1235 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1236 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1237 		enum ip_conntrack_info ctinfo;
1238 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1239 
1240 		if (ct && !nf_ct_is_untracked(ct)) {
1241 			IP_VS_DBG(10, "%s(): "
1242 				  "stopping DNAT to local address %pI4\n",
1243 				  __func__, &cp->daddr.ip);
1244 			goto tx_error_put;
1245 		}
1246 	}
1247 #endif
1248 
1249 	/* From world but DNAT to loopback address? */
1250 	if (local && ipv4_is_loopback(cp->daddr.ip) &&
1251 	    rt_is_input_route(skb_rtable(skb))) {
1252 		IP_VS_DBG(1, "%s(): "
1253 			  "stopping DNAT to loopback %pI4\n",
1254 			  __func__, &cp->daddr.ip);
1255 		goto tx_error_put;
1256 	}
1257 
1258 	/* MTU checking */
1259 	mtu = dst_mtu(&rt->dst);
1260 	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
1261 	    !skb_is_gso(skb)) {
1262 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1263 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1264 		goto tx_error_put;
1265 	}
1266 
1267 	/* copy-on-write the packet before mangling it */
1268 	if (!skb_make_writable(skb, offset))
1269 		goto tx_error_put;
1270 
1271 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
1272 		goto tx_error_put;
1273 
1274 	ip_vs_nat_icmp(skb, pp, cp, 0);
1275 
1276 	if (!local) {
1277 		/* drop the old route when skb is not shared */
1278 		skb_dst_drop(skb);
1279 		skb_dst_set(skb, &rt->dst);
1280 	} else {
1281 		ip_rt_put(rt);
1282 		/*
1283 		 * Some IPv4 replies get their local address from routes,
1284 		 * not from the iph; since we DNAT after routing,
1285 		 * we need this second input/output route.
1286 		 */
1287 		if (!__ip_vs_reroute_locally(skb))
1288 			goto tx_error;
1289 	}
1290 
1291 	/* Another hack: avoid icmp_send in ip_fragment */
1292 	skb->local_df = 1;
1293 
1294 	IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
1295 
1296 	rc = NF_STOLEN;
1297 	goto out;
1298 
1299   tx_error_icmp:
1300 	dst_link_failure(skb);
1301   tx_error:
1302 	dev_kfree_skb(skb);
1303 	rc = NF_STOLEN;
1304   out:
1305 	LeaveFunction(10);
1306 	return rc;
1307   tx_error_put:
1308 	ip_rt_put(rt);
1309 	goto tx_error;
1310 }
1311 
1312 #ifdef CONFIG_IP_VS_IPV6
1313 int
1314 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1315 		struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1316 		struct ip_vs_iphdr *iph)
1317 {
1318 	struct rt6_info	*rt;	/* Route to the other host */
1319 	int mtu;
1320 	int rc;
1321 	int local;
1322 	int rt_mode;
1323 
1324 	EnterFunction(10);
1325 
1326 	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1327 	   forwarded directly here, because there is no need to
1328 	   translate address/port back */
1329 	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1330 		if (cp->packet_xmit)
1331 			rc = cp->packet_xmit(skb, cp, pp, iph);
1332 		else
1333 			rc = NF_ACCEPT;
1334 		/* do not touch skb anymore */
1335 		atomic_inc(&cp->in_pkts);
1336 		goto out;
1337 	}
1338 
1339 	/*
1340 	 * mangle and send the packet here (only for VS/NAT)
1341 	 */
1342 
1343 	/* LOCALNODE from FORWARD hook is not supported */
1344 	rt_mode = (hooknum != NF_INET_FORWARD) ?
1345 		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1346 		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1347 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1348 					 0, rt_mode)))
1349 		goto tx_error_icmp;
1350 
1351 	local = __ip_vs_is_local_route6(rt);
1352 	/*
1353 	 * Avoid duplicate tuple in reply direction for NAT traffic
1354 	 * to local address when connection is sync-ed
1355 	 */
1356 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1357 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1358 		enum ip_conntrack_info ctinfo;
1359 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1360 
1361 		if (ct && !nf_ct_is_untracked(ct)) {
1362 			IP_VS_DBG(10, "%s(): "
1363 				  "stopping DNAT to local address %pI6\n",
1364 				  __func__, &cp->daddr.in6);
1365 			goto tx_error_put;
1366 		}
1367 	}
1368 #endif
1369 
1370 	/* From world but DNAT to loopback address? */
1371 	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
1372 	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
1373 		IP_VS_DBG(1, "%s(): "
1374 			  "stopping DNAT to loopback %pI6\n",
1375 			  __func__, &cp->daddr.in6);
1376 		goto tx_error_put;
1377 	}
1378 
1379 	/* MTU checking */
1380 	mtu = dst_mtu(&rt->dst);
1381 	if (__mtu_check_toobig_v6(skb, mtu)) {
1382 		if (!skb->dev) {
1383 			struct net *net = dev_net(skb_dst(skb)->dev);
1384 
1385 			skb->dev = net->loopback_dev;
1386 		}
1387 		/* only send ICMP too big on first fragment */
1388 		if (!iph->fragoffs)
1389 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1390 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1391 		goto tx_error_put;
1392 	}
1393 
1394 	/* copy-on-write the packet before mangling it */
1395 	if (!skb_make_writable(skb, offset))
1396 		goto tx_error_put;
1397 
1398 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
1399 		goto tx_error_put;
1400 
1401 	ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1402 
1403 	if (!local || !skb->dev) {
1404 		/* drop the old route when skb is not shared */
1405 		skb_dst_drop(skb);
1406 		skb_dst_set(skb, &rt->dst);
1407 	} else {
1408 		/* destined to loopback, do we need to change route? */
1409 		dst_release(&rt->dst);
1410 	}
1411 
1412 	/* Another hack: avoid icmp_send in ip_fragment */
1413 	skb->local_df = 1;
1414 
1415 	IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
1416 
1417 	rc = NF_STOLEN;
1418 	goto out;
1419 
1420 tx_error_icmp:
1421 	dst_link_failure(skb);
1422 tx_error:
1423 	dev_kfree_skb(skb);
1424 	rc = NF_STOLEN;
1425 out:
1426 	LeaveFunction(10);
1427 	return rc;
1428 tx_error_put:
1429 	dst_release(&rt->dst);
1430 	goto tx_error;
1431 }
1432 #endif
1433