xref: /openbmc/linux/net/netfilter/ipvs/ip_vs_xmit.c (revision 22fd411a)
1 /*
2  * ip_vs_xmit.c: various packet transmitters for IPVS
3  *
4  * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
5  *              Julian Anastasov <ja@ssi.bg>
6  *
7  *              This program is free software; you can redistribute it and/or
8  *              modify it under the terms of the GNU General Public License
9  *              as published by the Free Software Foundation; either version
10  *              2 of the License, or (at your option) any later version.
11  *
12  * Changes:
13  *
14  * Description of forwarding methods:
15  * - all transmitters are called from LOCAL_IN (remote clients) and
16  * LOCAL_OUT (local clients), but for ICMP they can also be called from FORWARD
17  * - not all connections have a destination server, for example
18  * connections on the backup server when fwmark is used
19  * - bypass connections use daddr from packet
20  * LOCAL_OUT rules:
21  * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
22  * - skb->pkt_type is not set yet
23  * - the only place where we can see skb->sk != NULL
24  */
25 
26 #define KMSG_COMPONENT "IPVS"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/tcp.h>                  /* for tcphdr */
32 #include <net/ip.h>
33 #include <net/tcp.h>                    /* for csum_tcpudp_magic */
34 #include <net/udp.h>
35 #include <net/icmp.h>                   /* for icmp_send */
36 #include <net/route.h>                  /* for ip_route_output */
37 #include <net/ipv6.h>
38 #include <net/ip6_route.h>
39 #include <net/addrconf.h>
40 #include <linux/icmpv6.h>
41 #include <linux/netfilter.h>
42 #include <linux/netfilter_ipv4.h>
43 
44 #include <net/ip_vs.h>
45 
46 
47 /*
48  *      Destination cache to speed up outgoing route lookup
49  */
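/*
 * Store a freshly looked-up route in the per-destination cache together
 * with the TOS it was resolved for and a validation cookie, releasing any
 * previously cached route.  Callers in this file hold dest->dst_lock while
 * updating the cache.
 */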
50 static inline void
51 __ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst,
52 		u32 dst_cookie)
53 {
54 	struct dst_entry *old_dst;
55 
56 	old_dst = dest->dst_cache;
57 	dest->dst_cache = dst;
58 	dest->dst_rtos = rtos;
59 	dest->dst_cookie = dst_cookie;
60 	dst_release(old_dst);
61 }
62 
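/*
 * Return the cached route with an extra reference if it can still be used
 * for this TOS.  When the route is marked obsolete or was resolved for a
 * different TOS, revalidate it via dst->ops->check() and drop it from the
 * cache if revalidation fails.
 */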
63 static inline struct dst_entry *
64 __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
65 {
66 	struct dst_entry *dst = dest->dst_cache;
67 
68 	if (!dst)
69 		return NULL;
70 	if ((dst->obsolete || rtos != dest->dst_rtos) &&
71 	    dst->ops->check(dst, dest->dst_cookie) == NULL) {
72 		dest->dst_cache = NULL;
73 		dst_release(dst);
74 		return NULL;
75 	}
76 	dst_hold(dst);
77 	return dst;
78 }
79 
80 /*
81  * Get route to destination or remote server
82  * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
83  *	    &4=Allow redirect from remote daddr to local
84  */
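/*
 * The transmitters below combine these bits as needed: bypass passes 2,
 * DR and TUN pass 1|2, and the NAT/ICMP paths pass 1|2|4 so that a
 * redirect to a local address is also accepted.
 */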
85 static struct rtable *
86 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
87 		   __be32 daddr, u32 rtos, int rt_mode)
88 {
89 	struct net *net = dev_net(skb_dst(skb)->dev);
90 	struct rtable *rt;			/* Route to the other host */
91 	struct rtable *ort;			/* Original route */
92 	int local;
93 
94 	if (dest) {
95 		spin_lock(&dest->dst_lock);
96 		if (!(rt = (struct rtable *)
97 		      __ip_vs_dst_check(dest, rtos))) {
98 			struct flowi fl = {
99 				.fl4_dst = dest->addr.ip,
100 				.fl4_tos = rtos,
101 			};
102 
103 			if (ip_route_output_key(net, &rt, &fl)) {
104 				spin_unlock(&dest->dst_lock);
105 				IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
106 					     &dest->addr.ip);
107 				return NULL;
108 			}
109 			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
110 			IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
111 				  &dest->addr.ip,
112 				  atomic_read(&rt->dst.__refcnt), rtos);
113 		}
114 		spin_unlock(&dest->dst_lock);
115 	} else {
116 		struct flowi fl = {
117 			.fl4_dst = daddr,
118 			.fl4_tos = rtos,
119 		};
120 
121 		if (ip_route_output_key(net, &rt, &fl)) {
122 			IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
123 				     &daddr);
124 			return NULL;
125 		}
126 	}
127 
128 	local = rt->rt_flags & RTCF_LOCAL;
129 	if (!((local ? 1 : 2) & rt_mode)) {
130 		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
131 			     (rt->rt_flags & RTCF_LOCAL) ?
132 			     "local":"non-local", &rt->rt_dst);
133 		ip_rt_put(rt);
134 		return NULL;
135 	}
136 	if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) &&
137 					 ort->rt_flags & RTCF_LOCAL)) {
138 		IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
139 			     "requires NAT method, dest: %pI4\n",
140 			     &ip_hdr(skb)->daddr, &rt->rt_dst);
141 		ip_rt_put(rt);
142 		return NULL;
143 	}
144 	if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
145 		IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
146 			     "to non-local address, dest: %pI4\n",
147 			     &ip_hdr(skb)->saddr, &rt->rt_dst);
148 		ip_rt_put(rt);
149 		return NULL;
150 	}
151 
152 	return rt;
153 }
154 
155 /* Reroute packet to local IPv4 stack after DNAT */
156 static int
157 __ip_vs_reroute_locally(struct sk_buff *skb)
158 {
159 	struct rtable *rt = skb_rtable(skb);
160 	struct net_device *dev = rt->dst.dev;
161 	struct net *net = dev_net(dev);
162 	struct iphdr *iph = ip_hdr(skb);
163 
164 	if (rt_is_input_route(rt)) {
165 		unsigned long orefdst = skb->_skb_refdst;
166 
167 		if (ip_route_input(skb, iph->daddr, iph->saddr,
168 				   iph->tos, skb->dev))
169 			return 0;
170 		refdst_drop(orefdst);
171 	} else {
172 		struct flowi fl = {
173 			.fl4_dst = iph->daddr,
174 			.fl4_src = iph->saddr,
175 			.fl4_tos = RT_TOS(iph->tos),
176 			.mark = skb->mark,
177 		};
178 		struct rtable *rt;
179 
180 		if (ip_route_output_key(net, &rt, &fl))
181 			return 0;
182 		if (!(rt->rt_flags & RTCF_LOCAL)) {
183 			ip_rt_put(rt);
184 			return 0;
185 		}
186 		/* Drop old route. */
187 		skb_dst_drop(skb);
188 		skb_dst_set(skb, &rt->dst);
189 	}
190 	return 1;
191 }
192 
193 #ifdef CONFIG_IP_VS_IPV6
194 
195 static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
196 {
197 	return rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK;
198 }
199 
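/*
 * Look up an IPv6 route for *daddr.  When ret_saddr is given, also select
 * a suitable source address; when do_xfrm is set, pass the route through
 * xfrm_lookup().  Returns NULL (with a rate-limited debug message) on
 * failure.
 */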
200 static struct dst_entry *
201 __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
202 			struct in6_addr *ret_saddr, int do_xfrm)
203 {
204 	struct dst_entry *dst;
205 	struct flowi fl = {
206 		.fl6_dst = *daddr,
207 	};
208 
209 	dst = ip6_route_output(net, NULL, &fl);
210 	if (dst->error)
211 		goto out_err;
212 	if (!ret_saddr)
213 		return dst;
214 	if (ipv6_addr_any(&fl.fl6_src) &&
215 	    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
216 			       &fl.fl6_dst, 0, &fl.fl6_src) < 0)
217 		goto out_err;
218 	if (do_xfrm && xfrm_lookup(net, &dst, &fl, NULL, 0) < 0)
219 		goto out_err;
220 	ipv6_addr_copy(ret_saddr, &fl.fl6_src);
221 	return dst;
222 
223 out_err:
224 	dst_release(dst);
225 	IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
226 	return NULL;
227 }
228 
229 /*
230  * Get route to destination or remote server
231  * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
232  *	    &4=Allow redirect from remote daddr to local
233  */
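/*
 * Unlike the IPv4 variant, the cached route is stored with rtos == 0 and
 * validated through a cookie taken from rt6i_node->fn_sernum, which lets
 * __ip_vs_dst_check() notice IPv6 routing table changes.
 */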
234 static struct rt6_info *
235 __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
236 		      struct in6_addr *daddr, struct in6_addr *ret_saddr,
237 		      int do_xfrm, int rt_mode)
238 {
239 	struct net *net = dev_net(skb_dst(skb)->dev);
240 	struct rt6_info *rt;			/* Route to the other host */
241 	struct rt6_info *ort;			/* Original route */
242 	struct dst_entry *dst;
243 	int local;
244 
245 	if (dest) {
246 		spin_lock(&dest->dst_lock);
247 		rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0);
248 		if (!rt) {
249 			u32 cookie;
250 
251 			dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
252 						      &dest->dst_saddr,
253 						      do_xfrm);
254 			if (!dst) {
255 				spin_unlock(&dest->dst_lock);
256 				return NULL;
257 			}
258 			rt = (struct rt6_info *) dst;
259 			cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
260 			__ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie);
261 			IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
262 				  &dest->addr.in6, &dest->dst_saddr,
263 				  atomic_read(&rt->dst.__refcnt));
264 		}
265 		if (ret_saddr)
266 			ipv6_addr_copy(ret_saddr, &dest->dst_saddr);
267 		spin_unlock(&dest->dst_lock);
268 	} else {
269 		dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
270 		if (!dst)
271 			return NULL;
272 		rt = (struct rt6_info *) dst;
273 	}
274 
275 	local = __ip_vs_is_local_route6(rt);
276 	if (!((local ? 1 : 2) & rt_mode)) {
277 		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n",
278 			     local ? "local":"non-local", daddr);
279 		dst_release(&rt->dst);
280 		return NULL;
281 	}
282 	if (local && !(rt_mode & 4) &&
283 	    !((ort = (struct rt6_info *) skb_dst(skb)) &&
284 	      __ip_vs_is_local_route6(ort))) {
285 		IP_VS_DBG_RL("Redirect from non-local address %pI6 to local "
286 			     "requires NAT method, dest: %pI6\n",
287 			     &ipv6_hdr(skb)->daddr, daddr);
288 		dst_release(&rt->dst);
289 		return NULL;
290 	}
291 	if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
292 		     ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
293 				    IPV6_ADDR_LOOPBACK)) {
294 		IP_VS_DBG_RL("Stopping traffic from loopback address %pI6 "
295 			     "to non-local address, dest: %pI6\n",
296 			     &ipv6_hdr(skb)->saddr, daddr);
297 		dst_release(&rt->dst);
298 		return NULL;
299 	}
300 
301 	return rt;
302 }
303 #endif
304 
305 
306 /*
307  *	Release dest->dst_cache before a dest is removed
308  */
309 void
310 ip_vs_dst_reset(struct ip_vs_dest *dest)
311 {
312 	struct dst_entry *old_dst;
313 
314 	old_dst = dest->dst_cache;
315 	dest->dst_cache = NULL;
316 	dst_release(old_dst);
317 }
318 
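/*
 * Prepare a tunnelled skb for ip_local_out()/ip6_local_out(): mark it as
 * IPVS property, confirm the conntrack entry when IP_VS_CONN_F_NFCT is
 * set, and on NF_ACCEPT reset the netfilter state and checksum handling.
 * Evaluates to the netfilter verdict the caller must act upon.
 */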
319 #define IP_VS_XMIT_TUNNEL(skb, cp)				\
320 ({								\
321 	int __ret = NF_ACCEPT;					\
322 								\
323 	(skb)->ipvs_property = 1;				\
324 	if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT))		\
325 		__ret = ip_vs_confirm_conntrack(skb, cp);	\
326 	if (__ret == NF_ACCEPT) {				\
327 		nf_reset(skb);					\
328 		skb_forward_csum(skb);				\
329 	}							\
330 	__ret;							\
331 })
332 
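/*
 * Transmit a NATed packet: mark it as IPVS property, either disable
 * conntrack tracking or update the conntrack tuple, return NF_ACCEPT
 * directly for local destinations and otherwise re-inject the packet at
 * LOCAL_OUT.
 */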
333 #define IP_VS_XMIT_NAT(pf, skb, cp, local)		\
334 do {							\
335 	(skb)->ipvs_property = 1;			\
336 	if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT)))	\
337 		ip_vs_notrack(skb);			\
338 	else						\
339 		ip_vs_update_conntrack(skb, cp, 1);	\
340 	if (local)					\
341 		return NF_ACCEPT;			\
342 	skb_forward_csum(skb);				\
343 	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
344 		skb_dst(skb)->dev, dst_output);		\
345 } while (0)
346 
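/*
 * Like IP_VS_XMIT_NAT, but never updates the conntrack tuple; used by the
 * transmitters that do not mangle the packet (null, bypass, DR, tunnel and
 * the local short-circuits).
 */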
347 #define IP_VS_XMIT(pf, skb, cp, local)			\
348 do {							\
349 	(skb)->ipvs_property = 1;			\
350 	if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT)))	\
351 		ip_vs_notrack(skb);			\
352 	if (local)					\
353 		return NF_ACCEPT;			\
354 	skb_forward_csum(skb);				\
355 	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
356 		skb_dst(skb)->dev, dst_output);		\
357 } while (0)
358 
359 
360 /*
361  *      NULL transmitter (do nothing except return NF_ACCEPT)
362  */
363 int
364 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
365 		struct ip_vs_protocol *pp)
366 {
367 	/* we do not touch skb and do not need pskb ptr */
368 	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
369 }
370 
371 
372 /*
373  *      Bypass transmitter
374  *      Let packets bypass the destination when the destination is not
375  *      available; it may only be used in a transparent cache cluster.
376  */
377 int
378 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
379 		  struct ip_vs_protocol *pp)
380 {
381 	struct rtable *rt;			/* Route to the other host */
382 	struct iphdr  *iph = ip_hdr(skb);
383 	int    mtu;
384 
385 	EnterFunction(10);
386 
387 	if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr,
388 				      RT_TOS(iph->tos), 2)))
389 		goto tx_error_icmp;
390 
391 	/* MTU checking */
392 	mtu = dst_mtu(&rt->dst);
393 	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
394 		ip_rt_put(rt);
395 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
396 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
397 		goto tx_error;
398 	}
399 
400 	/*
401 	 * Call ip_send_check because we are not sure it is called
402 	 * after ip_defrag. Is copy-on-write needed?
403 	 */
404 	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
405 		ip_rt_put(rt);
406 		return NF_STOLEN;
407 	}
408 	ip_send_check(ip_hdr(skb));
409 
410 	/* drop old route */
411 	skb_dst_drop(skb);
412 	skb_dst_set(skb, &rt->dst);
413 
414 	/* Another hack: avoid icmp_send in ip_fragment */
415 	skb->local_df = 1;
416 
417 	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
418 
419 	LeaveFunction(10);
420 	return NF_STOLEN;
421 
422  tx_error_icmp:
423 	dst_link_failure(skb);
424  tx_error:
425 	kfree_skb(skb);
426 	LeaveFunction(10);
427 	return NF_STOLEN;
428 }
429 
430 #ifdef CONFIG_IP_VS_IPV6
431 int
432 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
433 		     struct ip_vs_protocol *pp)
434 {
435 	struct rt6_info *rt;			/* Route to the other host */
436 	struct ipv6hdr  *iph = ipv6_hdr(skb);
437 	int    mtu;
438 
439 	EnterFunction(10);
440 
441 	if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, 2)))
442 		goto tx_error_icmp;
443 
444 	/* MTU checking */
445 	mtu = dst_mtu(&rt->dst);
446 	if (skb->len > mtu) {
447 		if (!skb->dev) {
448 			struct net *net = dev_net(skb_dst(skb)->dev);
449 
450 			skb->dev = net->loopback_dev;
451 		}
452 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
453 		dst_release(&rt->dst);
454 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
455 		goto tx_error;
456 	}
457 
458 	/*
459 	 * Unshare the skb before replacing its route; unlike the IPv4
460 	 * path there is no header checksum to recompute here.
461 	 */
462 	skb = skb_share_check(skb, GFP_ATOMIC);
463 	if (unlikely(skb == NULL)) {
464 		dst_release(&rt->dst);
465 		return NF_STOLEN;
466 	}
467 
468 	/* drop old route */
469 	skb_dst_drop(skb);
470 	skb_dst_set(skb, &rt->dst);
471 
472 	/* Another hack: avoid icmp_send in ip_fragment */
473 	skb->local_df = 1;
474 
475 	IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
476 
477 	LeaveFunction(10);
478 	return NF_STOLEN;
479 
480  tx_error_icmp:
481 	dst_link_failure(skb);
482  tx_error:
483 	kfree_skb(skb);
484 	LeaveFunction(10);
485 	return NF_STOLEN;
486 }
487 #endif
488 
489 /*
490  *      NAT transmitter (only for outside-to-inside nat forwarding)
491  *      Not used for related ICMP
492  */
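/*
 * Outline: fill in the client port for no-cport connections, route to
 * cp->daddr (allowing local, non-local and redirected-to-local
 * destinations), refuse DNAT to a local address for synced connections and
 * DNAT from the world to a loopback address, check the MTU, rewrite the
 * destination address via pp->dnat_handler, then either re-route the
 * packet locally or attach the new route before IP_VS_XMIT_NAT().
 */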
493 int
494 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
495 	       struct ip_vs_protocol *pp)
496 {
497 	struct rtable *rt;		/* Route to the other host */
498 	int mtu;
499 	struct iphdr *iph = ip_hdr(skb);
500 	int local;
501 
502 	EnterFunction(10);
503 
504 	/* check if it is a connection of no-client-port */
505 	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
506 		__be16 _pt, *p;
507 		p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
508 		if (p == NULL)
509 			goto tx_error;
510 		ip_vs_conn_fill_cport(cp, *p);
511 		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
512 	}
513 
514 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
515 				      RT_TOS(iph->tos), 1|2|4)))
516 		goto tx_error_icmp;
517 	local = rt->rt_flags & RTCF_LOCAL;
518 	/*
519 	 * Avoid duplicate tuple in reply direction for NAT traffic
520 	 * to local address when connection is sync-ed
521 	 */
522 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
523 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
524 		enum ip_conntrack_info ctinfo;
525 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
526 
527 		if (ct && !nf_ct_is_untracked(ct)) {
528 			IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
529 					 "ip_vs_nat_xmit(): "
530 					 "stopping DNAT to local address");
531 			goto tx_error_put;
532 		}
533 	}
534 #endif
535 
536 	/* From world but DNAT to loopback address? */
537 	if (local && ipv4_is_loopback(rt->rt_dst) &&
538 	    rt_is_input_route(skb_rtable(skb))) {
539 		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
540 				 "stopping DNAT to loopback address");
541 		goto tx_error_put;
542 	}
543 
544 	/* MTU checking */
545 	mtu = dst_mtu(&rt->dst);
546 	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
547 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
548 		IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
549 				 "ip_vs_nat_xmit(): frag needed for");
550 		goto tx_error_put;
551 	}
552 
553 	/* copy-on-write the packet before mangling it */
554 	if (!skb_make_writable(skb, sizeof(struct iphdr)))
555 		goto tx_error_put;
556 
557 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
558 		goto tx_error_put;
559 
560 	/* mangle the packet */
561 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
562 		goto tx_error_put;
563 	ip_hdr(skb)->daddr = cp->daddr.ip;
564 	ip_send_check(ip_hdr(skb));
565 
566 	if (!local) {
567 		/* drop old route */
568 		skb_dst_drop(skb);
569 		skb_dst_set(skb, &rt->dst);
570 	} else {
571 		ip_rt_put(rt);
572 		/*
573 		 * Some IPv4 replies get their local address from the route,
574 		 * not from the IP header, so because we DNAT after routing
575 		 * we need this second input/output route lookup.
576 		 */
577 		if (!__ip_vs_reroute_locally(skb))
578 			goto tx_error;
579 	}
580 
581 	IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");
582 
583 	/* FIXME: when an application helper enlarges the packet and the length
584 	   becomes larger than the MTU of the outgoing device, there will
585 	   still be an MTU problem. */
586 
587 	/* Another hack: avoid icmp_send in ip_fragment */
588 	skb->local_df = 1;
589 
590 	IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
591 
592 	LeaveFunction(10);
593 	return NF_STOLEN;
594 
595   tx_error_icmp:
596 	dst_link_failure(skb);
597   tx_error:
598 	kfree_skb(skb);
599 	LeaveFunction(10);
600 	return NF_STOLEN;
601   tx_error_put:
602 	ip_rt_put(rt);
603 	goto tx_error;
604 }
605 
606 #ifdef CONFIG_IP_VS_IPV6
607 int
608 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
609 		  struct ip_vs_protocol *pp)
610 {
611 	struct rt6_info *rt;		/* Route to the other host */
612 	int mtu;
613 	int local;
614 
615 	EnterFunction(10);
616 
617 	/* check if it is a connection of no-client-port */
618 	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
619 		__be16 _pt, *p;
620 		p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
621 				       sizeof(_pt), &_pt);
622 		if (p == NULL)
623 			goto tx_error;
624 		ip_vs_conn_fill_cport(cp, *p);
625 		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
626 	}
627 
628 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
629 					 0, 1|2|4)))
630 		goto tx_error_icmp;
631 	local = __ip_vs_is_local_route6(rt);
632 	/*
633 	 * Avoid duplicate tuple in reply direction for NAT traffic
634 	 * to local address when connection is sync-ed
635 	 */
636 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
637 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
638 		enum ip_conntrack_info ctinfo;
639 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
640 
641 		if (ct && !nf_ct_is_untracked(ct)) {
642 			IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
643 					 "ip_vs_nat_xmit_v6(): "
644 					 "stopping DNAT to local address");
645 			goto tx_error_put;
646 		}
647 	}
648 #endif
649 
650 	/* From world but DNAT to loopback address? */
651 	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
652 	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
653 		IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
654 				 "ip_vs_nat_xmit_v6(): "
655 				 "stopping DNAT to loopback address");
656 		goto tx_error_put;
657 	}
658 
659 	/* MTU checking */
660 	mtu = dst_mtu(&rt->dst);
661 	if (skb->len > mtu) {
662 		if (!skb->dev) {
663 			struct net *net = dev_net(skb_dst(skb)->dev);
664 
665 			skb->dev = net->loopback_dev;
666 		}
667 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
668 		IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
669 				 "ip_vs_nat_xmit_v6(): frag needed for");
670 		goto tx_error_put;
671 	}
672 
673 	/* copy-on-write the packet before mangling it */
674 	if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
675 		goto tx_error_put;
676 
677 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
678 		goto tx_error_put;
679 
680 	/* mangle the packet */
681 	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
682 		goto tx_error;
683 	ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);
684 
685 	if (!local || !skb->dev) {
686 		/* drop the old route when skb is not shared */
687 		skb_dst_drop(skb);
688 		skb_dst_set(skb, &rt->dst);
689 	} else {
690 		/* destined to loopback, do we need to change route? */
691 		dst_release(&rt->dst);
692 	}
693 
694 	IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");
695 
696 	/* FIXME: when an application helper enlarges the packet and the length
697 	   becomes larger than the MTU of the outgoing device, there will
698 	   still be an MTU problem. */
699 
700 	/* Another hack: avoid icmp_send in ip_fragment */
701 	skb->local_df = 1;
702 
703 	IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
704 
705 	LeaveFunction(10);
706 	return NF_STOLEN;
707 
708 tx_error_icmp:
709 	dst_link_failure(skb);
710 tx_error:
711 	LeaveFunction(10);
712 	kfree_skb(skb);
713 	return NF_STOLEN;
714 tx_error_put:
715 	dst_release(&rt->dst);
716 	goto tx_error;
717 }
718 #endif
719 
720 
721 /*
722  *   IP Tunneling transmitter
723  *
724  *   This function encapsulates the packet in a new IP packet whose
725  *   destination is set to cp->daddr. Most of the code in this function
726  *   is taken from ipip.c.
727  *
728  *   It is used in a VS/TUN cluster. The load balancer selects a real
729  *   server from the cluster based on a scheduling algorithm,
730  *   encapsulates the request packet and forwards it to the selected
731  *   server. For example, all real servers are configured with
732  *   "ifconfig tunl0 <Virtual IP Address> up" (see the example below).
733  *   When a server receives the encapsulated packet, it decapsulates it,
734  *   processes the request and returns the response packets directly to
735  *   the client without passing through the load balancer. This can
736  *   greatly increase the scalability of the virtual server.
737  *
738  *   Used for ANY protocol
739  */
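/*
 * As a rough illustration (exact commands depend on the distribution and
 * local policy), a VS/TUN real server is typically prepared along the
 * lines of:
 *
 *	modprobe ipip
 *	ip addr add <Virtual IP Address>/32 dev tunl0
 *	ip link set tunl0 up
 *	sysctl -w net.ipv4.conf.tunl0.rp_filter=0
 *
 * so that the decapsulated packets carrying the VIP are accepted locally.
 */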
740 int
741 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
742 		  struct ip_vs_protocol *pp)
743 {
744 	struct rtable *rt;			/* Route to the other host */
745 	struct net_device *tdev;		/* Device to other host */
746 	struct iphdr  *old_iph = ip_hdr(skb);
747 	u8     tos = old_iph->tos;
748 	__be16 df = old_iph->frag_off;
749 	struct iphdr  *iph;			/* Our new IP header */
750 	unsigned int max_headroom;		/* The extra header space needed */
751 	int    mtu;
752 	int ret;
753 
754 	EnterFunction(10);
755 
756 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
757 				      RT_TOS(tos), 1|2)))
758 		goto tx_error_icmp;
759 	if (rt->rt_flags & RTCF_LOCAL) {
760 		ip_rt_put(rt);
761 		IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
762 	}
763 
764 	tdev = rt->dst.dev;
765 
766 	mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
767 	if (mtu < 68) {
768 		IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
769 		goto tx_error_put;
770 	}
771 	if (skb_dst(skb))
772 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
773 
774 	df |= (old_iph->frag_off & htons(IP_DF));
775 
776 	if ((old_iph->frag_off & htons(IP_DF)) &&
777 	    mtu < ntohs(old_iph->tot_len)) {
778 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
779 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
780 		goto tx_error_put;
781 	}
782 
783 	/*
784 	 * Okay, now see if we can stuff it in the buffer as-is.
785 	 */
786 	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
787 
788 	if (skb_headroom(skb) < max_headroom ||
789 	    skb_cloned(skb) || skb_shared(skb)) {
790 		struct sk_buff *new_skb =
791 			skb_realloc_headroom(skb, max_headroom);
792 		if (!new_skb) {
793 			ip_rt_put(rt);
794 			kfree_skb(skb);
795 			IP_VS_ERR_RL("%s(): no memory\n", __func__);
796 			return NF_STOLEN;
797 		}
798 		kfree_skb(skb);
799 		skb = new_skb;
800 		old_iph = ip_hdr(skb);
801 	}
802 
803 	skb->transport_header = skb->network_header;
804 
805 	/* fix old IP header checksum */
806 	ip_send_check(old_iph);
807 
808 	skb_push(skb, sizeof(struct iphdr));
809 	skb_reset_network_header(skb);
810 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
811 
812 	/* drop old route */
813 	skb_dst_drop(skb);
814 	skb_dst_set(skb, &rt->dst);
815 
816 	/*
817 	 *	Push down and install the IPIP header.
818 	 */
819 	iph			=	ip_hdr(skb);
820 	iph->version		=	4;
821 	iph->ihl		=	sizeof(struct iphdr)>>2;
822 	iph->frag_off		=	df;
823 	iph->protocol		=	IPPROTO_IPIP;
824 	iph->tos		=	tos;
825 	iph->daddr		=	rt->rt_dst;
826 	iph->saddr		=	rt->rt_src;
827 	iph->ttl		=	old_iph->ttl;
828 	ip_select_ident(iph, &rt->dst, NULL);
829 
830 	/* Another hack: avoid icmp_send in ip_fragment */
831 	skb->local_df = 1;
832 
833 	ret = IP_VS_XMIT_TUNNEL(skb, cp);
834 	if (ret == NF_ACCEPT)
835 		ip_local_out(skb);
836 	else if (ret == NF_DROP)
837 		kfree_skb(skb);
838 
839 	LeaveFunction(10);
840 
841 	return NF_STOLEN;
842 
843   tx_error_icmp:
844 	dst_link_failure(skb);
845   tx_error:
846 	kfree_skb(skb);
847 	LeaveFunction(10);
848 	return NF_STOLEN;
849 tx_error_put:
850 	ip_rt_put(rt);
851 	goto tx_error;
852 }
853 
854 #ifdef CONFIG_IP_VS_IPV6
855 int
856 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
857 		     struct ip_vs_protocol *pp)
858 {
859 	struct rt6_info *rt;		/* Route to the other host */
860 	struct in6_addr saddr;		/* Source for tunnel */
861 	struct net_device *tdev;	/* Device to other host */
862 	struct ipv6hdr  *old_iph = ipv6_hdr(skb);
863 	struct ipv6hdr  *iph;		/* Our new IP header */
864 	unsigned int max_headroom;	/* The extra header space needed */
865 	int    mtu;
866 	int ret;
867 
868 	EnterFunction(10);
869 
870 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
871 					 &saddr, 1, 1|2)))
872 		goto tx_error_icmp;
873 	if (__ip_vs_is_local_route6(rt)) {
874 		dst_release(&rt->dst);
875 		IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
876 	}
877 
878 	tdev = rt->dst.dev;
879 
880 	mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
881 	if (mtu < IPV6_MIN_MTU) {
882 		IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
883 			     IPV6_MIN_MTU);
884 		goto tx_error_put;
885 	}
886 	if (skb_dst(skb))
887 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
888 
889 	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
890 		if (!skb->dev) {
891 			struct net *net = dev_net(skb_dst(skb)->dev);
892 
893 			skb->dev = net->loopback_dev;
894 		}
895 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
896 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
897 		goto tx_error_put;
898 	}
899 
900 	/*
901 	 * Okay, now see if we can stuff it in the buffer as-is.
902 	 */
903 	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
904 
905 	if (skb_headroom(skb) < max_headroom ||
906 	    skb_cloned(skb) || skb_shared(skb)) {
907 		struct sk_buff *new_skb =
908 			skb_realloc_headroom(skb, max_headroom);
909 		if (!new_skb) {
910 			dst_release(&rt->dst);
911 			kfree_skb(skb);
912 			IP_VS_ERR_RL("%s(): no memory\n", __func__);
913 			return NF_STOLEN;
914 		}
915 		kfree_skb(skb);
916 		skb = new_skb;
917 		old_iph = ipv6_hdr(skb);
918 	}
919 
920 	skb->transport_header = skb->network_header;
921 
922 	skb_push(skb, sizeof(struct ipv6hdr));
923 	skb_reset_network_header(skb);
924 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
925 
926 	/* drop old route */
927 	skb_dst_drop(skb);
928 	skb_dst_set(skb, &rt->dst);
929 
930 	/*
931 	 *	Push down and install the IPIP header.
932 	 */
933 	iph			=	ipv6_hdr(skb);
934 	iph->version		=	6;
935 	iph->nexthdr		=	IPPROTO_IPV6;
936 	iph->payload_len	=	old_iph->payload_len;
937 	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
938 	iph->priority		=	old_iph->priority;
939 	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
940 	ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
941 	ipv6_addr_copy(&iph->saddr, &saddr);
942 	iph->hop_limit		=	old_iph->hop_limit;
943 
944 	/* Another hack: avoid icmp_send in ip_fragment */
945 	skb->local_df = 1;
946 
947 	ret = IP_VS_XMIT_TUNNEL(skb, cp);
948 	if (ret == NF_ACCEPT)
949 		ip6_local_out(skb);
950 	else if (ret == NF_DROP)
951 		kfree_skb(skb);
952 
953 	LeaveFunction(10);
954 
955 	return NF_STOLEN;
956 
957 tx_error_icmp:
958 	dst_link_failure(skb);
959 tx_error:
960 	kfree_skb(skb);
961 	LeaveFunction(10);
962 	return NF_STOLEN;
963 tx_error_put:
964 	dst_release(&rt->dst);
965 	goto tx_error;
966 }
967 #endif
968 
969 
970 /*
971  *      Direct Routing transmitter
972  *      Used for ANY protocol
973  */
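/*
 * The packet itself is left untouched; it is only re-routed to the chosen
 * real server, which is expected to accept the VIP locally (typically on
 * a non-ARPing interface) and reply to the client directly.
 */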
974 int
975 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
976 	      struct ip_vs_protocol *pp)
977 {
978 	struct rtable *rt;			/* Route to the other host */
979 	struct iphdr  *iph = ip_hdr(skb);
980 	int    mtu;
981 
982 	EnterFunction(10);
983 
984 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
985 				      RT_TOS(iph->tos), 1|2)))
986 		goto tx_error_icmp;
987 	if (rt->rt_flags & RTCF_LOCAL) {
988 		ip_rt_put(rt);
989 		IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
990 	}
991 
992 	/* MTU checking */
993 	mtu = dst_mtu(&rt->dst);
994 	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
995 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
996 		ip_rt_put(rt);
997 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
998 		goto tx_error;
999 	}
1000 
1001 	/*
1002 	 * Call ip_send_check because we are not sure it is called
1003 	 * after ip_defrag. Is copy-on-write needed?
1004 	 */
1005 	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
1006 		ip_rt_put(rt);
1007 		return NF_STOLEN;
1008 	}
1009 	ip_send_check(ip_hdr(skb));
1010 
1011 	/* drop old route */
1012 	skb_dst_drop(skb);
1013 	skb_dst_set(skb, &rt->dst);
1014 
1015 	/* Another hack: avoid icmp_send in ip_fragment */
1016 	skb->local_df = 1;
1017 
1018 	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
1019 
1020 	LeaveFunction(10);
1021 	return NF_STOLEN;
1022 
1023   tx_error_icmp:
1024 	dst_link_failure(skb);
1025   tx_error:
1026 	kfree_skb(skb);
1027 	LeaveFunction(10);
1028 	return NF_STOLEN;
1029 }
1030 
1031 #ifdef CONFIG_IP_VS_IPV6
1032 int
1033 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1034 		 struct ip_vs_protocol *pp)
1035 {
1036 	struct rt6_info *rt;			/* Route to the other host */
1037 	int    mtu;
1038 
1039 	EnterFunction(10);
1040 
1041 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1042 					 0, 1|2)))
1043 		goto tx_error_icmp;
1044 	if (__ip_vs_is_local_route6(rt)) {
1045 		dst_release(&rt->dst);
1046 		IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
1047 	}
1048 
1049 	/* MTU checking */
1050 	mtu = dst_mtu(&rt->dst);
1051 	if (skb->len > mtu) {
1052 		if (!skb->dev) {
1053 			struct net *net = dev_net(skb_dst(skb)->dev);
1054 
1055 			skb->dev = net->loopback_dev;
1056 		}
1057 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1058 		dst_release(&rt->dst);
1059 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1060 		goto tx_error;
1061 	}
1062 
1063 	/*
1064 	 * Unshare the skb before replacing its route; unlike the IPv4
1065 	 * path there is no header checksum to recompute here.
1066 	 */
1067 	skb = skb_share_check(skb, GFP_ATOMIC);
1068 	if (unlikely(skb == NULL)) {
1069 		dst_release(&rt->dst);
1070 		return NF_STOLEN;
1071 	}
1072 
1073 	/* drop old route */
1074 	skb_dst_drop(skb);
1075 	skb_dst_set(skb, &rt->dst);
1076 
1077 	/* Another hack: avoid icmp_send in ip_fragment */
1078 	skb->local_df = 1;
1079 
1080 	IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
1081 
1082 	LeaveFunction(10);
1083 	return NF_STOLEN;
1084 
1085 tx_error_icmp:
1086 	dst_link_failure(skb);
1087 tx_error:
1088 	kfree_skb(skb);
1089 	LeaveFunction(10);
1090 	return NF_STOLEN;
1091 }
1092 #endif
1093 
1094 
1095 /*
1096  *	ICMP packet transmitter
1097  *	called by the ip_vs_in_icmp
1098  */
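/*
 * For VS/TUN, VS/DR and LOCALNODE connections the embedded ICMP payload
 * needs no translation, so the connection's regular packet_xmit callback
 * is reused.  Only VS/NAT connections are mangled here, via
 * ip_vs_nat_icmp(), before being sent out or accepted locally.
 */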
1099 int
1100 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1101 		struct ip_vs_protocol *pp, int offset)
1102 {
1103 	struct rtable	*rt;	/* Route to the other host */
1104 	int mtu;
1105 	int rc;
1106 	int local;
1107 
1108 	EnterFunction(10);
1109 
1110 	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1111 	   forwarded directly here, because there is no need to
1112 	   translate address/port back */
1113 	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1114 		if (cp->packet_xmit)
1115 			rc = cp->packet_xmit(skb, cp, pp);
1116 		else
1117 			rc = NF_ACCEPT;
1118 		/* do not touch skb anymore */
1119 		atomic_inc(&cp->in_pkts);
1120 		goto out;
1121 	}
1122 
1123 	/*
1124 	 * mangle and send the packet here (only for VS/NAT)
1125 	 */
1126 
1127 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1128 				      RT_TOS(ip_hdr(skb)->tos), 1|2|4)))
1129 		goto tx_error_icmp;
1130 	local = rt->rt_flags & RTCF_LOCAL;
1131 
1132 	/*
1133 	 * Avoid duplicate tuple in reply direction for NAT traffic
1134 	 * to local address when connection is sync-ed
1135 	 */
1136 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1137 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1138 		enum ip_conntrack_info ctinfo;
1139 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1140 
1141 		if (ct && !nf_ct_is_untracked(ct)) {
1142 			IP_VS_DBG(10, "%s(): "
1143 				  "stopping DNAT to local address %pI4\n",
1144 				  __func__, &cp->daddr.ip);
1145 			goto tx_error_put;
1146 		}
1147 	}
1148 #endif
1149 
1150 	/* From world but DNAT to loopback address? */
1151 	if (local && ipv4_is_loopback(rt->rt_dst) &&
1152 	    rt_is_input_route(skb_rtable(skb))) {
1153 		IP_VS_DBG(1, "%s(): "
1154 			  "stopping DNAT to loopback %pI4\n",
1155 			  __func__, &cp->daddr.ip);
1156 		goto tx_error_put;
1157 	}
1158 
1159 	/* MTU checking */
1160 	mtu = dst_mtu(&rt->dst);
1161 	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
1162 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1163 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1164 		goto tx_error_put;
1165 	}
1166 
1167 	/* copy-on-write the packet before mangling it */
1168 	if (!skb_make_writable(skb, offset))
1169 		goto tx_error_put;
1170 
1171 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
1172 		goto tx_error_put;
1173 
1174 	ip_vs_nat_icmp(skb, pp, cp, 0);
1175 
1176 	if (!local) {
1177 		/* drop the old route when skb is not shared */
1178 		skb_dst_drop(skb);
1179 		skb_dst_set(skb, &rt->dst);
1180 	} else {
1181 		ip_rt_put(rt);
1182 		/*
1183 		 * Some IPv4 replies get their local address from the route,
1184 		 * not from the IP header, so because we DNAT after routing
1185 		 * we need this second input/output route lookup.
1186 		 */
1187 		if (!__ip_vs_reroute_locally(skb))
1188 			goto tx_error;
1189 	}
1190 
1191 	/* Another hack: avoid icmp_send in ip_fragment */
1192 	skb->local_df = 1;
1193 
1194 	IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
1195 
1196 	rc = NF_STOLEN;
1197 	goto out;
1198 
1199   tx_error_icmp:
1200 	dst_link_failure(skb);
1201   tx_error:
1202 	dev_kfree_skb(skb);
1203 	rc = NF_STOLEN;
1204   out:
1205 	LeaveFunction(10);
1206 	return rc;
1207   tx_error_put:
1208 	ip_rt_put(rt);
1209 	goto tx_error;
1210 }
1211 
1212 #ifdef CONFIG_IP_VS_IPV6
1213 int
1214 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1215 		struct ip_vs_protocol *pp, int offset)
1216 {
1217 	struct rt6_info	*rt;	/* Route to the other host */
1218 	int mtu;
1219 	int rc;
1220 	int local;
1221 
1222 	EnterFunction(10);
1223 
1224 	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1225 	   forwarded directly here, because there is no need to
1226 	   translate address/port back */
1227 	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1228 		if (cp->packet_xmit)
1229 			rc = cp->packet_xmit(skb, cp, pp);
1230 		else
1231 			rc = NF_ACCEPT;
1232 		/* do not touch skb anymore */
1233 		atomic_inc(&cp->in_pkts);
1234 		goto out;
1235 	}
1236 
1237 	/*
1238 	 * mangle and send the packet here (only for VS/NAT)
1239 	 */
1240 
1241 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1242 					 0, 1|2|4)))
1243 		goto tx_error_icmp;
1244 
1245 	local = __ip_vs_is_local_route6(rt);
1246 	/*
1247 	 * Avoid duplicate tuple in reply direction for NAT traffic
1248 	 * to local address when connection is sync-ed
1249 	 */
1250 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1251 	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1252 		enum ip_conntrack_info ctinfo;
1253 		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1254 
1255 		if (ct && !nf_ct_is_untracked(ct)) {
1256 			IP_VS_DBG(10, "%s(): "
1257 				  "stopping DNAT to local address %pI6\n",
1258 				  __func__, &cp->daddr.in6);
1259 			goto tx_error_put;
1260 		}
1261 	}
1262 #endif
1263 
1264 	/* From world but DNAT to loopback address? */
1265 	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
1266 	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
1267 		IP_VS_DBG(1, "%s(): "
1268 			  "stopping DNAT to loopback %pI6\n",
1269 			  __func__, &cp->daddr.in6);
1270 		goto tx_error_put;
1271 	}
1272 
1273 	/* MTU checking */
1274 	mtu = dst_mtu(&rt->dst);
1275 	if (skb->len > mtu) {
1276 		if (!skb->dev) {
1277 			struct net *net = dev_net(skb_dst(skb)->dev);
1278 
1279 			skb->dev = net->loopback_dev;
1280 		}
1281 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1282 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1283 		goto tx_error_put;
1284 	}
1285 
1286 	/* copy-on-write the packet before mangling it */
1287 	if (!skb_make_writable(skb, offset))
1288 		goto tx_error_put;
1289 
1290 	if (skb_cow(skb, rt->dst.dev->hard_header_len))
1291 		goto tx_error_put;
1292 
1293 	ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1294 
1295 	if (!local || !skb->dev) {
1296 		/* drop the old route when skb is not shared */
1297 		skb_dst_drop(skb);
1298 		skb_dst_set(skb, &rt->dst);
1299 	} else {
1300 		/* destined to loopback, do we need to change route? */
1301 		dst_release(&rt->dst);
1302 	}
1303 
1304 	/* Another hack: avoid icmp_send in ip_fragment */
1305 	skb->local_df = 1;
1306 
1307 	IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
1308 
1309 	rc = NF_STOLEN;
1310 	goto out;
1311 
1312 tx_error_icmp:
1313 	dst_link_failure(skb);
1314 tx_error:
1315 	dev_kfree_skb(skb);
1316 	rc = NF_STOLEN;
1317 out:
1318 	LeaveFunction(10);
1319 	return rc;
1320 tx_error_put:
1321 	dst_release(&rt->dst);
1322 	goto tx_error;
1323 }
1324 #endif
1325