xref: /openbmc/linux/net/netfilter/ipvs/ip_vs_core.c (revision a8da474e)
1 /*
2  * IPVS         An implementation of the IP virtual server support for the
3  *              LINUX operating system.  IPVS is now implemented as a module
4  *              over the Netfilter framework. IPVS can be used to build a
5  *              high-performance and highly available server based on a
6  *              cluster of servers.
7  *
8  * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
9  *              Peter Kese <peter.kese@ijs.si>
10  *              Julian Anastasov <ja@ssi.bg>
11  *
12  *              This program is free software; you can redistribute it and/or
13  *              modify it under the terms of the GNU General Public License
14  *              as published by the Free Software Foundation; either version
15  *              2 of the License, or (at your option) any later version.
16  *
17  * The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
18  * with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
19  * and others.
20  *
21  * Changes:
22  *	Paul `Rusty' Russell		properly handle non-linear skbs
23  *	Harald Welte			don't use nfcache
24  *
25  */
26 
27 #define KMSG_COMPONENT "IPVS"
28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29 
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/ip.h>
33 #include <linux/tcp.h>
34 #include <linux/sctp.h>
35 #include <linux/icmp.h>
36 #include <linux/slab.h>
37 
38 #include <net/ip.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <net/icmp.h>                   /* for icmp_send */
42 #include <net/route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/netns/generic.h>		/* net_generic() */
45 
46 #include <linux/netfilter.h>
47 #include <linux/netfilter_ipv4.h>
48 
49 #ifdef CONFIG_IP_VS_IPV6
50 #include <net/ipv6.h>
51 #include <linux/netfilter_ipv6.h>
52 #include <net/ip6_route.h>
53 #endif
54 
55 #include <net/ip_vs.h>
56 
57 
58 EXPORT_SYMBOL(register_ip_vs_scheduler);
59 EXPORT_SYMBOL(unregister_ip_vs_scheduler);
60 EXPORT_SYMBOL(ip_vs_proto_name);
61 EXPORT_SYMBOL(ip_vs_conn_new);
62 EXPORT_SYMBOL(ip_vs_conn_in_get);
63 EXPORT_SYMBOL(ip_vs_conn_out_get);
64 #ifdef CONFIG_IP_VS_PROTO_TCP
65 EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
66 #endif
67 EXPORT_SYMBOL(ip_vs_conn_put);
68 #ifdef CONFIG_IP_VS_DEBUG
69 EXPORT_SYMBOL(ip_vs_get_debug_level);
70 #endif
71 
72 static int ip_vs_net_id __read_mostly;
73 /* netns cnt used for uniqueness */
74 static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
75 
76 /* ID used in ICMP lookups */
77 #define icmp_id(icmph)          (((icmph)->un).echo.id)
78 #define icmpv6_id(icmph)        (icmph->icmp6_dataun.u_echo.identifier)
79 
80 const char *ip_vs_proto_name(unsigned int proto)
81 {
82 	static char buf[20];
83 
84 	switch (proto) {
85 	case IPPROTO_IP:
86 		return "IP";
87 	case IPPROTO_UDP:
88 		return "UDP";
89 	case IPPROTO_TCP:
90 		return "TCP";
91 	case IPPROTO_SCTP:
92 		return "SCTP";
93 	case IPPROTO_ICMP:
94 		return "ICMP";
95 #ifdef CONFIG_IP_VS_IPV6
96 	case IPPROTO_ICMPV6:
97 		return "ICMPv6";
98 #endif
99 	default:
100 		sprintf(buf, "IP_%u", proto);
101 		return buf;
102 	}
103 }
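/* Note on ip_vs_proto_name(): the named cases return string literals, but the
 * default case formats into a single shared static buffer, so the returned
 * pointer is intended for debug/log output only and is not stable across
 * concurrent callers hitting unknown protocol numbers.
 */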
104 
105 void ip_vs_init_hash_table(struct list_head *table, int rows)
106 {
107 	while (--rows >= 0)
108 		INIT_LIST_HEAD(&table[rows]);
109 }
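/* Minimal usage sketch for ip_vs_init_hash_table(); the table name and size
 * below are illustrative only, not taken from this file:
 *
 *	static struct list_head my_hash_tab[256];
 *
 *	ip_vs_init_hash_table(my_hash_tab, 256);
 *
 * Every row is initialised to an empty list head before first use.
 */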
110 
111 static inline void
112 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
113 {
114 	struct ip_vs_dest *dest = cp->dest;
115 	struct netns_ipvs *ipvs = cp->ipvs;
116 
117 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
118 		struct ip_vs_cpu_stats *s;
119 		struct ip_vs_service *svc;
120 
121 		s = this_cpu_ptr(dest->stats.cpustats);
122 		u64_stats_update_begin(&s->syncp);
123 		s->cnt.inpkts++;
124 		s->cnt.inbytes += skb->len;
125 		u64_stats_update_end(&s->syncp);
126 
127 		rcu_read_lock();
128 		svc = rcu_dereference(dest->svc);
129 		s = this_cpu_ptr(svc->stats.cpustats);
130 		u64_stats_update_begin(&s->syncp);
131 		s->cnt.inpkts++;
132 		s->cnt.inbytes += skb->len;
133 		u64_stats_update_end(&s->syncp);
134 		rcu_read_unlock();
135 
136 		s = this_cpu_ptr(ipvs->tot_stats.cpustats);
137 		u64_stats_update_begin(&s->syncp);
138 		s->cnt.inpkts++;
139 		s->cnt.inbytes += skb->len;
140 		u64_stats_update_end(&s->syncp);
141 	}
142 }
143 
144 
145 static inline void
146 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
147 {
148 	struct ip_vs_dest *dest = cp->dest;
149 	struct netns_ipvs *ipvs = cp->ipvs;
150 
151 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
152 		struct ip_vs_cpu_stats *s;
153 		struct ip_vs_service *svc;
154 
155 		s = this_cpu_ptr(dest->stats.cpustats);
156 		u64_stats_update_begin(&s->syncp);
157 		s->cnt.outpkts++;
158 		s->cnt.outbytes += skb->len;
159 		u64_stats_update_end(&s->syncp);
160 
161 		rcu_read_lock();
162 		svc = rcu_dereference(dest->svc);
163 		s = this_cpu_ptr(svc->stats.cpustats);
164 		u64_stats_update_begin(&s->syncp);
165 		s->cnt.outpkts++;
166 		s->cnt.outbytes += skb->len;
167 		u64_stats_update_end(&s->syncp);
168 		rcu_read_unlock();
169 
170 		s = this_cpu_ptr(ipvs->tot_stats.cpustats);
171 		u64_stats_update_begin(&s->syncp);
172 		s->cnt.outpkts++;
173 		s->cnt.outbytes += skb->len;
174 		u64_stats_update_end(&s->syncp);
175 	}
176 }
177 
178 
179 static inline void
180 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
181 {
182 	struct netns_ipvs *ipvs = svc->ipvs;
183 	struct ip_vs_cpu_stats *s;
184 
185 	s = this_cpu_ptr(cp->dest->stats.cpustats);
186 	u64_stats_update_begin(&s->syncp);
187 	s->cnt.conns++;
188 	u64_stats_update_end(&s->syncp);
189 
190 	s = this_cpu_ptr(svc->stats.cpustats);
191 	u64_stats_update_begin(&s->syncp);
192 	s->cnt.conns++;
193 	u64_stats_update_end(&s->syncp);
194 
195 	s = this_cpu_ptr(ipvs->tot_stats.cpustats);
196 	u64_stats_update_begin(&s->syncp);
197 	s->cnt.conns++;
198 	u64_stats_update_end(&s->syncp);
199 }
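/* The three helpers above (ip_vs_in_stats, ip_vs_out_stats, ip_vs_conn_stats)
 * follow one pattern: counters are bumped at three levels - real server
 * (dest), virtual service (svc) and per-netns totals (tot_stats) - using
 * per-CPU counters updated under u64_stats_update_begin()/end().  A reader
 * (e.g. the rate estimator) is expected to fold the per-CPU values roughly
 * like this (sketch, not code from this file):
 *
 *	unsigned int start;
 *	u64 inbytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&s->syncp);
 *		inbytes = s->cnt.inbytes;
 *	} while (u64_stats_fetch_retry(&s->syncp, start));
 */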
200 
201 
202 static inline void
203 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
204 		const struct sk_buff *skb,
205 		struct ip_vs_proto_data *pd)
206 {
207 	if (likely(pd->pp->state_transition))
208 		pd->pp->state_transition(cp, direction, skb, pd);
209 }
210 
211 static inline int
212 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
213 			      struct sk_buff *skb, int protocol,
214 			      const union nf_inet_addr *caddr, __be16 cport,
215 			      const union nf_inet_addr *vaddr, __be16 vport,
216 			      struct ip_vs_conn_param *p)
217 {
218 	ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
219 			      vport, p);
220 	p->pe = rcu_dereference(svc->pe);
221 	if (p->pe && p->pe->fill_param)
222 		return p->pe->fill_param(p, skb);
223 
224 	return 0;
225 }
226 
227 /*
228  *  IPVS persistent scheduling function
229  *  It creates a connection entry according to its template if one exists,
230  *  or selects a server and creates a connection entry plus a template.
231  *  Locking: we are svc user (svc->refcnt), so we hold all dests too
232  *  Protocols supported: TCP, UDP
233  */
234 static struct ip_vs_conn *
235 ip_vs_sched_persist(struct ip_vs_service *svc,
236 		    struct sk_buff *skb, __be16 src_port, __be16 dst_port,
237 		    int *ignored, struct ip_vs_iphdr *iph)
238 {
239 	struct ip_vs_conn *cp = NULL;
240 	struct ip_vs_dest *dest;
241 	struct ip_vs_conn *ct;
242 	__be16 dport = 0;		/* destination port to forward */
243 	unsigned int flags;
244 	struct ip_vs_conn_param param;
245 	const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
246 	union nf_inet_addr snet;	/* source network of the client,
247 					   after masking */
248 	const union nf_inet_addr *src_addr, *dst_addr;
249 
250 	if (likely(!ip_vs_iph_inverse(iph))) {
251 		src_addr = &iph->saddr;
252 		dst_addr = &iph->daddr;
253 	} else {
254 		src_addr = &iph->daddr;
255 		dst_addr = &iph->saddr;
256 	}
257 
258 
259 	/* Mask saddr with the netmask to adjust template granularity */
260 #ifdef CONFIG_IP_VS_IPV6
261 	if (svc->af == AF_INET6)
262 		ipv6_addr_prefix(&snet.in6, &src_addr->in6,
263 				 (__force __u32) svc->netmask);
264 	else
265 #endif
266 		snet.ip = src_addr->ip & svc->netmask;
267 
268 	IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
269 		      "mnet %s\n",
270 		      IP_VS_DBG_ADDR(svc->af, src_addr), ntohs(src_port),
271 		      IP_VS_DBG_ADDR(svc->af, dst_addr), ntohs(dst_port),
272 		      IP_VS_DBG_ADDR(svc->af, &snet));
273 
274 	/*
275 	 * FTP is a complicated network protocol: it uses a control connection
276 	 * and separate data connections. For active FTP, the FTP server
277 	 * initiates the data connection to the client, usually from source
278 	 * port 20. For passive FTP, the FTP server tells the client which port
279 	 * it is passively listening on, and the client opens the data
280 	 * connection. In tunneling or direct routing mode, the load balancer
281 	 * only sees the client-to-server half of the connection, so the data
282 	 * port number is unknown to it. Therefore a conn template like
283 	 * <caddr, 0, vaddr, 0, daddr, 0> is created for a persistent FTP
284 	 * service, and a template like <caddr, 0, vaddr, vport, daddr, dport>
285 	 * is created for other persistent services.
286 	 */
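	/* Worked example (addresses are illustrative only): for a persistent
	 * service 10.0.0.1:80 with netmask 255.255.255.0, a client
	 * 192.168.1.5:34567 scheduled to real server 172.16.0.10:80 gets
	 *
	 *	template:   <TCP, 192.168.1.0, 0, 10.0.0.1, 80, 172.16.0.10, 80>
	 *	connection: <TCP, 192.168.1.5, 34567, 10.0.0.1, 80, 172.16.0.10, 80>
	 *
	 * while a persistent FTP service (vport == FTPPORT) uses the port-less
	 * template <TCP, 192.168.1.0, 0, 10.0.0.1, 0, 172.16.0.10, 0>.
	 */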
287 	{
288 		int protocol = iph->protocol;
289 		const union nf_inet_addr *vaddr = dst_addr;
290 		__be16 vport = 0;
291 
292 		if (dst_port == svc->port) {
293 			/* non-FTP template:
294 			 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
295 			 * FTP template:
296 			 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
297 			 */
298 			if (svc->port != FTPPORT)
299 				vport = dst_port;
300 		} else {
301 			/* Note: persistent fwmark-based services and
302 			 * persistent port zero service are handled here.
303 			 * fwmark template:
304 			 * <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
305 			 * port zero template:
306 			 * <protocol,caddr,0,vaddr,0,daddr,0>
307 			 */
308 			if (svc->fwmark) {
309 				protocol = IPPROTO_IP;
310 				vaddr = &fwmark;
311 			}
312 		}
313 		/* return *ignored = -1 so NF_DROP can be used */
314 		if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
315 						  vaddr, vport, &param) < 0) {
316 			*ignored = -1;
317 			return NULL;
318 		}
319 	}
320 
321 	/* Check if a template already exists */
322 	ct = ip_vs_ct_in_get(&param);
323 	if (!ct || !ip_vs_check_template(ct)) {
324 		struct ip_vs_scheduler *sched;
325 
326 		/*
327 		 * No template found or the dest of the connection
328 		 * template is not available.
329 		 * return *ignored=0 i.e. ICMP and NF_DROP
330 		 */
331 		sched = rcu_dereference(svc->scheduler);
332 		if (sched) {
333 			/* read svc->sched_data after svc->scheduler */
334 			smp_rmb();
335 			dest = sched->schedule(svc, skb, iph);
336 		} else {
337 			dest = NULL;
338 		}
339 		if (!dest) {
340 			IP_VS_DBG(1, "p-schedule: no dest found.\n");
341 			kfree(param.pe_data);
342 			*ignored = 0;
343 			return NULL;
344 		}
345 
346 		if (dst_port == svc->port && svc->port != FTPPORT)
347 			dport = dest->port;
348 
349 		/* Create a template
350 		 * This adds param.pe_data to the template,
351 		 * and thus param.pe_data will be destroyed
352 		 * when the template expires */
353 		ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport,
354 				    IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
355 		if (ct == NULL) {
356 			kfree(param.pe_data);
357 			*ignored = -1;
358 			return NULL;
359 		}
360 
361 		ct->timeout = svc->timeout;
362 	} else {
363 		/* set destination with the found template */
364 		dest = ct->dest;
365 		kfree(param.pe_data);
366 	}
367 
368 	dport = dst_port;
369 	if (dport == svc->port && dest->port)
370 		dport = dest->port;
371 
372 	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
373 		 && iph->protocol == IPPROTO_UDP) ?
374 		IP_VS_CONN_F_ONE_PACKET : 0;
375 
376 	/*
377 	 *    Create a new connection according to the template
378 	 */
379 	ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol, src_addr,
380 			      src_port, dst_addr, dst_port, &param);
381 
382 	cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
383 			    skb->mark);
384 	if (cp == NULL) {
385 		ip_vs_conn_put(ct);
386 		*ignored = -1;
387 		return NULL;
388 	}
389 
390 	/*
391 	 *    Add its control
392 	 */
393 	ip_vs_control_add(cp, ct);
394 	ip_vs_conn_put(ct);
395 
396 	ip_vs_conn_stats(cp, svc);
397 	return cp;
398 }
399 
400 
401 /*
402  *  IPVS main scheduling function
403  *  It selects a server according to the virtual service, and
404  *  creates a connection entry.
405  *  Protocols supported: TCP, UDP
406  *
407  *  Usage of *ignored
408  *
409  * 1 :   protocol tried to schedule (eg. on SYN), found svc but the
410  *       svc/scheduler decides that this packet should be accepted with
411  *       NF_ACCEPT because it must not be scheduled.
412  *
413  * 0 :   scheduler can not find destination, so try bypass or
414  *       return ICMP and then NF_DROP (ip_vs_leave).
415  *
416  * -1 :  scheduler tried to schedule but fatal error occurred, eg.
417  *       ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
418  *       failure such as missing Call-ID, ENOMEM on skb_linearize
419  *       or pe_data. In this case we should return NF_DROP without
420  *       any attempts to send ICMP with ip_vs_leave.
421  */
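/* Illustrative caller sketch (simplified, not copied from any particular
 * protocol handler): a conn_schedule hook is expected to turn the returned
 * cp and *ignored into a netfilter verdict roughly as follows:
 *
 *	int ignored;
 *	struct ip_vs_conn *cp;
 *
 *	cp = ip_vs_schedule(svc, skb, pd, &ignored, iph);
 *	if (!cp && ignored <= 0) {
 *		*verdict = ignored ? NF_DROP :
 *			   ip_vs_leave(svc, skb, pd, iph);
 *		return 0;
 *	}
 */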
422 struct ip_vs_conn *
423 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
424 	       struct ip_vs_proto_data *pd, int *ignored,
425 	       struct ip_vs_iphdr *iph)
426 {
427 	struct ip_vs_protocol *pp = pd->pp;
428 	struct ip_vs_conn *cp = NULL;
429 	struct ip_vs_scheduler *sched;
430 	struct ip_vs_dest *dest;
431 	__be16 _ports[2], *pptr, cport, vport;
432 	const void *caddr, *vaddr;
433 	unsigned int flags;
434 
435 	*ignored = 1;
436 	/*
437 	 * IPv6 frags: only the first fragment gets here.
438 	 */
439 	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
440 	if (pptr == NULL)
441 		return NULL;
442 
443 	if (likely(!ip_vs_iph_inverse(iph))) {
444 		cport = pptr[0];
445 		caddr = &iph->saddr;
446 		vport = pptr[1];
447 		vaddr = &iph->daddr;
448 	} else {
449 		cport = pptr[1];
450 		caddr = &iph->daddr;
451 		vport = pptr[0];
452 		vaddr = &iph->saddr;
453 	}
454 
455 	/*
456 	 * FTPDATA needs this check when using a local real server.
457 	 * Never schedule active FTPDATA connections from the real server.
458 	 * For LVS-NAT they must already be created. For other methods
459 	 * with persistence the connection is created on SYN+ACK.
460 	 */
461 	if (cport == FTPDATA) {
462 		IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
463 			      "Not scheduling FTPDATA");
464 		return NULL;
465 	}
466 
467 	/*
468 	 *    Do not schedule replies from local real server.
469 	 */
470 	if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
471 		iph->hdr_flags ^= IP_VS_HDR_INVERSE;
472 		cp = pp->conn_in_get(svc->ipvs, svc->af, skb, iph);
473 		iph->hdr_flags ^= IP_VS_HDR_INVERSE;
474 
475 		if (cp) {
476 			IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
477 				      "Not scheduling reply for existing"
478 				      " connection");
479 			__ip_vs_conn_put(cp);
480 			return NULL;
481 		}
482 	}
483 
484 	/*
485 	 *    Persistent service
486 	 */
487 	if (svc->flags & IP_VS_SVC_F_PERSISTENT)
488 		return ip_vs_sched_persist(svc, skb, cport, vport, ignored,
489 					   iph);
490 
491 	*ignored = 0;
492 
493 	/*
494 	 *    Non-persistent service
495 	 */
496 	if (!svc->fwmark && vport != svc->port) {
497 		if (!svc->port)
498 			pr_err("Schedule: port zero only supported "
499 			       "in persistent services, "
500 			       "check your ipvs configuration\n");
501 		return NULL;
502 	}
503 
504 	sched = rcu_dereference(svc->scheduler);
505 	if (sched) {
506 		/* read svc->sched_data after svc->scheduler */
507 		smp_rmb();
508 		dest = sched->schedule(svc, skb, iph);
509 	} else {
510 		dest = NULL;
511 	}
512 	if (dest == NULL) {
513 		IP_VS_DBG(1, "Schedule: no dest found.\n");
514 		return NULL;
515 	}
516 
517 	flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
518 		 && iph->protocol == IPPROTO_UDP) ?
519 		IP_VS_CONN_F_ONE_PACKET : 0;
520 
521 	/*
522 	 *    Create a connection entry.
523 	 */
524 	{
525 		struct ip_vs_conn_param p;
526 
527 		ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
528 				      caddr, cport, vaddr, vport, &p);
529 		cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
530 				    dest->port ? dest->port : vport,
531 				    flags, dest, skb->mark);
532 		if (!cp) {
533 			*ignored = -1;
534 			return NULL;
535 		}
536 	}
537 
538 	IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
539 		      "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
540 		      ip_vs_fwd_tag(cp),
541 		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
542 		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
543 		      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
544 		      cp->flags, atomic_read(&cp->refcnt));
545 
546 	ip_vs_conn_stats(cp, svc);
547 	return cp;
548 }
549 
550 static inline int ip_vs_addr_is_unicast(struct net *net, int af,
551 					union nf_inet_addr *addr)
552 {
553 #ifdef CONFIG_IP_VS_IPV6
554 	if (af == AF_INET6)
555 		return ipv6_addr_type(&addr->in6) & IPV6_ADDR_UNICAST;
556 #endif
557 	return (inet_addr_type(net, addr->ip) == RTN_UNICAST);
558 }
559 
560 /*
561  *  Pass or drop the packet.
562  *  Called by ip_vs_in, when the virtual service is available but
563  *  no destination is available for a new connection.
564  */
565 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
566 		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
567 {
568 	__be16 _ports[2], *pptr, dport;
569 	struct netns_ipvs *ipvs = svc->ipvs;
570 	struct net *net = ipvs->net;
571 
572 	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
573 	if (!pptr)
574 		return NF_DROP;
575 	dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
576 
577 	/* if it is a fwmark-based service, the cache_bypass sysctl is enabled
578 	   and the destination is a non-local unicast address, then create
579 	   a cache_bypass connection entry */
580 	if (sysctl_cache_bypass(ipvs) && svc->fwmark &&
581 	    !(iph->hdr_flags & (IP_VS_HDR_INVERSE | IP_VS_HDR_ICMP)) &&
582 	    ip_vs_addr_is_unicast(net, svc->af, &iph->daddr)) {
583 		int ret;
584 		struct ip_vs_conn *cp;
585 		unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
586 				      iph->protocol == IPPROTO_UDP) ?
587 				      IP_VS_CONN_F_ONE_PACKET : 0;
588 		union nf_inet_addr daddr =  { .all = { 0, 0, 0, 0 } };
589 
590 		/* create a new connection entry */
591 		IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
592 		{
593 			struct ip_vs_conn_param p;
594 			ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
595 					      &iph->saddr, pptr[0],
596 					      &iph->daddr, pptr[1], &p);
597 			cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
598 					    IP_VS_CONN_F_BYPASS | flags,
599 					    NULL, skb->mark);
600 			if (!cp)
601 				return NF_DROP;
602 		}
603 
604 		/* statistics */
605 		ip_vs_in_stats(cp, skb);
606 
607 		/* set state */
608 		ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
609 
610 		/* transmit the first SYN packet */
611 		ret = cp->packet_xmit(skb, cp, pd->pp, iph);
612 		/* do not touch skb anymore */
613 
614 		atomic_inc(&cp->in_pkts);
615 		ip_vs_conn_put(cp);
616 		return ret;
617 	}
618 
619 	/*
620 	 * When a virtual FTP service is present, packets destined
621 	 * for other services on the VIP may get here (except services
622 	 * listed in the ipvs table). Pass those packets along, because
623 	 * it is not IPVS's job to decide to drop them.
624 	 */
625 	if (svc->port == FTPPORT && dport != FTPPORT)
626 		return NF_ACCEPT;
627 
628 	if (unlikely(ip_vs_iph_icmp(iph)))
629 		return NF_DROP;
630 
631 	/*
632 	 * Notify the client that the destination is unreachable, and
633 	 * release the socket buffer.
634 	 * Since we are at the IP layer, no TCP socket is actually
635 	 * created and a TCP RST packet cannot be sent; instead,
636 	 * ICMP_PORT_UNREACH is sent here whether it is TCP or UDP. --WZ
637 	 */
638 #ifdef CONFIG_IP_VS_IPV6
639 	if (svc->af == AF_INET6) {
640 		if (!skb->dev)
641 			skb->dev = net->loopback_dev;
642 		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
643 	} else
644 #endif
645 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
646 
647 	return NF_DROP;
648 }
649 
650 #ifdef CONFIG_SYSCTL
651 
652 static int sysctl_snat_reroute(struct netns_ipvs *ipvs)
653 {
654 	return ipvs->sysctl_snat_reroute;
655 }
656 
657 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs)
658 {
659 	return ipvs->sysctl_nat_icmp_send;
660 }
661 
662 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
663 {
664 	return ipvs->sysctl_expire_nodest_conn;
665 }
666 
667 #else
668 
669 static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; }
670 static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; }
671 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
672 
673 #endif
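/* The accessors above read per-netns sysctls that, with CONFIG_SYSCTL, are
 * normally exposed under /proc/sys/net/ipv4/vs/ (snat_reroute, nat_icmp_send,
 * expire_nodest_conn); the registration lives in ip_vs_ctl.c, not here.  For
 * example, making connections to a removed real server expire immediately
 * would typically be:
 *
 *	sysctl -w net.ipv4.vs.expire_nodest_conn=1
 */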
674 
675 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
676 {
677 	return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
678 }
679 
680 static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
681 {
682 	if (NF_INET_LOCAL_IN == hooknum)
683 		return IP_DEFRAG_VS_IN;
684 	if (NF_INET_FORWARD == hooknum)
685 		return IP_DEFRAG_VS_FWD;
686 	return IP_DEFRAG_VS_OUT;
687 }
688 
689 static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs,
690 				     struct sk_buff *skb, u_int32_t user)
691 {
692 	int err;
693 
694 	local_bh_disable();
695 	err = ip_defrag(ipvs->net, skb, user);
696 	local_bh_enable();
697 	if (!err)
698 		ip_send_check(ip_hdr(skb));
699 
700 	return err;
701 }
702 
703 static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
704 				 struct sk_buff *skb, unsigned int hooknum)
705 {
706 	if (!sysctl_snat_reroute(ipvs))
707 		return 0;
708 	/* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
709 	if (NF_INET_LOCAL_IN == hooknum)
710 		return 0;
711 #ifdef CONFIG_IP_VS_IPV6
712 	if (af == AF_INET6) {
713 		struct dst_entry *dst = skb_dst(skb);
714 
715 		if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
716 		    ip6_route_me_harder(ipvs->net, skb) != 0)
717 			return 1;
718 	} else
719 #endif
720 		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
721 		    ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
722 			return 1;
723 
724 	return 0;
725 }
726 
727 /*
728  * Packet has been made sufficiently writable in caller
729  * - inout: 1=in->out, 0=out->in
730  */
731 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
732 		    struct ip_vs_conn *cp, int inout)
733 {
734 	struct iphdr *iph	 = ip_hdr(skb);
735 	unsigned int icmp_offset = iph->ihl*4;
736 	struct icmphdr *icmph	 = (struct icmphdr *)(skb_network_header(skb) +
737 						      icmp_offset);
738 	struct iphdr *ciph	 = (struct iphdr *)(icmph + 1);
739 
740 	if (inout) {
741 		iph->saddr = cp->vaddr.ip;
742 		ip_send_check(iph);
743 		ciph->daddr = cp->vaddr.ip;
744 		ip_send_check(ciph);
745 	} else {
746 		iph->daddr = cp->daddr.ip;
747 		ip_send_check(iph);
748 		ciph->saddr = cp->daddr.ip;
749 		ip_send_check(ciph);
750 	}
751 
752 	/* the TCP/UDP/SCTP port */
753 	if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
754 	    IPPROTO_SCTP == ciph->protocol) {
755 		__be16 *ports = (void *)ciph + ciph->ihl*4;
756 
757 		if (inout)
758 			ports[1] = cp->vport;
759 		else
760 			ports[0] = cp->dport;
761 	}
762 
763 	/* And finally the ICMP checksum */
764 	icmph->checksum = 0;
765 	icmph->checksum = ip_vs_checksum_complete(skb, icmp_offset);
766 	skb->ip_summed = CHECKSUM_UNNECESSARY;
767 
768 	if (inout)
769 		IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
770 			"Forwarding altered outgoing ICMP");
771 	else
772 		IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
773 			"Forwarding altered incoming ICMP");
774 }
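/* Example of the rewrite above for the inout (server-to-client) case, with
 * illustrative addresses: if real server 172.16.0.10 emits an ICMP error
 * about a client packet that was NATed to it, the outer source address is
 * rewritten from the real server to the VIP, and the embedded (quoted) header
 * has its destination rewritten from 172.16.0.10:rport back to VIP:vport, so
 * the client sees an error matching the packet it actually sent.  The
 * out->in case is the mirror image for errors travelling towards the real
 * server.
 */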
775 
776 #ifdef CONFIG_IP_VS_IPV6
777 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
778 		    struct ip_vs_conn *cp, int inout)
779 {
780 	struct ipv6hdr *iph	 = ipv6_hdr(skb);
781 	unsigned int icmp_offset = 0;
782 	unsigned int offs	 = 0; /* header offset*/
783 	int protocol;
784 	struct icmp6hdr *icmph;
785 	struct ipv6hdr *ciph;
786 	unsigned short fragoffs;
787 
788 	ipv6_find_hdr(skb, &icmp_offset, IPPROTO_ICMPV6, &fragoffs, NULL);
789 	icmph = (struct icmp6hdr *)(skb_network_header(skb) + icmp_offset);
790 	offs = icmp_offset + sizeof(struct icmp6hdr);
791 	ciph = (struct ipv6hdr *)(skb_network_header(skb) + offs);
792 
793 	protocol = ipv6_find_hdr(skb, &offs, -1, &fragoffs, NULL);
794 
795 	if (inout) {
796 		iph->saddr = cp->vaddr.in6;
797 		ciph->daddr = cp->vaddr.in6;
798 	} else {
799 		iph->daddr = cp->daddr.in6;
800 		ciph->saddr = cp->daddr.in6;
801 	}
802 
803 	/* the TCP/UDP/SCTP port */
804 	if (!fragoffs && (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
805 			  IPPROTO_SCTP == protocol)) {
806 		__be16 *ports = (void *)(skb_network_header(skb) + offs);
807 
808 		IP_VS_DBG(11, "%s() changed port %d to %d\n", __func__,
809 			      ntohs(inout ? ports[1] : ports[0]),
810 			      ntohs(inout ? cp->vport : cp->dport));
811 		if (inout)
812 			ports[1] = cp->vport;
813 		else
814 			ports[0] = cp->dport;
815 	}
816 
817 	/* And finally the ICMP checksum */
818 	icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
819 					      skb->len - icmp_offset,
820 					      IPPROTO_ICMPV6, 0);
821 	skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
822 	skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
823 	skb->ip_summed = CHECKSUM_PARTIAL;
824 
825 	if (inout)
826 		IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
827 			      (void *)ciph - (void *)iph,
828 			      "Forwarding altered outgoing ICMPv6");
829 	else
830 		IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
831 			      (void *)ciph - (void *)iph,
832 			      "Forwarding altered incoming ICMPv6");
833 }
834 #endif
835 
836 /* Handle relevant response ICMP messages - forward to the right
837  * destination host.
838  */
839 static int handle_response_icmp(int af, struct sk_buff *skb,
840 				union nf_inet_addr *snet,
841 				__u8 protocol, struct ip_vs_conn *cp,
842 				struct ip_vs_protocol *pp,
843 				unsigned int offset, unsigned int ihl,
844 				unsigned int hooknum)
845 {
846 	unsigned int verdict = NF_DROP;
847 
848 	if (IP_VS_FWD_METHOD(cp) != 0) {
849 		pr_err("shouldn't reach here, because the box is on the "
850 		       "half connection in the tun/dr module.\n");
851 	}
852 
853 	/* Ensure the checksum is correct */
854 	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
855 		/* Failed checksum! */
856 		IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
857 			      IP_VS_DBG_ADDR(af, snet));
858 		goto out;
859 	}
860 
861 	if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
862 	    IPPROTO_SCTP == protocol)
863 		offset += 2 * sizeof(__u16);
864 	if (!skb_make_writable(skb, offset))
865 		goto out;
866 
867 #ifdef CONFIG_IP_VS_IPV6
868 	if (af == AF_INET6)
869 		ip_vs_nat_icmp_v6(skb, pp, cp, 1);
870 	else
871 #endif
872 		ip_vs_nat_icmp(skb, pp, cp, 1);
873 
874 	if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
875 		goto out;
876 
877 	/* do the statistics and put it back */
878 	ip_vs_out_stats(cp, skb);
879 
880 	skb->ipvs_property = 1;
881 	if (!(cp->flags & IP_VS_CONN_F_NFCT))
882 		ip_vs_notrack(skb);
883 	else
884 		ip_vs_update_conntrack(skb, cp, 0);
885 	verdict = NF_ACCEPT;
886 
887 out:
888 	__ip_vs_conn_put(cp);
889 
890 	return verdict;
891 }
892 
893 /*
894  *	Handle ICMP messages in the inside-to-outside direction (outgoing).
895  *	Find any that might be relevant, check against existing connections.
896  *	Currently handles error types - unreachable, quench, ttl exceeded.
897  */
898 static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
899 			  int *related, unsigned int hooknum)
900 {
901 	struct iphdr *iph;
902 	struct icmphdr	_icmph, *ic;
903 	struct iphdr	_ciph, *cih;	/* The ip header contained within the ICMP */
904 	struct ip_vs_iphdr ciph;
905 	struct ip_vs_conn *cp;
906 	struct ip_vs_protocol *pp;
907 	unsigned int offset, ihl;
908 	union nf_inet_addr snet;
909 
910 	*related = 1;
911 
912 	/* reassemble IP fragments */
913 	if (ip_is_fragment(ip_hdr(skb))) {
914 		if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
915 			return NF_STOLEN;
916 	}
917 
918 	iph = ip_hdr(skb);
919 	offset = ihl = iph->ihl * 4;
920 	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
921 	if (ic == NULL)
922 		return NF_DROP;
923 
924 	IP_VS_DBG(12, "Outgoing ICMP (%d,%d) %pI4->%pI4\n",
925 		  ic->type, ntohs(icmp_id(ic)),
926 		  &iph->saddr, &iph->daddr);
927 
928 	/*
929 	 * Work through seeing if this is for us.
930 	 * These checks are supposed to be in an order that means easy
931 	 * things are checked first to speed up processing.... however
932 	 * this means that some packets will manage to get a long way
933 	 * down this stack and then be rejected, but that's life.
934 	 */
935 	if ((ic->type != ICMP_DEST_UNREACH) &&
936 	    (ic->type != ICMP_SOURCE_QUENCH) &&
937 	    (ic->type != ICMP_TIME_EXCEEDED)) {
938 		*related = 0;
939 		return NF_ACCEPT;
940 	}
941 
942 	/* Now find the contained IP header */
943 	offset += sizeof(_icmph);
944 	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
945 	if (cih == NULL)
946 		return NF_ACCEPT; /* The packet looks wrong, ignore */
947 
948 	pp = ip_vs_proto_get(cih->protocol);
949 	if (!pp)
950 		return NF_ACCEPT;
951 
952 	/* Is the embedded protocol header present? */
953 	if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
954 		     pp->dont_defrag))
955 		return NF_ACCEPT;
956 
957 	IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
958 		      "Checking outgoing ICMP for");
959 
960 	ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
961 
962 	/* The embedded headers contain source and dest in reverse order */
963 	cp = pp->conn_out_get(ipvs, AF_INET, skb, &ciph);
964 	if (!cp)
965 		return NF_ACCEPT;
966 
967 	snet.ip = iph->saddr;
968 	return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
969 				    pp, ciph.len, ihl, hooknum);
970 }
971 
972 #ifdef CONFIG_IP_VS_IPV6
973 static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
974 			     int *related,  unsigned int hooknum,
975 			     struct ip_vs_iphdr *ipvsh)
976 {
977 	struct icmp6hdr	_icmph, *ic;
978 	struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
979 	struct ip_vs_conn *cp;
980 	struct ip_vs_protocol *pp;
981 	union nf_inet_addr snet;
982 	unsigned int offset;
983 
984 	*related = 1;
985 	ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
986 	if (ic == NULL)
987 		return NF_DROP;
988 
989 	/*
990 	 * Work through seeing if this is for us.
991 	 * These checks are supposed to be in an order that means easy
992 	 * things are checked first to speed up processing.... however
993 	 * this means that some packets will manage to get a long way
994 	 * down this stack and then be rejected, but that's life.
995 	 */
996 	if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
997 		*related = 0;
998 		return NF_ACCEPT;
999 	}
1000 	/* A fragment header before the ICMP header tells us that this
1001 	 * is not an error message, since error messages can't be fragmented.
1002 	 */
1003 	if (ipvsh->flags & IP6_FH_F_FRAG)
1004 		return NF_DROP;
1005 
1006 	IP_VS_DBG(8, "Outgoing ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1007 		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
1008 		  &ipvsh->saddr, &ipvsh->daddr);
1009 
1010 	if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, ipvsh->len + sizeof(_icmph),
1011 				     true, &ciph))
1012 		return NF_ACCEPT; /* The packet looks wrong, ignore */
1013 
1014 	pp = ip_vs_proto_get(ciph.protocol);
1015 	if (!pp)
1016 		return NF_ACCEPT;
1017 
1018 	/* The embedded headers contain source and dest in reverse order */
1019 	cp = pp->conn_out_get(ipvs, AF_INET6, skb, &ciph);
1020 	if (!cp)
1021 		return NF_ACCEPT;
1022 
1023 	snet.in6 = ciph.saddr.in6;
1024 	offset = ciph.len;
1025 	return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
1026 				    pp, offset, sizeof(struct ipv6hdr),
1027 				    hooknum);
1028 }
1029 #endif
1030 
1031 /*
1032  * Check if the SCTP chunk is an ABORT chunk
1033  */
1034 static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
1035 {
1036 	sctp_chunkhdr_t *sch, schunk;
1037 	sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
1038 			sizeof(schunk), &schunk);
1039 	if (sch == NULL)
1040 		return 0;
1041 	if (sch->type == SCTP_CID_ABORT)
1042 		return 1;
1043 	return 0;
1044 }
1045 
1046 static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
1047 {
1048 	struct tcphdr _tcph, *th;
1049 
1050 	th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
1051 	if (th == NULL)
1052 		return 0;
1053 	return th->rst;
1054 }
1055 
1056 static inline bool is_new_conn(const struct sk_buff *skb,
1057 			       struct ip_vs_iphdr *iph)
1058 {
1059 	switch (iph->protocol) {
1060 	case IPPROTO_TCP: {
1061 		struct tcphdr _tcph, *th;
1062 
1063 		th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
1064 		if (th == NULL)
1065 			return false;
1066 		return th->syn;
1067 	}
1068 	case IPPROTO_SCTP: {
1069 		sctp_chunkhdr_t *sch, schunk;
1070 
1071 		sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t),
1072 					 sizeof(schunk), &schunk);
1073 		if (sch == NULL)
1074 			return false;
1075 		return sch->type == SCTP_CID_INIT;
1076 	}
1077 	default:
1078 		return false;
1079 	}
1080 }
1081 
1082 static inline bool is_new_conn_expected(const struct ip_vs_conn *cp,
1083 					int conn_reuse_mode)
1084 {
1085 	/* Controlled (FTP DATA or persistence)? */
1086 	if (cp->control)
1087 		return false;
1088 
1089 	switch (cp->protocol) {
1090 	case IPPROTO_TCP:
1091 		return (cp->state == IP_VS_TCP_S_TIME_WAIT) ||
1092 			((conn_reuse_mode & 2) &&
1093 			 (cp->state == IP_VS_TCP_S_FIN_WAIT) &&
1094 			 (cp->flags & IP_VS_CONN_F_NOOUTPUT));
1095 	case IPPROTO_SCTP:
1096 		return cp->state == IP_VS_SCTP_S_CLOSED;
1097 	default:
1098 		return false;
1099 	}
1100 }
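/* conn_reuse_mode used above is a bitmap (per-netns sysctl, typically
 * net.ipv4.vs.conn_reuse_mode): 0 disables special port-reuse handling,
 * bit 1 allows rescheduling a reused source port when it is clearly safe
 * (e.g. TCP TIME_WAIT, SCTP CLOSED), and bit 2 additionally covers TCP
 * FIN_WAIT entries that never saw reply traffic - the last state visible to
 * the balancer in DR/TUN mode.  See the ipvs-sysctl documentation for the
 * authoritative description.
 */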
1101 
1102 /* Handle response packets: rewrite addresses and send away...
1103  */
1104 static unsigned int
1105 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1106 		struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1107 		unsigned int hooknum)
1108 {
1109 	struct ip_vs_protocol *pp = pd->pp;
1110 
1111 	IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
1112 
1113 	if (!skb_make_writable(skb, iph->len))
1114 		goto drop;
1115 
1116 	/* mangle the packet */
1117 	if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph))
1118 		goto drop;
1119 
1120 #ifdef CONFIG_IP_VS_IPV6
1121 	if (af == AF_INET6)
1122 		ipv6_hdr(skb)->saddr = cp->vaddr.in6;
1123 	else
1124 #endif
1125 	{
1126 		ip_hdr(skb)->saddr = cp->vaddr.ip;
1127 		ip_send_check(ip_hdr(skb));
1128 	}
1129 
1130 	/*
1131 	 * nf_iterate does not expect change in the skb->dst->dev.
1132 	 * It looks like it is not fatal to enable this code for hooks
1133 	 * where our handlers are at the end of the chain list and
1134 	 * when all next handlers use skb->dst->dev and not outdev.
1135 	 * It will definitely route the in/out NAT traffic properly
1136 	 * when multiple paths are used.
1137 	 */
1138 
1139 	/* For policy routing, packets originating from this
1140 	 * machine itself may be routed differently to packets
1141 	 * passing through.  We want this packet to be routed as
1142 	 * if it came from this machine itself.  So re-compute
1143 	 * the routing information.
1144 	 */
1145 	if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
1146 		goto drop;
1147 
1148 	IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
1149 
1150 	ip_vs_out_stats(cp, skb);
1151 	ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
1152 	skb->ipvs_property = 1;
1153 	if (!(cp->flags & IP_VS_CONN_F_NFCT))
1154 		ip_vs_notrack(skb);
1155 	else
1156 		ip_vs_update_conntrack(skb, cp, 0);
1157 	ip_vs_conn_put(cp);
1158 
1159 	LeaveFunction(11);
1160 	return NF_ACCEPT;
1161 
1162 drop:
1163 	ip_vs_conn_put(cp);
1164 	kfree_skb(skb);
1165 	LeaveFunction(11);
1166 	return NF_STOLEN;
1167 }
1168 
1169 /*
1170  *	Check if outgoing packet belongs to the established ip_vs_conn.
1171  */
1172 static unsigned int
1173 ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1174 {
1175 	struct ip_vs_iphdr iph;
1176 	struct ip_vs_protocol *pp;
1177 	struct ip_vs_proto_data *pd;
1178 	struct ip_vs_conn *cp;
1179 	struct sock *sk;
1180 
1181 	EnterFunction(11);
1182 
1183 	/* Already marked as IPVS request or reply? */
1184 	if (skb->ipvs_property)
1185 		return NF_ACCEPT;
1186 
1187 	sk = skb_to_full_sk(skb);
1188 	/* Bad... Do not break raw sockets */
1189 	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1190 		     af == AF_INET)) {
1191 
1192 		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1193 			return NF_ACCEPT;
1194 	}
1195 
1196 	if (unlikely(!skb_dst(skb)))
1197 		return NF_ACCEPT;
1198 
1199 	if (!ipvs->enable)
1200 		return NF_ACCEPT;
1201 
1202 	ip_vs_fill_iph_skb(af, skb, false, &iph);
1203 #ifdef CONFIG_IP_VS_IPV6
1204 	if (af == AF_INET6) {
1205 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1206 			int related;
1207 			int verdict = ip_vs_out_icmp_v6(ipvs, skb, &related,
1208 							hooknum, &iph);
1209 
1210 			if (related)
1211 				return verdict;
1212 		}
1213 	} else
1214 #endif
1215 		if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1216 			int related;
1217 			int verdict = ip_vs_out_icmp(ipvs, skb, &related, hooknum);
1218 
1219 			if (related)
1220 				return verdict;
1221 		}
1222 
1223 	pd = ip_vs_proto_data_get(ipvs, iph.protocol);
1224 	if (unlikely(!pd))
1225 		return NF_ACCEPT;
1226 	pp = pd->pp;
1227 
1228 	/* reassemble IP fragments */
1229 #ifdef CONFIG_IP_VS_IPV6
1230 	if (af == AF_INET)
1231 #endif
1232 		if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1233 			if (ip_vs_gather_frags(ipvs, skb,
1234 					       ip_vs_defrag_user(hooknum)))
1235 				return NF_STOLEN;
1236 
1237 			ip_vs_fill_iph_skb(AF_INET, skb, false, &iph);
1238 		}
1239 
1240 	/*
1241 	 * Check if the packet belongs to an existing entry
1242 	 */
1243 	cp = pp->conn_out_get(ipvs, af, skb, &iph);
1244 
1245 	if (likely(cp))
1246 		return handle_response(af, skb, pd, cp, &iph, hooknum);
1247 	if (sysctl_nat_icmp_send(ipvs) &&
1248 	    (pp->protocol == IPPROTO_TCP ||
1249 	     pp->protocol == IPPROTO_UDP ||
1250 	     pp->protocol == IPPROTO_SCTP)) {
1251 		__be16 _ports[2], *pptr;
1252 
1253 		pptr = frag_safe_skb_hp(skb, iph.len,
1254 					 sizeof(_ports), _ports, &iph);
1255 		if (pptr == NULL)
1256 			return NF_ACCEPT;	/* Not for me */
1257 		if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
1258 					   pptr[0])) {
1259 			/*
1260 			 * Notify the real server: there is no
1261 			 * existing entry if it is not RST
1262 			 * packet or not TCP packet.
1263 			 */
1264 			if ((iph.protocol != IPPROTO_TCP &&
1265 			     iph.protocol != IPPROTO_SCTP)
1266 			     || ((iph.protocol == IPPROTO_TCP
1267 				  && !is_tcp_reset(skb, iph.len))
1268 				 || (iph.protocol == IPPROTO_SCTP
1269 					&& !is_sctp_abort(skb,
1270 						iph.len)))) {
1271 #ifdef CONFIG_IP_VS_IPV6
1272 				if (af == AF_INET6) {
1273 					if (!skb->dev)
1274 						skb->dev = ipvs->net->loopback_dev;
1275 					icmpv6_send(skb,
1276 						    ICMPV6_DEST_UNREACH,
1277 						    ICMPV6_PORT_UNREACH,
1278 						    0);
1279 				} else
1280 #endif
1281 					icmp_send(skb,
1282 						  ICMP_DEST_UNREACH,
1283 						  ICMP_PORT_UNREACH, 0);
1284 				return NF_DROP;
1285 			}
1286 		}
1287 	}
1288 	IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
1289 		      "ip_vs_out: packet continues traversal as normal");
1290 	return NF_ACCEPT;
1291 }
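/* Summary of the reply path above: a packet matching an existing connection
 * is SNATed via handle_response(); otherwise, if nat_icmp_send is enabled and
 * the packet comes from a configured real service but matches no connection
 * (and is not itself a TCP RST or SCTP ABORT), an ICMP/ICMPv6
 * port-unreachable is returned so the real server can tear down its stale
 * socket; everything else continues traversal untouched.
 */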
1292 
1293 /*
1294  *	It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1295  *	used only for VS/NAT.
1296  *	Check if packet is reply for established ip_vs_conn.
1297  */
1298 static unsigned int
1299 ip_vs_reply4(void *priv, struct sk_buff *skb,
1300 	     const struct nf_hook_state *state)
1301 {
1302 	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1303 }
1304 
1305 /*
1306  *	It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1307  *	Check if packet is reply for established ip_vs_conn.
1308  */
1309 static unsigned int
1310 ip_vs_local_reply4(void *priv, struct sk_buff *skb,
1311 		   const struct nf_hook_state *state)
1312 {
1313 	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
1314 }
1315 
1316 #ifdef CONFIG_IP_VS_IPV6
1317 
1318 /*
1319  *	It is hooked at the NF_INET_FORWARD and NF_INET_LOCAL_IN chain,
1320  *	used only for VS/NAT.
1321  *	Check if packet is reply for established ip_vs_conn.
1322  */
1323 static unsigned int
1324 ip_vs_reply6(void *priv, struct sk_buff *skb,
1325 	     const struct nf_hook_state *state)
1326 {
1327 	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1328 }
1329 
1330 /*
1331  *	It is hooked at the NF_INET_LOCAL_OUT chain, used only for VS/NAT.
1332  *	Check if packet is reply for established ip_vs_conn.
1333  */
1334 static unsigned int
1335 ip_vs_local_reply6(void *priv, struct sk_buff *skb,
1336 		   const struct nf_hook_state *state)
1337 {
1338 	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
1339 }
1340 
1341 #endif
1342 
1343 static unsigned int
1344 ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
1345 		      struct ip_vs_proto_data *pd,
1346 		      int *verdict, struct ip_vs_conn **cpp,
1347 		      struct ip_vs_iphdr *iph)
1348 {
1349 	struct ip_vs_protocol *pp = pd->pp;
1350 
1351 	if (!iph->fragoffs) {
1352 		/* No (second) fragments need to enter here, as fragment zero
1353 		 * replayed by nf_defrag_ipv6 will already have created the cp
1354 		 */
1355 
1356 		/* Schedule and create new connection entry into cpp */
1357 		if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
1358 			return 0;
1359 	}
1360 
1361 	if (unlikely(!*cpp)) {
1362 		/* sorry, all this trouble for a no-hit :) */
1363 		IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
1364 			      "ip_vs_in: packet continues traversal as normal");
1365 		if (iph->fragoffs) {
1366 			/* Fragment that couldn't be mapped to a conn entry
1367 			/* A fragment that couldn't be mapped to a conn entry
1368 			 * means the nf_defrag_ipv6 module is missing
1369 			IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
1370 			IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
1371 				      "unhandled fragment");
1372 		}
1373 		*verdict = NF_ACCEPT;
1374 		return 0;
1375 	}
1376 
1377 	return 1;
1378 }
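/* Return contract of ip_vs_try_to_schedule(): 0 means the caller must return
 * *verdict as-is (either set by the protocol's conn_schedule hook, or
 * defaulted to NF_ACCEPT for no-hit packets and unhandled fragments), while
 * 1 means *cpp now points to a usable connection entry.
 */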
1379 
1380 /*
1381  *	Handle ICMP messages in the outside-to-inside direction (incoming).
1382  *	Find any that might be relevant, check against existing connections,
1383  *	forward to the right destination host if relevant.
1384  *	Currently handles error types - unreachable, quench, ttl exceeded.
1385  */
1386 static int
1387 ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
1388 	      unsigned int hooknum)
1389 {
1390 	struct iphdr *iph;
1391 	struct icmphdr	_icmph, *ic;
1392 	struct iphdr	_ciph, *cih;	/* The ip header contained within the ICMP */
1393 	struct ip_vs_iphdr ciph;
1394 	struct ip_vs_conn *cp;
1395 	struct ip_vs_protocol *pp;
1396 	struct ip_vs_proto_data *pd;
1397 	unsigned int offset, offset2, ihl, verdict;
1398 	bool ipip, new_cp = false;
1399 
1400 	*related = 1;
1401 
1402 	/* reassemble IP fragments */
1403 	if (ip_is_fragment(ip_hdr(skb))) {
1404 		if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
1405 			return NF_STOLEN;
1406 	}
1407 
1408 	iph = ip_hdr(skb);
1409 	offset = ihl = iph->ihl * 4;
1410 	ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1411 	if (ic == NULL)
1412 		return NF_DROP;
1413 
1414 	IP_VS_DBG(12, "Incoming ICMP (%d,%d) %pI4->%pI4\n",
1415 		  ic->type, ntohs(icmp_id(ic)),
1416 		  &iph->saddr, &iph->daddr);
1417 
1418 	/*
1419 	 * Work through seeing if this is for us.
1420 	 * These checks are supposed to be in an order that means easy
1421 	 * things are checked first to speed up processing.... however
1422 	 * this means that some packets will manage to get a long way
1423 	 * down this stack and then be rejected, but that's life.
1424 	 */
1425 	if ((ic->type != ICMP_DEST_UNREACH) &&
1426 	    (ic->type != ICMP_SOURCE_QUENCH) &&
1427 	    (ic->type != ICMP_TIME_EXCEEDED)) {
1428 		*related = 0;
1429 		return NF_ACCEPT;
1430 	}
1431 
1432 	/* Now find the contained IP header */
1433 	offset += sizeof(_icmph);
1434 	cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1435 	if (cih == NULL)
1436 		return NF_ACCEPT; /* The packet looks wrong, ignore */
1437 
1438 	/* Special case for errors for IPIP packets */
1439 	ipip = false;
1440 	if (cih->protocol == IPPROTO_IPIP) {
1441 		if (unlikely(cih->frag_off & htons(IP_OFFSET)))
1442 			return NF_ACCEPT;
1443 		/* Error for our IPIP must arrive at LOCAL_IN */
1444 		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
1445 			return NF_ACCEPT;
1446 		offset += cih->ihl * 4;
1447 		cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1448 		if (cih == NULL)
1449 			return NF_ACCEPT; /* The packet looks wrong, ignore */
1450 		ipip = true;
1451 	}
1452 
1453 	pd = ip_vs_proto_data_get(ipvs, cih->protocol);
1454 	if (!pd)
1455 		return NF_ACCEPT;
1456 	pp = pd->pp;
1457 
1458 	/* Is the embedded protocol header present? */
1459 	if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
1460 		     pp->dont_defrag))
1461 		return NF_ACCEPT;
1462 
1463 	IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
1464 		      "Checking incoming ICMP for");
1465 
1466 	offset2 = offset;
1467 	ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !ipip, &ciph);
1468 	offset = ciph.len;
1469 
1470 	/* The embedded headers contain source and dest in reverse order.
1471 	 * For IPIP this is error for request, not for reply.
1472 	 */
1473 	cp = pp->conn_in_get(ipvs, AF_INET, skb, &ciph);
1474 
1475 	if (!cp) {
1476 		int v;
1477 
1478 		if (!sysctl_schedule_icmp(ipvs))
1479 			return NF_ACCEPT;
1480 
1481 		if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
1482 			return v;
1483 		new_cp = true;
1484 	}
1485 
1486 	verdict = NF_DROP;
1487 
1488 	/* Ensure the checksum is correct */
1489 	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
1490 		/* Failed checksum! */
1491 		IP_VS_DBG(1, "Incoming ICMP: failed checksum from %pI4!\n",
1492 			  &iph->saddr);
1493 		goto out;
1494 	}
1495 
1496 	if (ipip) {
1497 		__be32 info = ic->un.gateway;
1498 		__u8 type = ic->type;
1499 		__u8 code = ic->code;
1500 
1501 		/* Update the MTU */
1502 		if (ic->type == ICMP_DEST_UNREACH &&
1503 		    ic->code == ICMP_FRAG_NEEDED) {
1504 			struct ip_vs_dest *dest = cp->dest;
1505 			u32 mtu = ntohs(ic->un.frag.mtu);
1506 			__be16 frag_off = cih->frag_off;
1507 
1508 			/* Strip outer IP and ICMP, go to IPIP header */
1509 			if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
1510 				goto ignore_ipip;
1511 			offset2 -= ihl + sizeof(_icmph);
1512 			skb_reset_network_header(skb);
1513 			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
1514 				&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
1515 			ipv4_update_pmtu(skb, ipvs->net,
1516 					 mtu, 0, 0, 0, 0);
1517 			/* Client uses PMTUD? */
1518 			if (!(frag_off & htons(IP_DF)))
1519 				goto ignore_ipip;
1520 			/* Prefer the resulting PMTU */
1521 			if (dest) {
1522 				struct ip_vs_dest_dst *dest_dst;
1523 
1524 				rcu_read_lock();
1525 				dest_dst = rcu_dereference(dest->dest_dst);
1526 				if (dest_dst)
1527 					mtu = dst_mtu(dest_dst->dst_cache);
1528 				rcu_read_unlock();
1529 			}
1530 			if (mtu > 68 + sizeof(struct iphdr))
1531 				mtu -= sizeof(struct iphdr);
1532 			info = htonl(mtu);
1533 		}
1534 		/* Strip outer IP, ICMP and IPIP, go to IP header of
1535 		 * original request.
1536 		 */
1537 		if (pskb_pull(skb, offset2) == NULL)
1538 			goto ignore_ipip;
1539 		skb_reset_network_header(skb);
1540 		IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
1541 			&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1542 			type, code, ntohl(info));
1543 		icmp_send(skb, type, code, info);
1544 		/* ICMP can be shorter but anyways, account it */
1545 		ip_vs_out_stats(cp, skb);
1546 
1547 ignore_ipip:
1548 		consume_skb(skb);
1549 		verdict = NF_STOLEN;
1550 		goto out;
1551 	}
1552 
1553 	/* do the statistics and put it back */
1554 	ip_vs_in_stats(cp, skb);
1555 	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1556 	    IPPROTO_SCTP == cih->protocol)
1557 		offset += 2 * sizeof(__u16);
1558 	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1559 
1560 out:
1561 	if (likely(!new_cp))
1562 		__ip_vs_conn_put(cp);
1563 	else
1564 		ip_vs_conn_put(cp);
1565 
1566 	return verdict;
1567 }
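/* The IPIP branch above serves real servers reached through an IPIP tunnel
 * (LVS-TUN): when a router reports ICMP_FRAG_NEEDED for the encapsulated
 * packet, the outer IP/ICMP (and IPIP) headers are stripped, the learned MTU
 * is fed into ipv4_update_pmtu(), and a fresh ICMP error - with the MTU
 * reduced by the IPIP header size - is sent back to the original client so
 * that its path MTU discovery converges.
 */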
1568 
1569 #ifdef CONFIG_IP_VS_IPV6
1570 static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
1571 			    int *related, unsigned int hooknum,
1572 			    struct ip_vs_iphdr *iph)
1573 {
1574 	struct icmp6hdr	_icmph, *ic;
1575 	struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
1576 	struct ip_vs_conn *cp;
1577 	struct ip_vs_protocol *pp;
1578 	struct ip_vs_proto_data *pd;
1579 	unsigned int offset, verdict;
1580 	bool new_cp = false;
1581 
1582 	*related = 1;
1583 
1584 	ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
1585 	if (ic == NULL)
1586 		return NF_DROP;
1587 
1588 	/*
1589 	 * Work through seeing if this is for us.
1590 	 * These checks are supposed to be in an order that means easy
1591 	 * things are checked first to speed up processing.... however
1592 	 * this means that some packets will manage to get a long way
1593 	 * down this stack and then be rejected, but that's life.
1594 	 */
1595 	if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) {
1596 		*related = 0;
1597 		return NF_ACCEPT;
1598 	}
1599 	/* A fragment header before the ICMP header tells us that this
1600 	 * is not an error message, since error messages can't be fragmented.
1601 	 */
1602 	if (iph->flags & IP6_FH_F_FRAG)
1603 		return NF_DROP;
1604 
1605 	IP_VS_DBG(8, "Incoming ICMPv6 (%d,%d) %pI6c->%pI6c\n",
1606 		  ic->icmp6_type, ntohs(icmpv6_id(ic)),
1607 		  &iph->saddr, &iph->daddr);
1608 
1609 	offset = iph->len + sizeof(_icmph);
1610 	if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, offset, true, &ciph))
1611 		return NF_ACCEPT;
1612 
1613 	pd = ip_vs_proto_data_get(ipvs, ciph.protocol);
1614 	if (!pd)
1615 		return NF_ACCEPT;
1616 	pp = pd->pp;
1617 
1618 	/* Cannot handle fragmented embedded protocol */
1619 	if (ciph.fragoffs)
1620 		return NF_ACCEPT;
1621 
1622 	IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
1623 		      "Checking incoming ICMPv6 for");
1624 
1625 	/* The embedded headers contain source and dest in reverse order
1626 	 * if not from localhost
1627 	 */
1628 	cp = pp->conn_in_get(ipvs, AF_INET6, skb, &ciph);
1629 
1630 	if (!cp) {
1631 		int v;
1632 
1633 		if (!sysctl_schedule_icmp(ipvs))
1634 			return NF_ACCEPT;
1635 
1636 		if (!ip_vs_try_to_schedule(ipvs, AF_INET6, skb, pd, &v, &cp, &ciph))
1637 			return v;
1638 
1639 		new_cp = true;
1640 	}
1641 
1642 	/* VS/TUN, VS/DR and LOCALNODE just let it go */
1643 	if ((hooknum == NF_INET_LOCAL_OUT) &&
1644 	    (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)) {
1645 		verdict = NF_ACCEPT;
1646 		goto out;
1647 	}
1648 
1649 	/* do the statistics and put it back */
1650 	ip_vs_in_stats(cp, skb);
1651 
1652 	/* Need to mangle contained IPv6 header in ICMPv6 packet */
1653 	offset = ciph.len;
1654 	if (IPPROTO_TCP == ciph.protocol || IPPROTO_UDP == ciph.protocol ||
1655 	    IPPROTO_SCTP == ciph.protocol)
1656 		offset += 2 * sizeof(__u16); /* Also mangle ports */
1657 
1658 	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, &ciph);
1659 
1660 out:
1661 	if (likely(!new_cp))
1662 		__ip_vs_conn_put(cp);
1663 	else
1664 		ip_vs_conn_put(cp);
1665 
1666 	return verdict;
1667 }
1668 #endif
1669 
1670 
1671 /*
1672  *	Check if it's for virtual services, look it up,
1673  *	and send it on its way...
1674  */
1675 static unsigned int
1676 ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
1677 {
1678 	struct ip_vs_iphdr iph;
1679 	struct ip_vs_protocol *pp;
1680 	struct ip_vs_proto_data *pd;
1681 	struct ip_vs_conn *cp;
1682 	int ret, pkts;
1683 	int conn_reuse_mode;
1684 	struct sock *sk;
1685 
1686 	/* Already marked as IPVS request or reply? */
1687 	if (skb->ipvs_property)
1688 		return NF_ACCEPT;
1689 
1690 	/*
1691 	 *	Big tappo:
1692 	 *	- remote client: only PACKET_HOST
1693 	 *	- route: used for struct net when skb->dev is unset
1694 	 */
1695 	if (unlikely((skb->pkt_type != PACKET_HOST &&
1696 		      hooknum != NF_INET_LOCAL_OUT) ||
1697 		     !skb_dst(skb))) {
1698 		ip_vs_fill_iph_skb(af, skb, false, &iph);
1699 		IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s"
1700 			      " ignored in hook %u\n",
1701 			      skb->pkt_type, iph.protocol,
1702 			      IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
1703 		return NF_ACCEPT;
1704 	}
1705 	/* ipvs enabled in this netns ? */
1706 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1707 		return NF_ACCEPT;
1708 
1709 	ip_vs_fill_iph_skb(af, skb, false, &iph);
1710 
1711 	/* Bad... Do not break raw sockets */
1712 	sk = skb_to_full_sk(skb);
1713 	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1714 		     af == AF_INET)) {
1715 
1716 		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1717 			return NF_ACCEPT;
1718 	}
1719 
1720 #ifdef CONFIG_IP_VS_IPV6
1721 	if (af == AF_INET6) {
1722 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
1723 			int related;
1724 			int verdict = ip_vs_in_icmp_v6(ipvs, skb, &related,
1725 						       hooknum, &iph);
1726 
1727 			if (related)
1728 				return verdict;
1729 		}
1730 	} else
1731 #endif
1732 		if (unlikely(iph.protocol == IPPROTO_ICMP)) {
1733 			int related;
1734 			int verdict = ip_vs_in_icmp(ipvs, skb, &related,
1735 						    hooknum);
1736 
1737 			if (related)
1738 				return verdict;
1739 		}
1740 
1741 	/* Protocol supported? */
1742 	pd = ip_vs_proto_data_get(ipvs, iph.protocol);
1743 	if (unlikely(!pd)) {
1744 		/* The only way we'll see this packet again is if it's
1745 		 * encapsulated, so mark it with ipvs_property=1 so we
1746 		 * skip it if we're ignoring tunneled packets
1747 		 */
1748 		if (sysctl_ignore_tunneled(ipvs))
1749 			skb->ipvs_property = 1;
1750 
1751 		return NF_ACCEPT;
1752 	}
1753 	pp = pd->pp;
1754 	/*
1755 	 * Check if the packet belongs to an existing connection entry
1756 	 */
1757 	cp = pp->conn_in_get(ipvs, af, skb, &iph);
1758 
1759 	conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
1760 	if (conn_reuse_mode && !iph.fragoffs &&
1761 	    is_new_conn(skb, &iph) && cp &&
1762 	    ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
1763 	      unlikely(!atomic_read(&cp->dest->weight))) ||
1764 	     unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
1765 		if (!atomic_read(&cp->n_control))
1766 			ip_vs_conn_expire_now(cp);
1767 		__ip_vs_conn_put(cp);
1768 		cp = NULL;
1769 	}
1770 
1771 	if (unlikely(!cp)) {
1772 		int v;
1773 
1774 		if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
1775 			return v;
1776 	}
1777 
1778 	IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet");
1779 
1780 	/* Check the server status */
1781 	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1782 		/* the destination server is not available */
1783 
1784 		if (sysctl_expire_nodest_conn(ipvs)) {
1785 			/* try to expire the connection immediately */
1786 			ip_vs_conn_expire_now(cp);
1787 		}
1788 		/* don't restart its timer, and silently
1789 		   drop the packet. */
1790 		__ip_vs_conn_put(cp);
1791 		return NF_DROP;
1792 	}
1793 
1794 	ip_vs_in_stats(cp, skb);
1795 	ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
1796 	if (cp->packet_xmit)
1797 		ret = cp->packet_xmit(skb, cp, pp, &iph);
1798 		/* do not touch skb anymore */
1799 	else {
1800 		IP_VS_DBG_RL("warning: packet_xmit is null");
1801 		ret = NF_ACCEPT;
1802 	}
1803 
1804 	/* Increase its packet counter and check whether it needs
1805 	 * to be synchronized
1806 	 *
1807 	 * Sync the connection if it is about to close, to
1808 	 * encourage the standby servers to update the connection's timeout
1809 	 *
1810 	 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
1811 	 */
1812 
1813 	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
1814 		pkts = sysctl_sync_threshold(ipvs);
1815 	else
1816 		pkts = atomic_add_return(1, &cp->in_pkts);
1817 
1818 	if (ipvs->sync_state & IP_VS_STATE_MASTER)
1819 		ip_vs_sync_conn(ipvs, cp, pkts);
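	/* Editorial note: for ONE_PACKET (typically UDP one-packet scheduling)
	 * connections, in_pkts carries no useful history, so pkts is preloaded
	 * with the sync threshold and ip_vs_sync_conn() applies its own filter;
	 * otherwise every packet increments cp->in_pkts and the master sync
	 * daemon decides, based on the sync threshold/period settings, whether
	 * this packet triggers a sync message.
	 */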
1820 
1821 	ip_vs_conn_put(cp);
1822 	return ret;
1823 }
1824 
1825 /*
1826  *	AF_INET handler in NF_INET_LOCAL_IN chain
1827  *	Schedule and forward packets from remote clients
1828  */
1829 static unsigned int
1830 ip_vs_remote_request4(void *priv, struct sk_buff *skb,
1831 		      const struct nf_hook_state *state)
1832 {
1833 	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
1834 }
1835 
1836 /*
1837  *	AF_INET handler in NF_INET_LOCAL_OUT chain
1838  *	Schedule and forward packets from local clients
1839  */
1840 static unsigned int
1841 ip_vs_local_request4(void *priv, struct sk_buff *skb,
1842 		     const struct nf_hook_state *state)
1843 {
1844 	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
1845 }
1846 
1847 #ifdef CONFIG_IP_VS_IPV6
1848 
1849 /*
1850  *	AF_INET6 handler in NF_INET_LOCAL_IN chain
1851  *	Schedule and forward packets from remote clients
1852  */
1853 static unsigned int
1854 ip_vs_remote_request6(void *priv, struct sk_buff *skb,
1855 		      const struct nf_hook_state *state)
1856 {
1857 	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
1858 }
1859 
1860 /*
1861  *	AF_INET6 handler in NF_INET_LOCAL_OUT chain
1862  *	Schedule and forward packets from local clients
1863  */
1864 static unsigned int
1865 ip_vs_local_request6(void *priv, struct sk_buff *skb,
1866 		     const struct nf_hook_state *state)
1867 {
1868 	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
1869 }
1870 
1871 #endif
1872 
1873 
1874 /*
1875  *	It is hooked at the NF_INET_FORWARD chain, in order to catch ICMP
1876  *      related packets destined for 0.0.0.0/0.
1877  *      When fwmark-based virtual service is used, such as transparent
1878  *      cache cluster, TCP packets can be marked and routed to ip_vs_in,
1879  *      but ICMP destined for 0.0.0.0/0 cannot be easily marked and
1880  *      sent to ip_vs_in_icmp. So, catch them at the NF_INET_FORWARD chain
1881  *      and send them to ip_vs_in_icmp.
1882  */
1883 static unsigned int
1884 ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
1885 		   const struct nf_hook_state *state)
1886 {
1887 	int r;
1888 	struct netns_ipvs *ipvs = net_ipvs(state->net);
1889 
1890 	if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
1891 		return NF_ACCEPT;
1892 
1893 	/* Is IPVS enabled in this netns? */
1894 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1895 		return NF_ACCEPT;
1896 
1897 	return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
1898 }
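/* An illustrative sketch (not part of this file) of the fwmark-based service
 * described above; the mark value, port and real-server address are
 * hypothetical placeholders:
 *
 *   # mark the TCP traffic for the virtual service in the mangle table
 *   iptables -t mangle -A PREROUTING -p tcp --dport 80 -j MARK --set-mark 1
 *   # create a fwmark virtual service and add one real server (direct routing)
 *   ipvsadm -A -f 1 -s wlc
 *   ipvsadm -a -f 1 -r 192.168.10.5 -g
 *
 * ICMP errors related to such marked flows are the packets that the
 * FORWARD-chain hook above hands to ip_vs_in_icmp().
 */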
1899 
1900 #ifdef CONFIG_IP_VS_IPV6
1901 static unsigned int
1902 ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb,
1903 		      const struct nf_hook_state *state)
1904 {
1905 	int r;
1906 	struct netns_ipvs *ipvs = net_ipvs(state->net);
1907 	struct ip_vs_iphdr iphdr;
1908 
1909 	ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
1910 	if (iphdr.protocol != IPPROTO_ICMPV6)
1911 		return NF_ACCEPT;
1912 
1913 	/* Is IPVS enabled in this netns? */
1914 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
1915 		return NF_ACCEPT;
1916 
1917 	return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
1918 }
1919 #endif
1920 
1921 
1922 static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1923 	/* After packet filtering, change source only for VS/NAT */
1924 	{
1925 		.hook		= ip_vs_reply4,
1926 		.pf		= NFPROTO_IPV4,
1927 		.hooknum	= NF_INET_LOCAL_IN,
1928 		.priority	= NF_IP_PRI_NAT_SRC - 2,
1929 	},
1930 	/* After packet filtering, forward packet through VS/DR, VS/TUN,
1931 	 * or VS/NAT (change destination), so that filtering rules can be
1932 	 * applied to IPVS. */
1933 	{
1934 		.hook		= ip_vs_remote_request4,
1935 		.pf		= NFPROTO_IPV4,
1936 		.hooknum	= NF_INET_LOCAL_IN,
1937 		.priority	= NF_IP_PRI_NAT_SRC - 1,
1938 	},
1939 	/* Before ip_vs_in, change source only for VS/NAT */
1940 	{
1941 		.hook		= ip_vs_local_reply4,
1942 		.pf		= NFPROTO_IPV4,
1943 		.hooknum	= NF_INET_LOCAL_OUT,
1944 		.priority	= NF_IP_PRI_NAT_DST + 1,
1945 	},
1946 	/* After mangle, schedule and forward local requests */
1947 	{
1948 		.hook		= ip_vs_local_request4,
1949 		.pf		= NFPROTO_IPV4,
1950 		.hooknum	= NF_INET_LOCAL_OUT,
1951 		.priority	= NF_IP_PRI_NAT_DST + 2,
1952 	},
1953 	/* After packet filtering (but before ip_vs_out_icmp), catch icmp
1954 	 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1955 	{
1956 		.hook		= ip_vs_forward_icmp,
1957 		.pf		= NFPROTO_IPV4,
1958 		.hooknum	= NF_INET_FORWARD,
1959 		.priority	= 99,
1960 	},
1961 	/* After packet filtering, change source only for VS/NAT */
1962 	{
1963 		.hook		= ip_vs_reply4,
1964 		.pf		= NFPROTO_IPV4,
1965 		.hooknum	= NF_INET_FORWARD,
1966 		.priority	= 100,
1967 	},
1968 #ifdef CONFIG_IP_VS_IPV6
1969 	/* After packet filtering, change source only for VS/NAT */
1970 	{
1971 		.hook		= ip_vs_reply6,
1972 		.pf		= NFPROTO_IPV6,
1973 		.hooknum	= NF_INET_LOCAL_IN,
1974 		.priority	= NF_IP6_PRI_NAT_SRC - 2,
1975 	},
1976 	/* After packet filtering, forward packet through VS/DR, VS/TUN,
1977 	 * or VS/NAT (change destination), so that filtering rules can be
1978 	 * applied to IPVS. */
1979 	{
1980 		.hook		= ip_vs_remote_request6,
1981 		.pf		= NFPROTO_IPV6,
1982 		.hooknum	= NF_INET_LOCAL_IN,
1983 		.priority	= NF_IP6_PRI_NAT_SRC - 1,
1984 	},
1985 	/* Before ip_vs_in, change source only for VS/NAT */
1986 	{
1987 		.hook		= ip_vs_local_reply6,
1988 		.pf		= NFPROTO_IPV6,
1989 		.hooknum	= NF_INET_LOCAL_OUT,
1990 		.priority	= NF_IP6_PRI_NAT_DST + 1,
1991 	},
1992 	/* After mangle, schedule and forward local requests */
1993 	{
1994 		.hook		= ip_vs_local_request6,
1995 		.pf		= NFPROTO_IPV6,
1996 		.hooknum	= NF_INET_LOCAL_OUT,
1997 		.priority	= NF_IP6_PRI_NAT_DST + 2,
1998 	},
1999 	/* After packet filtering (but before ip_vs_out_icmp), catch icmp
2000 	 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
2001 	{
2002 		.hook		= ip_vs_forward_icmp_v6,
2003 		.pf		= NFPROTO_IPV6,
2004 		.hooknum	= NF_INET_FORWARD,
2005 		.priority	= 99,
2006 	},
2007 	/* After packet filtering, change source only for VS/NAT */
2008 	{
2009 		.hook		= ip_vs_reply6,
2010 		.pf		= NFPROTO_IPV6,
2011 		.hooknum	= NF_INET_FORWARD,
2012 		.priority	= 100,
2013 	},
2014 #endif
2015 };
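/* Editorial note: assuming the standard netfilter priority values
 * (NF_IP_PRI_NAT_DST = -100, NF_IP_PRI_FILTER = 0, NF_IP_PRI_NAT_SRC = 100,
 * likewise for the NF_IP6_* counterparts), the table above resolves to:
 *
 *   LOCAL_IN : filter (0) -> ip_vs_reply4/6 (98) ->
 *              ip_vs_remote_request4/6 (99) -> source NAT (100)
 *   LOCAL_OUT: destination NAT (-100) -> ip_vs_local_reply4/6 (-99) ->
 *              ip_vs_local_request4/6 (-98)
 *   FORWARD  : filter (0) -> ip_vs_forward_icmp[_v6] (99) ->
 *              ip_vs_reply4/6 (100)
 *
 * i.e. replies are mangled just before requests are scheduled, and the
 * LOCAL_IN/FORWARD hooks run after the packet filter so firewall rules still
 * see IPVS traffic.
 */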
2016 /*
2017  *	Initialize per-netns IP Virtual Server state.
2018  */
2019 static int __net_init __ip_vs_init(struct net *net)
2020 {
2021 	struct netns_ipvs *ipvs;
2022 
2023 	ipvs = net_generic(net, ip_vs_net_id);
2024 	if (ipvs == NULL)
2025 		return -ENOMEM;
2026 
2027 	/* Hold the beast until a service is registered */
2028 	ipvs->enable = 0;
2029 	ipvs->net = net;
2030 	/* Counters used for creating unique names */
2031 	ipvs->gen = atomic_read(&ipvs_netns_cnt);
2032 	atomic_inc(&ipvs_netns_cnt);
2033 	net->ipvs = ipvs;
2034 
2035 	if (ip_vs_estimator_net_init(ipvs) < 0)
2036 		goto estimator_fail;
2037 
2038 	if (ip_vs_control_net_init(ipvs) < 0)
2039 		goto control_fail;
2040 
2041 	if (ip_vs_protocol_net_init(ipvs) < 0)
2042 		goto protocol_fail;
2043 
2044 	if (ip_vs_app_net_init(ipvs) < 0)
2045 		goto app_fail;
2046 
2047 	if (ip_vs_conn_net_init(ipvs) < 0)
2048 		goto conn_fail;
2049 
2050 	if (ip_vs_sync_net_init(ipvs) < 0)
2051 		goto sync_fail;
2052 
2053 	printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
2054 			 sizeof(struct netns_ipvs), ipvs->gen);
2055 	return 0;
2056 /*
2057  * Error handling
2058  */
2059 
2060 sync_fail:
2061 	ip_vs_conn_net_cleanup(ipvs);
2062 conn_fail:
2063 	ip_vs_app_net_cleanup(ipvs);
2064 app_fail:
2065 	ip_vs_protocol_net_cleanup(ipvs);
2066 protocol_fail:
2067 	ip_vs_control_net_cleanup(ipvs);
2068 control_fail:
2069 	ip_vs_estimator_net_cleanup(ipvs);
2070 estimator_fail:
2071 	net->ipvs = NULL;
2072 	return -ENOMEM;
2073 }
2074 
2075 static void __net_exit __ip_vs_cleanup(struct net *net)
2076 {
2077 	struct netns_ipvs *ipvs = net_ipvs(net);
2078 
2079 	ip_vs_service_net_cleanup(ipvs);	/* ip_vs_flush() with locks */
2080 	ip_vs_conn_net_cleanup(ipvs);
2081 	ip_vs_app_net_cleanup(ipvs);
2082 	ip_vs_protocol_net_cleanup(ipvs);
2083 	ip_vs_control_net_cleanup(ipvs);
2084 	ip_vs_estimator_net_cleanup(ipvs);
2085 	IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
2086 	net->ipvs = NULL;
2087 }
2088 
2089 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
2090 {
2091 	struct netns_ipvs *ipvs = net_ipvs(net);
2092 	EnterFunction(2);
2093 	ipvs->enable = 0;	/* Disable packet reception */
2094 	smp_wmb();
2095 	ip_vs_sync_net_cleanup(ipvs);
2096 	LeaveFunction(2);
2097 }
2098 
2099 static struct pernet_operations ipvs_core_ops = {
2100 	.init = __ip_vs_init,
2101 	.exit = __ip_vs_cleanup,
2102 	.id   = &ip_vs_net_id,
2103 	.size = sizeof(struct netns_ipvs),
2104 };
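/* Editorial note: because ipvs_core_ops supplies both .id and .size, the
 * pernet infrastructure allocates and zeroes a struct netns_ipvs for each
 * network namespace before __ip_vs_init() runs, so the net_generic() result
 * checked there is only a defensive NULL test.
 */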
2105 
2106 static struct pernet_operations ipvs_core_dev_ops = {
2107 	.exit = __ip_vs_dev_cleanup,
2108 };
2109 
2110 /*
2111  *	Initialize IP Virtual Server
2112  */
2113 static int __init ip_vs_init(void)
2114 {
2115 	int ret;
2116 
2117 	ret = ip_vs_control_init();
2118 	if (ret < 0) {
2119 		pr_err("can't setup control.\n");
2120 		goto exit;
2121 	}
2122 
2123 	ip_vs_protocol_init();
2124 
2125 	ret = ip_vs_conn_init();
2126 	if (ret < 0) {
2127 		pr_err("can't setup connection table.\n");
2128 		goto cleanup_protocol;
2129 	}
2130 
2131 	ret = register_pernet_subsys(&ipvs_core_ops);	/* Alloc ip_vs struct */
2132 	if (ret < 0)
2133 		goto cleanup_conn;
2134 
2135 	ret = register_pernet_device(&ipvs_core_dev_ops);
2136 	if (ret < 0)
2137 		goto cleanup_sub;
2138 
2139 	ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2140 	if (ret < 0) {
2141 		pr_err("can't register hooks.\n");
2142 		goto cleanup_dev;
2143 	}
2144 
2145 	ret = ip_vs_register_nl_ioctl();
2146 	if (ret < 0) {
2147 		pr_err("can't register netlink/ioctl.\n");
2148 		goto cleanup_hooks;
2149 	}
2150 
2151 	pr_info("ipvs loaded.\n");
2152 
2153 	return ret;
2154 
2155 cleanup_hooks:
2156 	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2157 cleanup_dev:
2158 	unregister_pernet_device(&ipvs_core_dev_ops);
2159 cleanup_sub:
2160 	unregister_pernet_subsys(&ipvs_core_ops);
2161 cleanup_conn:
2162 	ip_vs_conn_cleanup();
2163 cleanup_protocol:
2164 	ip_vs_protocol_cleanup();
2165 	ip_vs_control_cleanup();
2166 exit:
2167 	return ret;
2168 }
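/* Editorial note: module unload below tears everything down in the reverse
 * order of ip_vs_init(), matching the error-unwinding labels above.
 */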
2169 
2170 static void __exit ip_vs_cleanup(void)
2171 {
2172 	ip_vs_unregister_nl_ioctl();
2173 	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2174 	unregister_pernet_device(&ipvs_core_dev_ops);
2175 	unregister_pernet_subsys(&ipvs_core_ops);	/* free ip_vs struct */
2176 	ip_vs_conn_cleanup();
2177 	ip_vs_protocol_cleanup();
2178 	ip_vs_control_cleanup();
2179 	pr_info("ipvs unloaded.\n");
2180 }
2181 
2182 module_init(ip_vs_init);
2183 module_exit(ip_vs_cleanup);
2184 MODULE_LICENSE("GPL");
2185