xref: /openbmc/linux/include/net/ip_vs.h (revision ca460cc2)
1 /* IP Virtual Server
2  * data structure and functionality definitions
3  */
4 
5 #ifndef _NET_IP_VS_H
6 #define _NET_IP_VS_H
7 
8 #include <linux/ip_vs.h>                /* definitions shared with userland */
9 
10 #include <asm/types.h>                  /* for __uXX types */
11 
12 #include <linux/list.h>                 /* for struct list_head */
13 #include <linux/spinlock.h>             /* for spinlock_t */
14 #include <linux/atomic.h>               /* for atomic_t */
15 #include <linux/compiler.h>
16 #include <linux/timer.h>
17 #include <linux/bug.h>
18 
19 #include <net/checksum.h>
20 #include <linux/netfilter.h>		/* for union nf_inet_addr */
21 #include <linux/ip.h>
22 #include <linux/ipv6.h>			/* for struct ipv6hdr */
23 #include <net/ipv6.h>
24 #if IS_ENABLED(CONFIG_IP_VS_IPV6)
25 #include <linux/netfilter_ipv6/ip6_tables.h>
26 #endif
27 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
28 #include <net/netfilter/nf_conntrack.h>
29 #endif
30 #include <net/net_namespace.h>		/* for network namespaces */
31 
32 /* Generic access to the ipvs struct of a net namespace */
33 static inline struct netns_ipvs *net_ipvs(struct net *net)
34 {
35 	return net->ipvs;
36 }
37 
38 /* Get the net pointer from an skb on the traffic (packet) path.
39  * Use skb_sknet() when the call comes from userland (ioctl or netlink).
40  */
41 static inline struct net *skb_net(const struct sk_buff *skb)
42 {
43 #ifdef CONFIG_NET_NS
44 #ifdef CONFIG_IP_VS_DEBUG
45 	/*
46 	 * This is used for debugging only:
47 	 * start with the most likely hit and
48 	 * end with BUG() if nothing is found.
49 	 */
50 	if (likely(skb->dev && skb->dev->nd_net))
51 		return dev_net(skb->dev);
52 	if (skb_dst(skb) && skb_dst(skb)->dev)
53 		return dev_net(skb_dst(skb)->dev);
54 	WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
55 		      __func__, __LINE__);
56 	if (likely(skb->sk && skb->sk->sk_net))
57 		return sock_net(skb->sk);
58 	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
59 		__func__, __LINE__);
60 	BUG();
61 #else
62 	return dev_net(skb->dev ? : skb_dst(skb)->dev);
63 #endif
64 #else
65 	return &init_net;
66 #endif
67 }
68 
69 static inline struct net *skb_sknet(const struct sk_buff *skb)
70 {
71 #ifdef CONFIG_NET_NS
72 #ifdef CONFIG_IP_VS_DEBUG
73 	/* Start with the most likely hit */
74 	if (likely(skb->sk && skb->sk->sk_net))
75 		return sock_net(skb->sk);
76 	WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
77 		       __func__, __LINE__);
78 	if (likely(skb->dev && skb->dev->nd_net))
79 		return dev_net(skb->dev);
80 	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
81 		__func__, __LINE__);
82 	BUG();
83 #else
84 	return sock_net(skb->sk);
85 #endif
86 #else
87 	return &init_net;
88 #endif
89 }
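
/* Illustrative sketch (not part of the API) of picking the right helper:
 * on the packet path (e.g. inside a netfilter hook) use skb_net(); in a
 * userland-triggered context (ioctl or netlink) use skb_sknet():
 *
 *	On the packet path:        ipvs = net_ipvs(skb_net(skb));
 *	From ioctl/netlink paths:  ipvs = net_ipvs(skb_sknet(skb));
 */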
90 
91 /* Needed for single_open_net(): the net pointer is stored directly in
92  * seq->private, not in a wrapper struct, so seq_file_net() can't be used.
93  */
94 static inline struct net *seq_file_single_net(struct seq_file *seq)
95 {
96 #ifdef CONFIG_NET_NS
97 	return (struct net *)seq->private;
98 #else
99 	return &init_net;
100 #endif
101 }
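
/* Usage sketch (hypothetical show handler): when a proc file is created
 * with single_open_net(), seq->private is the struct net itself, so the
 * handler reads it back with seq_file_single_net():
 *
 *	static int foo_seq_show(struct seq_file *seq, void *v)
 *	{
 *		struct net *net = seq_file_single_net(seq);
 *
 *		seq_printf(seq, "conns %d\n",
 *			   atomic_read(&net_ipvs(net)->conn_count));
 *		return 0;
 *	}
 */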
102 
103 /* Connection table size, needed by ip_vs_ctl.c */
104 extern int ip_vs_conn_tab_size;
105 
106 struct ip_vs_iphdr {
107 	__u32 len;	/* IPv4: simply where the L4 header starts;
108 			 * IPv6: where the L4 transport header starts */
109 	__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
110 	__s16 protocol;
111 	__s32 flags;
112 	union nf_inet_addr saddr;
113 	union nf_inet_addr daddr;
114 };
115 
116 static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
117 				      int len, void *buffer,
118 				      const struct ip_vs_iphdr *ipvsh)
119 {
120 	return skb_header_pointer(skb, offset, len, buffer);
121 }
122 
123 static inline void
124 ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
125 {
126 	const struct iphdr *iph = nh;
127 
128 	iphdr->len	= iph->ihl * 4;
129 	iphdr->fragoffs	= 0;
130 	iphdr->protocol	= iph->protocol;
131 	iphdr->saddr.ip	= iph->saddr;
132 	iphdr->daddr.ip	= iph->daddr;
133 }
134 
135 /* This function fills *ip_vs_iphdr, both for IPv4 and IPv6.
136  * IPv6 requires some extra work, as finding the proper header
137  * position depends on the IPv6 extension headers present.
138  */
139 static inline void
140 ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr)
141 {
142 #ifdef CONFIG_IP_VS_IPV6
143 	if (af == AF_INET6) {
144 		const struct ipv6hdr *iph =
145 			(struct ipv6hdr *)skb_network_header(skb);
146 		iphdr->saddr.in6 = iph->saddr;
147 		iphdr->daddr.in6 = iph->daddr;
148 		/* ipv6_find_hdr() updates len, flags */
149 		iphdr->len	 = 0;
150 		iphdr->flags	 = 0;
151 		iphdr->protocol  = ipv6_find_hdr(skb, &iphdr->len, -1,
152 						 &iphdr->fragoffs,
153 						 &iphdr->flags);
154 	} else
155 #endif
156 	{
157 		const struct iphdr *iph =
158 			(struct iphdr *)skb_network_header(skb);
159 		iphdr->len	= iph->ihl * 4;
160 		iphdr->fragoffs	= 0;
161 		iphdr->protocol	= iph->protocol;
162 		iphdr->saddr.ip	= iph->saddr;
163 		iphdr->daddr.ip	= iph->daddr;
164 	}
165 }
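
/* Usage sketch (illustrative): fill the header descriptor once and reuse
 * its fields instead of re-parsing the skb:
 *
 *	struct tcphdr _tcph, *th;
 *	struct ip_vs_iphdr iph;
 *
 *	ip_vs_fill_iph_skb(af, skb, &iph);
 *	if (iph.protocol == IPPROTO_TCP && !iph.fragoffs)
 *		th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph);
 */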
166 
167 static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
168 				   const union nf_inet_addr *src)
169 {
170 #ifdef CONFIG_IP_VS_IPV6
171 	if (af == AF_INET6)
172 		dst->in6 = src->in6;
173 	else
174 #endif
175 	dst->ip = src->ip;
176 }
177 
178 static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst,
179 				  const union nf_inet_addr *src)
180 {
181 #ifdef CONFIG_IP_VS_IPV6
182 	if (af == AF_INET6) {
183 		dst->in6 = src->in6;
184 		return;
185 	}
186 #endif
187 	dst->ip = src->ip;
188 	dst->all[1] = 0;
189 	dst->all[2] = 0;
190 	dst->all[3] = 0;
191 }
192 
193 static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
194 				   const union nf_inet_addr *b)
195 {
196 #ifdef CONFIG_IP_VS_IPV6
197 	if (af == AF_INET6)
198 		return ipv6_addr_equal(&a->in6, &b->in6);
199 #endif
200 	return a->ip == b->ip;
201 }
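
/* Sketch of the address helpers above (illustrative): ip_vs_addr_set()
 * also zeroes the unused words, so an IPv4 address can later be compared
 * as a full union:
 *
 *	union nf_inet_addr snet;
 *	int match;
 *
 *	ip_vs_addr_set(af, &snet, &iph->saddr);
 *	match = ip_vs_addr_equal(af, &snet, &dest->addr);
 */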
202 
203 #ifdef CONFIG_IP_VS_DEBUG
204 #include <linux/net.h>
205 
206 int ip_vs_get_debug_level(void);
207 
208 static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
209 					 const union nf_inet_addr *addr,
210 					 int *idx)
211 {
212 	int len;
213 #ifdef CONFIG_IP_VS_IPV6
214 	if (af == AF_INET6)
215 		len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]",
216 			       &addr->in6) + 1;
217 	else
218 #endif
219 		len = snprintf(&buf[*idx], buf_len - *idx, "%pI4",
220 			       &addr->ip) + 1;
221 
222 	*idx += len;
223 	BUG_ON(*idx > buf_len + 1);
224 	return &buf[*idx - len];
225 }
226 
227 #define IP_VS_DBG_BUF(level, msg, ...)					\
228 	do {								\
229 		char ip_vs_dbg_buf[160];				\
230 		int ip_vs_dbg_idx = 0;					\
231 		if (level <= ip_vs_get_debug_level())			\
232 			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
233 	} while (0)
234 #define IP_VS_ERR_BUF(msg...)						\
235 	do {								\
236 		char ip_vs_dbg_buf[160];				\
237 		int ip_vs_dbg_idx = 0;					\
238 		pr_err(msg);						\
239 	} while (0)
240 
241 /* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
242 #define IP_VS_DBG_ADDR(af, addr)					\
243 	ip_vs_dbg_addr(af, ip_vs_dbg_buf,				\
244 		       sizeof(ip_vs_dbg_buf), addr,			\
245 		       &ip_vs_dbg_idx)
246 
247 #define IP_VS_DBG(level, msg, ...)					\
248 	do {								\
249 		if (level <= ip_vs_get_debug_level())			\
250 			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
251 	} while (0)
252 #define IP_VS_DBG_RL(msg, ...)						\
253 	do {								\
254 		if (net_ratelimit())					\
255 			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
256 	} while (0)
257 #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)			\
258 	do {								\
259 		if (level <= ip_vs_get_debug_level())			\
260 			pp->debug_packet(af, pp, skb, ofs, msg);	\
261 	} while (0)
262 #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)			\
263 	do {								\
264 		if (level <= ip_vs_get_debug_level() &&			\
265 		    net_ratelimit())					\
266 			pp->debug_packet(af, pp, skb, ofs, msg);	\
267 	} while (0)
268 #else	/* NO DEBUGGING at ALL */
269 #define IP_VS_DBG_BUF(level, msg...)  do {} while (0)
270 #define IP_VS_ERR_BUF(msg...)  do {} while (0)
271 #define IP_VS_DBG(level, msg...)  do {} while (0)
272 #define IP_VS_DBG_RL(msg...)  do {} while (0)
273 #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
274 #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
275 #endif
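
/* Usage sketch: IP_VS_DBG_ADDR() may only appear inside IP_VS_DBG_BUF()
 * or IP_VS_ERR_BUF(), because those macros declare the
 * ip_vs_dbg_buf/ip_vs_dbg_idx locals it writes into:
 *
 *	IP_VS_DBG_BUF(7, "lookup service %s:%d\n",
 *		      IP_VS_DBG_ADDR(af, vaddr), ntohs(vport));
 */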
276 
277 #define IP_VS_BUG() BUG()
278 #define IP_VS_ERR_RL(msg, ...)						\
279 	do {								\
280 		if (net_ratelimit())					\
281 			pr_err(msg, ##__VA_ARGS__);			\
282 	} while (0)
283 
284 #ifdef CONFIG_IP_VS_DEBUG
285 #define EnterFunction(level)						\
286 	do {								\
287 		if (level <= ip_vs_get_debug_level())			\
288 			printk(KERN_DEBUG				\
289 			       pr_fmt("Enter: %s, %s line %i\n"),	\
290 			       __func__, __FILE__, __LINE__);		\
291 	} while (0)
292 #define LeaveFunction(level)						\
293 	do {								\
294 		if (level <= ip_vs_get_debug_level())			\
295 			printk(KERN_DEBUG				\
296 			       pr_fmt("Leave: %s, %s line %i\n"),	\
297 			       __func__, __FILE__, __LINE__);		\
298 	} while (0)
299 #else
300 #define EnterFunction(level)   do {} while (0)
301 #define LeaveFunction(level)   do {} while (0)
302 #endif
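
/* Typical (illustrative) use, bracketing a long-running function:
 *
 *	EnterFunction(2);
 *	...
 *	LeaveFunction(2);
 */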
303 
304 /* The port number of FTP service (in network order). */
305 #define FTPPORT  cpu_to_be16(21)
306 #define FTPDATA  cpu_to_be16(20)
307 
308 /* TCP State Values */
309 enum {
310 	IP_VS_TCP_S_NONE = 0,
311 	IP_VS_TCP_S_ESTABLISHED,
312 	IP_VS_TCP_S_SYN_SENT,
313 	IP_VS_TCP_S_SYN_RECV,
314 	IP_VS_TCP_S_FIN_WAIT,
315 	IP_VS_TCP_S_TIME_WAIT,
316 	IP_VS_TCP_S_CLOSE,
317 	IP_VS_TCP_S_CLOSE_WAIT,
318 	IP_VS_TCP_S_LAST_ACK,
319 	IP_VS_TCP_S_LISTEN,
320 	IP_VS_TCP_S_SYNACK,
321 	IP_VS_TCP_S_LAST
322 };
323 
324 /* UDP State Values */
325 enum {
326 	IP_VS_UDP_S_NORMAL,
327 	IP_VS_UDP_S_LAST,
328 };
329 
330 /* ICMP State Values */
331 enum {
332 	IP_VS_ICMP_S_NORMAL,
333 	IP_VS_ICMP_S_LAST,
334 };
335 
336 /* SCTP State Values */
337 enum ip_vs_sctp_states {
338 	IP_VS_SCTP_S_NONE,
339 	IP_VS_SCTP_S_INIT1,
340 	IP_VS_SCTP_S_INIT,
341 	IP_VS_SCTP_S_COOKIE_SENT,
342 	IP_VS_SCTP_S_COOKIE_REPLIED,
343 	IP_VS_SCTP_S_COOKIE_WAIT,
344 	IP_VS_SCTP_S_COOKIE,
345 	IP_VS_SCTP_S_COOKIE_ECHOED,
346 	IP_VS_SCTP_S_ESTABLISHED,
347 	IP_VS_SCTP_S_SHUTDOWN_SENT,
348 	IP_VS_SCTP_S_SHUTDOWN_RECEIVED,
349 	IP_VS_SCTP_S_SHUTDOWN_ACK_SENT,
350 	IP_VS_SCTP_S_REJECTED,
351 	IP_VS_SCTP_S_CLOSED,
352 	IP_VS_SCTP_S_LAST
353 };
354 
355 /* Delta sequence info structure
356  * Each ip_vs_conn has two of them (output AND input seq. changes).
357  * Only used in VS/NAT.
358  */
359 struct ip_vs_seq {
360 	__u32			init_seq;	/* Add delta from this seq */
361 	__u32			delta;		/* Delta in sequence numbers */
362 	__u32			previous_delta;	/* Delta in sequence numbers
363 						 * before last resized pkt */
364 };
365 
366 /* counters per cpu */
367 struct ip_vs_counters {
368 	__u32		conns;		/* connections scheduled */
369 	__u32		inpkts;		/* incoming packets */
370 	__u32		outpkts;	/* outgoing packets */
371 	__u64		inbytes;	/* incoming bytes */
372 	__u64		outbytes;	/* outgoing bytes */
373 };
374 /* Stats per cpu */
375 struct ip_vs_cpu_stats {
376 	struct ip_vs_counters   ustats;
377 	struct u64_stats_sync   syncp;
378 };
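
/* Writer-side sketch for the per-cpu counters (illustrative; "cpustats"
 * stands for a struct ip_vs_cpu_stats __percpu pointer as kept by the
 * stats objects below):
 *
 *	struct ip_vs_cpu_stats *s = this_cpu_ptr(cpustats);
 *
 *	u64_stats_update_begin(&s->syncp);
 *	s->ustats.inpkts++;
 *	s->ustats.inbytes += skb->len;
 *	u64_stats_update_end(&s->syncp);
 */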
379 
380 /* IPVS statistics objects */
381 struct ip_vs_estimator {
382 	struct list_head	list;
383 
384 	u64			last_inbytes;
385 	u64			last_outbytes;
386 	u32			last_conns;
387 	u32			last_inpkts;
388 	u32			last_outpkts;
389 
390 	u32			cps;
391 	u32			inpps;
392 	u32			outpps;
393 	u32			inbps;
394 	u32			outbps;
395 };
396 
397 struct ip_vs_stats {
398 	struct ip_vs_stats_user	ustats;		/* statistics */
399 	struct ip_vs_estimator	est;		/* estimator */
400 	struct ip_vs_cpu_stats __percpu	*cpustats;	/* per cpu counters */
401 	spinlock_t		lock;		/* spin lock */
402 	struct ip_vs_stats_user	ustats0;	/* reset values */
403 };
404 
405 struct dst_entry;
406 struct iphdr;
407 struct ip_vs_conn;
408 struct ip_vs_app;
409 struct sk_buff;
410 struct ip_vs_proto_data;
411 
412 struct ip_vs_protocol {
413 	struct ip_vs_protocol	*next;
414 	char			*name;
415 	u16			protocol;
416 	u16			num_states;
417 	int			dont_defrag;
418 
419 	void (*init)(struct ip_vs_protocol *pp);
420 
421 	void (*exit)(struct ip_vs_protocol *pp);
422 
423 	int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
424 
425 	void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
426 
427 	int (*conn_schedule)(int af, struct sk_buff *skb,
428 			     struct ip_vs_proto_data *pd,
429 			     int *verdict, struct ip_vs_conn **cpp,
430 			     struct ip_vs_iphdr *iph);
431 
432 	struct ip_vs_conn *
433 	(*conn_in_get)(int af,
434 		       const struct sk_buff *skb,
435 		       const struct ip_vs_iphdr *iph,
436 		       int inverse);
437 
438 	struct ip_vs_conn *
439 	(*conn_out_get)(int af,
440 			const struct sk_buff *skb,
441 			const struct ip_vs_iphdr *iph,
442 			int inverse);
443 
444 	int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
445 			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
446 
447 	int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
448 			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
449 
450 	int (*csum_check)(int af, struct sk_buff *skb,
451 			  struct ip_vs_protocol *pp);
452 
453 	const char *(*state_name)(int state);
454 
455 	void (*state_transition)(struct ip_vs_conn *cp, int direction,
456 				 const struct sk_buff *skb,
457 				 struct ip_vs_proto_data *pd);
458 
459 	int (*register_app)(struct net *net, struct ip_vs_app *inc);
460 
461 	void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
462 
463 	int (*app_conn_bind)(struct ip_vs_conn *cp);
464 
465 	void (*debug_packet)(int af, struct ip_vs_protocol *pp,
466 			     const struct sk_buff *skb,
467 			     int offset,
468 			     const char *msg);
469 
470 	void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
471 };
472 
473 /* protocol data per netns */
474 struct ip_vs_proto_data {
475 	struct ip_vs_proto_data	*next;
476 	struct ip_vs_protocol	*pp;
477 	int			*timeout_table;	/* protocol timeout table */
478 	atomic_t		appcnt;		/* counter of proto app incs. */
479 	struct tcp_states_t	*tcp_state_table;
480 };
481 
482 struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
483 struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
484 					      unsigned short proto);
485 
486 struct ip_vs_conn_param {
487 	struct net			*net;
488 	const union nf_inet_addr	*caddr;
489 	const union nf_inet_addr	*vaddr;
490 	__be16				cport;
491 	__be16				vport;
492 	__u16				protocol;
493 	u16				af;
494 
495 	const struct ip_vs_pe		*pe;
496 	char				*pe_data;
497 	__u8				pe_data_len;
498 };
499 
500 /* IP_VS structure allocated for each dynamically scheduled connection */
501 struct ip_vs_conn {
502 	struct hlist_node	c_list;         /* hashed list heads */
503 	/* Protocol, addresses and port numbers */
504 	__be16                  cport;
505 	__be16                  dport;
506 	__be16                  vport;
507 	u16			af;		/* address family */
508 	union nf_inet_addr      caddr;          /* client address */
509 	union nf_inet_addr      vaddr;          /* virtual address */
510 	union nf_inet_addr      daddr;          /* destination address */
511 	volatile __u32          flags;          /* status flags */
512 	__u16                   protocol;       /* Which protocol (TCP/UDP) */
513 	__u16			daf;		/* Address family of the dest */
514 #ifdef CONFIG_NET_NS
515 	struct net              *net;           /* Name space */
516 #endif
517 
518 	/* counter and timer */
519 	atomic_t		refcnt;		/* reference count */
520 	struct timer_list	timer;		/* Expiration timer */
521 	volatile unsigned long	timeout;	/* timeout */
522 
523 	/* Flags and state transition */
524 	spinlock_t              lock;           /* lock for state transition */
525 	volatile __u16          state;          /* state info */
526 	volatile __u16          old_state;      /* old state, to be used for
527 						 * state-transition-triggered
528 						 * synchronization
529 						 */
530 	__u32			fwmark;		/* Firewall mark from skb */
531 	unsigned long		sync_endtime;	/* jiffies + sent_retries */
532 
533 	/* Control members */
534 	struct ip_vs_conn       *control;       /* Master control connection */
535 	atomic_t                n_control;      /* Number of controlled ones */
536 	struct ip_vs_dest       *dest;          /* real server */
537 	atomic_t                in_pkts;        /* incoming packet counter */
538 
539 	/* Packet transmitter for different forwarding methods.  If it
540 	 * mangles the packet, it must return NF_DROP or, better, NF_STOLEN;
541 	 * otherwise this must be changed to an sk_buff **.
542 	 * NF_ACCEPT can be returned when the destination is local.
543 	 */
544 	int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
545 			   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
546 
547 	/* Note: the following members could be grouped into a structure
548 	 * in order to save space, since they are only used in VS/NAT
549 	 * anyway.
550 	 */
551 	struct ip_vs_app        *app;           /* bound ip_vs_app object */
552 	void                    *app_data;      /* Application private data */
553 	struct ip_vs_seq        in_seq;         /* incoming seq. struct */
554 	struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
555 
556 	const struct ip_vs_pe	*pe;
557 	char			*pe_data;
558 	__u8			pe_data_len;
559 
560 	struct rcu_head		rcu_head;
561 };
562 
563 /* To save some memory in the conn table when net namespaces are disabled. */
564 static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
565 {
566 #ifdef CONFIG_NET_NS
567 	return cp->net;
568 #else
569 	return &init_net;
570 #endif
571 }
572 
573 static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
574 {
575 #ifdef CONFIG_NET_NS
576 	cp->net = net;
577 #endif
578 }
579 
580 static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
581 				    struct net *net)
582 {
583 #ifdef CONFIG_NET_NS
584 	return cp->net == net;
585 #else
586 	return 1;
587 #endif
588 }
589 
590 /* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
591  * for IPv6 support.
592  *
593  * We need these to conveniently pass around service and destination
594  * options, but unfortunately, we also need to keep the old definitions to
595  * maintain userspace backwards compatibility for the setsockopt interface.
596  */
597 struct ip_vs_service_user_kern {
598 	/* virtual service addresses */
599 	u16			af;
600 	u16			protocol;
601 	union nf_inet_addr	addr;		/* virtual ip address */
602 	__be16			port;
603 	u32			fwmark;		/* firewall mark of the service */
604 
605 	/* virtual service options */
606 	char			*sched_name;
607 	char			*pe_name;
608 	unsigned int		flags;		/* virtual service flags */
609 	unsigned int		timeout;	/* persistent timeout in sec */
610 	__be32			netmask;	/* persistent netmask or plen */
611 };
612 
613 
614 struct ip_vs_dest_user_kern {
615 	/* destination server address */
616 	union nf_inet_addr	addr;
617 	__be16			port;
618 
619 	/* real server options */
620 	unsigned int		conn_flags;	/* connection flags */
621 	int			weight;		/* destination weight */
622 
623 	/* thresholds for active connections */
624 	u32			u_threshold;	/* upper threshold */
625 	u32			l_threshold;	/* lower threshold */
626 
627 	/* Address family of addr */
628 	u16			af;
629 };
630 
631 
632 /*
633  * The information about the virtual service offered to the network and
634  * its forwarding entries.
635  */
636 struct ip_vs_service {
637 	struct hlist_node	s_list;   /* for normal service table */
638 	struct hlist_node	f_list;   /* for fwmark-based service table */
639 	atomic_t		refcnt;   /* reference counter */
640 
641 	u16			af;       /* address family */
642 	__u16			protocol; /* which protocol (TCP/UDP) */
643 	union nf_inet_addr	addr;	  /* IP address for virtual service */
644 	__be16			port;	  /* port number for the service */
645 	__u32                   fwmark;   /* firewall mark of the service */
646 	unsigned int		flags;	  /* service status flags */
647 	unsigned int		timeout;  /* persistent timeout in ticks */
648 	__be32			netmask;  /* grouping granularity, mask/plen */
649 	struct net		*net;
650 
651 	struct list_head	destinations;  /* real server d-linked list */
652 	__u32			num_dests;     /* number of servers */
653 	struct ip_vs_stats      stats;         /* statistics for the service */
654 
655 	/* for scheduling */
656 	struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */
657 	spinlock_t		sched_lock;    /* lock sched_data */
658 	void			*sched_data;   /* scheduler application data */
659 
660 	/* alternate persistence engine */
661 	struct ip_vs_pe __rcu	*pe;
662 
663 	struct rcu_head		rcu_head;
664 };
665 
666 /* Information for cached dst */
667 struct ip_vs_dest_dst {
668 	struct dst_entry	*dst_cache;	/* destination cache entry */
669 	u32			dst_cookie;
670 	union nf_inet_addr	dst_saddr;
671 	struct rcu_head		rcu_head;
672 };
673 
674 /* The real server destination forwarding entry with ip address, port number,
675  * and so on.
676  */
677 struct ip_vs_dest {
678 	struct list_head	n_list;   /* for the dests in the service */
679 	struct hlist_node	d_list;   /* for table with all the dests */
680 
681 	u16			af;		/* address family */
682 	__be16			port;		/* port number of the server */
683 	union nf_inet_addr	addr;		/* IP address of the server */
684 	volatile unsigned int	flags;		/* dest status flags */
685 	atomic_t		conn_flags;	/* flags to copy to conn */
686 	atomic_t		weight;		/* server weight */
687 
688 	atomic_t		refcnt;		/* reference counter */
689 	struct ip_vs_stats      stats;          /* statistics */
690 	unsigned long		idle_start;	/* start time, jiffies */
691 
692 	/* connection counters and thresholds */
693 	atomic_t		activeconns;	/* active connections */
694 	atomic_t		inactconns;	/* inactive connections */
695 	atomic_t		persistconns;	/* persistent connections */
696 	__u32			u_threshold;	/* upper threshold */
697 	__u32			l_threshold;	/* lower threshold */
698 
699 	/* for destination cache */
700 	spinlock_t		dst_lock;	/* lock of dst_cache */
701 	struct ip_vs_dest_dst __rcu *dest_dst;	/* cached dst info */
702 
703 	/* for virtual service */
704 	struct ip_vs_service __rcu *svc;	/* service it belongs to */
705 	__u16			protocol;	/* which protocol (TCP/UDP) */
706 	__be16			vport;		/* virtual port number */
707 	union nf_inet_addr	vaddr;		/* virtual IP address */
708 	__u32			vfwmark;	/* firewall mark of service */
709 
710 	struct list_head	t_list;		/* in dest_trash */
711 	unsigned int		in_rs_table:1;	/* we are in rs_table */
712 };
713 
714 /* The scheduler object */
715 struct ip_vs_scheduler {
716 	struct list_head	n_list;		/* d-linked list head */
717 	char			*name;		/* scheduler name */
718 	atomic_t		refcnt;		/* reference counter */
719 	struct module		*module;	/* THIS_MODULE/NULL */
720 
721 	/* scheduler initializing service */
722 	int (*init_service)(struct ip_vs_service *svc);
723 	/* scheduling service finish */
724 	void (*done_service)(struct ip_vs_service *svc);
725 	/* dest is linked */
726 	int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
727 	/* dest is unlinked */
728 	int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
729 	/* dest is updated */
730 	int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
731 
732 	/* selecting a server from the given service */
733 	struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc,
734 				       const struct sk_buff *skb,
735 				       struct ip_vs_iphdr *iph);
736 };
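
/* Minimal scheduler skeleton (illustrative; "my" and my_schedule are
 * hypothetical names).  A real module fills this in and calls
 * register_ip_vs_scheduler() from its init function:
 *
 *	static struct ip_vs_scheduler my_scheduler = {
 *		.name		= "my",
 *		.refcnt		= ATOMIC_INIT(0),
 *		.module		= THIS_MODULE,
 *		.n_list		= LIST_HEAD_INIT(my_scheduler.n_list),
 *		.schedule	= my_schedule,
 *	};
 */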
737 
738 /* The persistence engine object */
739 struct ip_vs_pe {
740 	struct list_head	n_list;		/* d-linked list head */
741 	char			*name;		/* scheduler name */
742 	atomic_t		refcnt;		/* reference counter */
743 	struct module		*module;	/* THIS_MODULE/NULL */
744 
745 	/* get the connection template, if any */
746 	int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
747 	bool (*ct_match)(const struct ip_vs_conn_param *p,
748 			 struct ip_vs_conn *ct);
749 	u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
750 			   bool inverse);
751 	int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
752 };
753 
754 /* The application module object (a.k.a. app incarnation) */
755 struct ip_vs_app {
756 	struct list_head	a_list;		/* member in app list */
757 	int			type;		/* IP_VS_APP_TYPE_xxx */
758 	char			*name;		/* application module name */
759 	__u16			protocol;
760 	struct module		*module;	/* THIS_MODULE/NULL */
761 	struct list_head	incs_list;	/* list of incarnations */
762 
763 	/* members for application incarnations */
764 	struct list_head	p_list;		/* member in proto app list */
765 	struct ip_vs_app	*app;		/* its real application */
766 	__be16			port;		/* port number in net order */
767 	atomic_t		usecnt;		/* usage counter */
768 	struct rcu_head		rcu_head;
769 
770 	/* output hook: Process packet in inout direction, diff set for TCP.
771 	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
772 	 *	   2=Mangled but checksum was not updated
773 	 */
774 	int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
775 		       struct sk_buff *, int *diff);
776 
777 	/* input hook: Process packet in outin direction, diff set for TCP.
778 	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
779 	 *	   2=Mangled but checksum was not updated
780 	 */
781 	int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
782 		      struct sk_buff *, int *diff);
783 
784 	/* ip_vs_app initializer */
785 	int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);
786 
787 	/* ip_vs_app finish */
788 	int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);
789 
790 
791 	/* not used now */
792 	int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *,
793 			 struct ip_vs_protocol *);
794 
795 	void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);
796 
797 	int *			timeout_table;
798 	int *			timeouts;
799 	int			timeouts_size;
800 
801 	int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
802 			     int *verdict, struct ip_vs_conn **cpp);
803 
804 	struct ip_vs_conn *
805 	(*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
806 		       const struct iphdr *iph, int inverse);
807 
808 	struct ip_vs_conn *
809 	(*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
810 			const struct iphdr *iph, int inverse);
811 
812 	int (*state_transition)(struct ip_vs_conn *cp, int direction,
813 				const struct sk_buff *skb,
814 				struct ip_vs_app *app);
815 
816 	void (*timeout_change)(struct ip_vs_app *app, int flags);
817 };
818 
819 struct ipvs_master_sync_state {
820 	struct list_head	sync_queue;
821 	struct ip_vs_sync_buff	*sync_buff;
822 	unsigned long		sync_queue_len;
823 	unsigned int		sync_queue_delay;
824 	struct task_struct	*master_thread;
825 	struct delayed_work	master_wakeup_work;
826 	struct netns_ipvs	*ipvs;
827 };
828 
829 /* How much time to keep dests in trash */
830 #define IP_VS_DEST_TRASH_PERIOD		(120 * HZ)
831 
832 /* IPVS in network namespace */
833 struct netns_ipvs {
834 	int			gen;		/* Generation */
835 	int			enable;		/* enable flag, like nf_hooks */
836 	/* Hash table: for real service lookups */
837 	#define IP_VS_RTAB_BITS 4
838 	#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
839 	#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
840 
841 	struct hlist_head	rs_table[IP_VS_RTAB_SIZE];
842 	/* ip_vs_app */
843 	struct list_head	app_list;
844 	/* ip_vs_proto */
845 	#define IP_VS_PROTO_TAB_SIZE	32	/* must be power of 2 */
846 	struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
847 	/* ip_vs_proto_tcp */
848 #ifdef CONFIG_IP_VS_PROTO_TCP
849 	#define	TCP_APP_TAB_BITS	4
850 	#define	TCP_APP_TAB_SIZE	(1 << TCP_APP_TAB_BITS)
851 	#define	TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)
852 	struct list_head	tcp_apps[TCP_APP_TAB_SIZE];
853 #endif
854 	/* ip_vs_proto_udp */
855 #ifdef CONFIG_IP_VS_PROTO_UDP
856 	#define	UDP_APP_TAB_BITS	4
857 	#define	UDP_APP_TAB_SIZE	(1 << UDP_APP_TAB_BITS)
858 	#define	UDP_APP_TAB_MASK	(UDP_APP_TAB_SIZE - 1)
859 	struct list_head	udp_apps[UDP_APP_TAB_SIZE];
860 #endif
861 	/* ip_vs_proto_sctp */
862 #ifdef CONFIG_IP_VS_PROTO_SCTP
863 	#define SCTP_APP_TAB_BITS	4
864 	#define SCTP_APP_TAB_SIZE	(1 << SCTP_APP_TAB_BITS)
865 	#define SCTP_APP_TAB_MASK	(SCTP_APP_TAB_SIZE - 1)
866 	/* Hash table for SCTP application incarnations	 */
867 	struct list_head	sctp_apps[SCTP_APP_TAB_SIZE];
868 #endif
869 	/* ip_vs_conn */
870 	atomic_t		conn_count;      /* connection counter */
871 
872 	/* ip_vs_ctl */
873 	struct ip_vs_stats		tot_stats;  /* Statistics & est. */
874 
875 	int			num_services;    /* number of virtual services */
876 
877 	/* Trash for destinations */
878 	struct list_head	dest_trash;
879 	spinlock_t		dest_trash_lock;
880 	struct timer_list	dest_trash_timer; /* expiration timer */
881 	/* Service counters */
882 	atomic_t		ftpsvc_counter;
883 	atomic_t		nullsvc_counter;
884 
885 #ifdef CONFIG_SYSCTL
886 	/* 1/rate drop and drop-entry variables */
887 	struct delayed_work	defense_work;   /* Work handler */
888 	int			drop_rate;
889 	int			drop_counter;
890 	atomic_t		dropentry;
891 	/* locks in ctl.c */
892 	spinlock_t		dropentry_lock;  /* drop entry handling */
893 	spinlock_t		droppacket_lock; /* drop packet handling */
894 	spinlock_t		securetcp_lock;  /* state and timeout tables */
895 
896 	/* sys-ctl struct */
897 	struct ctl_table_header	*sysctl_hdr;
898 	struct ctl_table	*sysctl_tbl;
899 #endif
900 
901 	/* sysctl variables */
902 	int			sysctl_amemthresh;
903 	int			sysctl_am_droprate;
904 	int			sysctl_drop_entry;
905 	int			sysctl_drop_packet;
906 	int			sysctl_secure_tcp;
907 #ifdef CONFIG_IP_VS_NFCT
908 	int			sysctl_conntrack;
909 #endif
910 	int			sysctl_snat_reroute;
911 	int			sysctl_sync_ver;
912 	int			sysctl_sync_ports;
913 	int			sysctl_sync_persist_mode;
914 	unsigned long		sysctl_sync_qlen_max;
915 	int			sysctl_sync_sock_size;
916 	int			sysctl_cache_bypass;
917 	int			sysctl_expire_nodest_conn;
918 	int			sysctl_sloppy_tcp;
919 	int			sysctl_sloppy_sctp;
920 	int			sysctl_expire_quiescent_template;
921 	int			sysctl_sync_threshold[2];
922 	unsigned int		sysctl_sync_refresh_period;
923 	int			sysctl_sync_retries;
924 	int			sysctl_nat_icmp_send;
925 	int			sysctl_pmtu_disc;
926 	int			sysctl_backup_only;
927 
928 	/* ip_vs_lblc */
929 	int			sysctl_lblc_expiration;
930 	struct ctl_table_header	*lblc_ctl_header;
931 	struct ctl_table	*lblc_ctl_table;
932 	/* ip_vs_lblcr */
933 	int			sysctl_lblcr_expiration;
934 	struct ctl_table_header	*lblcr_ctl_header;
935 	struct ctl_table	*lblcr_ctl_table;
936 	/* ip_vs_est */
937 	struct list_head	est_list;	/* estimator list */
938 	spinlock_t		est_lock;
939 	struct timer_list	est_timer;	/* Estimation timer */
940 	/* ip_vs_sync */
941 	spinlock_t		sync_lock;
942 	struct ipvs_master_sync_state *ms;
943 	spinlock_t		sync_buff_lock;
944 	struct task_struct	**backup_threads;
945 	int			threads_mask;
946 	int			send_mesg_maxlen;
947 	int			recv_mesg_maxlen;
948 	volatile int		sync_state;
949 	volatile int		master_syncid;
950 	volatile int		backup_syncid;
951 	struct mutex		sync_mutex;
952 	/* multicast interface name */
953 	char			master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
954 	char			backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
955 	/* net name space ptr */
956 	struct net		*net;            /* Needed by timer routines */
957 	/* Number of heterogeneous destinations, needed because heterogeneous
958 	 * destinations are not supported when synchronization is enabled.
959 	 */
960 	unsigned int		mixed_address_family_dests;
961 };
962 
963 #define DEFAULT_SYNC_THRESHOLD	3
964 #define DEFAULT_SYNC_PERIOD	50
965 #define DEFAULT_SYNC_VER	1
966 #define DEFAULT_SLOPPY_TCP	0
967 #define DEFAULT_SLOPPY_SCTP	0
968 #define DEFAULT_SYNC_REFRESH_PERIOD	(0U * HZ)
969 #define DEFAULT_SYNC_RETRIES		0
970 #define IPVS_SYNC_WAKEUP_RATE	8
971 #define IPVS_SYNC_QLEN_MAX	(IPVS_SYNC_WAKEUP_RATE * 4)
972 #define IPVS_SYNC_SEND_DELAY	(HZ / 50)
973 #define IPVS_SYNC_CHECK_PERIOD	HZ
974 #define IPVS_SYNC_FLUSH_TIME	(HZ * 2)
975 #define IPVS_SYNC_PORTS_MAX	(1 << 6)
976 
977 #ifdef CONFIG_SYSCTL
978 
979 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
980 {
981 	return ipvs->sysctl_sync_threshold[0];
982 }
983 
984 static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
985 {
986 	return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
987 }
988 
989 static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
990 {
991 	return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
992 }
993 
994 static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
995 {
996 	return ipvs->sysctl_sync_retries;
997 }
998 
999 static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
1000 {
1001 	return ipvs->sysctl_sync_ver;
1002 }
1003 
1004 static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
1005 {
1006 	return ipvs->sysctl_sloppy_tcp;
1007 }
1008 
1009 static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
1010 {
1011 	return ipvs->sysctl_sloppy_sctp;
1012 }
1013 
1014 static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
1015 {
1016 	return ACCESS_ONCE(ipvs->sysctl_sync_ports);
1017 }
1018 
1019 static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
1020 {
1021 	return ipvs->sysctl_sync_persist_mode;
1022 }
1023 
1024 static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
1025 {
1026 	return ipvs->sysctl_sync_qlen_max;
1027 }
1028 
1029 static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
1030 {
1031 	return ipvs->sysctl_sync_sock_size;
1032 }
1033 
1034 static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
1035 {
1036 	return ipvs->sysctl_pmtu_disc;
1037 }
1038 
1039 static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
1040 {
1041 	return ipvs->sync_state & IP_VS_STATE_BACKUP &&
1042 	       ipvs->sysctl_backup_only;
1043 }
1044 
1045 #else
1046 
1047 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
1048 {
1049 	return DEFAULT_SYNC_THRESHOLD;
1050 }
1051 
1052 static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
1053 {
1054 	return DEFAULT_SYNC_PERIOD;
1055 }
1056 
1057 static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
1058 {
1059 	return DEFAULT_SYNC_REFRESH_PERIOD;
1060 }
1061 
1062 static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
1063 {
1064 	return DEFAULT_SYNC_RETRIES & 3;
1065 }
1066 
1067 static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
1068 {
1069 	return DEFAULT_SYNC_VER;
1070 }
1071 
1072 static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
1073 {
1074 	return DEFAULT_SLOPPY_TCP;
1075 }
1076 
1077 static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
1078 {
1079 	return DEFAULT_SLOPPY_SCTP;
1080 }
1081 
1082 static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
1083 {
1084 	return 1;
1085 }
1086 
1087 static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
1088 {
1089 	return 0;
1090 }
1091 
1092 static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
1093 {
1094 	return IPVS_SYNC_QLEN_MAX;
1095 }
1096 
1097 static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
1098 {
1099 	return 0;
1100 }
1101 
1102 static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
1103 {
1104 	return 1;
1105 }
1106 
1107 static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
1108 {
1109 	return 0;
1110 }
1111 
1112 #endif
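
/* Illustrative read of the sync tuning knobs through the accessors above
 * (so callers work whether or not CONFIG_SYSCTL is set); the condition is
 * only a rough sketch of the per-packet sync decision:
 *
 *	int period = sysctl_sync_period(ipvs);
 *
 *	if (period && pkts % period == sysctl_sync_threshold(ipvs))
 *		ip_vs_sync_conn(net, cp, pkts);
 */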
1113 
1114 /* IPVS core functions
1115  * (from ip_vs_core.c)
1116  */
1117 const char *ip_vs_proto_name(unsigned int proto);
1118 void ip_vs_init_hash_table(struct list_head *table, int rows);
1119 #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
1120 
1121 #define IP_VS_APP_TYPE_FTP	1
1122 
1123 /* ip_vs_conn handling functions
1124  * (from ip_vs_conn.c)
1125  */
1126 enum {
1127 	IP_VS_DIR_INPUT = 0,
1128 	IP_VS_DIR_OUTPUT,
1129 	IP_VS_DIR_INPUT_ONLY,
1130 	IP_VS_DIR_LAST,
1131 };
1132 
1133 static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
1134 					 const union nf_inet_addr *caddr,
1135 					 __be16 cport,
1136 					 const union nf_inet_addr *vaddr,
1137 					 __be16 vport,
1138 					 struct ip_vs_conn_param *p)
1139 {
1140 	p->net = net;
1141 	p->af = af;
1142 	p->protocol = protocol;
1143 	p->caddr = caddr;
1144 	p->cport = cport;
1145 	p->vaddr = vaddr;
1146 	p->vport = vport;
1147 	p->pe = NULL;
1148 	p->pe_data = NULL;
1149 }
1150 
1151 struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
1152 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
1153 
1154 struct ip_vs_conn *ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
1155 					    const struct ip_vs_iphdr *iph,
1156 					    int inverse);
1157 
1158 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
1159 
1160 struct ip_vs_conn *ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
1161 					     const struct ip_vs_iphdr *iph,
1162 					     int inverse);
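
/* Lookup sketch (illustrative): build a parameter block on the stack and
 * query the connection table with it; a returned conn holds a reference
 * that must be dropped with ip_vs_conn_put():
 *
 *	struct ip_vs_conn_param p;
 *	struct ip_vs_conn *cp;
 *
 *	ip_vs_conn_fill_param(net, AF_INET, IPPROTO_TCP,
 *			      &caddr, cport, &vaddr, vport, &p);
 *	cp = ip_vs_conn_in_get(&p);
 *	if (cp) {
 *		...
 *		ip_vs_conn_put(cp);
 *	}
 */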
1163 
1164 /* Get reference to gain full access to conn.
1165  * By default, RCU read-side critical sections have access only to
1166  * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference.
1167  */
1168 static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
1169 {
1170 	return atomic_inc_not_zero(&cp->refcnt);
1171 }
1172 
1173 /* put back the conn without restarting its timer */
1174 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
1175 {
1176 	smp_mb__before_atomic();
1177 	atomic_dec(&cp->refcnt);
1178 }
1179 void ip_vs_conn_put(struct ip_vs_conn *cp);
1180 void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
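
/* RCU + refcount sketch (illustrative): code that finds a conn under
 * rcu_read_lock() must take a reference with __ip_vs_conn_get() before
 * using it outside the read-side section, and release it afterwards:
 *
 *	rcu_read_lock();
 *	cp = ...lookup in the conn table...;
 *	if (cp && !__ip_vs_conn_get(cp))
 *		cp = NULL;
 *	rcu_read_unlock();
 *	if (cp) {
 *		...
 *		ip_vs_conn_put(cp);
 *	}
 */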
1181 
1182 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
1183 				  const union nf_inet_addr *daddr,
1184 				  __be16 dport, unsigned int flags,
1185 				  struct ip_vs_dest *dest, __u32 fwmark);
1186 void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
1187 
1188 const char *ip_vs_state_name(__u16 proto, int state);
1189 
1190 void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
1191 int ip_vs_check_template(struct ip_vs_conn *ct);
1192 void ip_vs_random_dropentry(struct net *net);
1193 int ip_vs_conn_init(void);
1194 void ip_vs_conn_cleanup(void);
1195 
1196 static inline void ip_vs_control_del(struct ip_vs_conn *cp)
1197 {
1198 	struct ip_vs_conn *ctl_cp = cp->control;
1199 	if (!ctl_cp) {
1200 		IP_VS_ERR_BUF("request control DEL for uncontrolled: "
1201 			      "%s:%d to %s:%d\n",
1202 			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1203 			      ntohs(cp->cport),
1204 			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
1205 			      ntohs(cp->vport));
1206 
1207 		return;
1208 	}
1209 
1210 	IP_VS_DBG_BUF(7, "DELeting control for: "
1211 		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
1212 		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1213 		      ntohs(cp->cport),
1214 		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
1215 		      ntohs(ctl_cp->cport));
1216 
1217 	cp->control = NULL;
1218 	if (atomic_read(&ctl_cp->n_control) == 0) {
1219 		IP_VS_ERR_BUF("BUG control DEL with n=0 : "
1220 			      "%s:%d to %s:%d\n",
1221 			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1222 			      ntohs(cp->cport),
1223 			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
1224 			      ntohs(cp->vport));
1225 
1226 		return;
1227 	}
1228 	atomic_dec(&ctl_cp->n_control);
1229 }
1230 
1231 static inline void
1232 ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
1233 {
1234 	if (cp->control) {
1235 		IP_VS_ERR_BUF("request control ADD for already controlled: "
1236 			      "%s:%d to %s:%d\n",
1237 			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1238 			      ntohs(cp->cport),
1239 			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
1240 			      ntohs(cp->vport));
1241 
1242 		ip_vs_control_del(cp);
1243 	}
1244 
1245 	IP_VS_DBG_BUF(7, "ADDing control for: "
1246 		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
1247 		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1248 		      ntohs(cp->cport),
1249 		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
1250 		      ntohs(ctl_cp->cport));
1251 
1252 	cp->control = ctl_cp;
1253 	atomic_inc(&ctl_cp->n_control);
1254 }
1255 
1256 /* IPVS netns init & cleanup functions */
1257 int ip_vs_estimator_net_init(struct net *net);
1258 int ip_vs_control_net_init(struct net *net);
1259 int ip_vs_protocol_net_init(struct net *net);
1260 int ip_vs_app_net_init(struct net *net);
1261 int ip_vs_conn_net_init(struct net *net);
1262 int ip_vs_sync_net_init(struct net *net);
1263 void ip_vs_conn_net_cleanup(struct net *net);
1264 void ip_vs_app_net_cleanup(struct net *net);
1265 void ip_vs_protocol_net_cleanup(struct net *net);
1266 void ip_vs_control_net_cleanup(struct net *net);
1267 void ip_vs_estimator_net_cleanup(struct net *net);
1268 void ip_vs_sync_net_cleanup(struct net *net);
1269 void ip_vs_service_net_cleanup(struct net *net);
1270 
1271 /* IPVS application functions
1272  * (from ip_vs_app.c)
1273  */
1274 #define IP_VS_APP_MAX_PORTS  8
1275 struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
1276 void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
1277 int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1278 void ip_vs_unbind_app(struct ip_vs_conn *cp);
1279 int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
1280 			   __u16 port);
1281 int ip_vs_app_inc_get(struct ip_vs_app *inc);
1282 void ip_vs_app_inc_put(struct ip_vs_app *inc);
1283 
1284 int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
1285 int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
1286 
1287 int register_ip_vs_pe(struct ip_vs_pe *pe);
1288 int unregister_ip_vs_pe(struct ip_vs_pe *pe);
1289 struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
1290 struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
1291 
1292 /* Use a #define to avoid all of module.h just for these trivial ops */
1293 #define ip_vs_pe_get(pe)			\
1294 	if (pe && pe->module)			\
1295 		__module_get(pe->module);
1296 
1297 #define ip_vs_pe_put(pe)			\
1298 	if (pe && pe->module)			\
1299 		module_put(pe->module);
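
/* Hedged usage sketch of the persistence-engine helpers ("sip" is the
 * in-tree PE name, used here only as an example):
 *
 *	struct ip_vs_pe *pe = ip_vs_pe_getbyname("sip");
 *
 *	if (pe) {
 *		...
 *		ip_vs_pe_put(pe);
 *	}
 */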
1300 
1301 /* IPVS protocol functions (from ip_vs_proto.c) */
1302 int ip_vs_protocol_init(void);
1303 void ip_vs_protocol_cleanup(void);
1304 void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
1305 int *ip_vs_create_timeout_table(int *table, int size);
1306 int ip_vs_set_state_timeout(int *table, int num, const char *const *names,
1307 			    const char *name, int to);
1308 void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
1309 			       const struct sk_buff *skb, int offset,
1310 			       const char *msg);
1311 
1312 extern struct ip_vs_protocol ip_vs_protocol_tcp;
1313 extern struct ip_vs_protocol ip_vs_protocol_udp;
1314 extern struct ip_vs_protocol ip_vs_protocol_icmp;
1315 extern struct ip_vs_protocol ip_vs_protocol_esp;
1316 extern struct ip_vs_protocol ip_vs_protocol_ah;
1317 extern struct ip_vs_protocol ip_vs_protocol_sctp;
1318 
1319 /* Registering/unregistering scheduler functions
1320  * (from ip_vs_sched.c)
1321  */
1322 int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
1323 int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
1324 int ip_vs_bind_scheduler(struct ip_vs_service *svc,
1325 			 struct ip_vs_scheduler *scheduler);
1326 void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
1327 			    struct ip_vs_scheduler *sched);
1328 struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
1329 void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
1330 struct ip_vs_conn *
1331 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
1332 	       struct ip_vs_proto_data *pd, int *ignored,
1333 	       struct ip_vs_iphdr *iph);
1334 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
1335 		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
1336 
1337 void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
1338 
1339 /* IPVS control data and functions (from ip_vs_ctl.c) */
1340 extern struct ip_vs_stats ip_vs_stats;
1341 extern int sysctl_ip_vs_sync_ver;
1342 
1343 struct ip_vs_service *
1344 ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
1345 		  const union nf_inet_addr *vaddr, __be16 vport);
1346 
1347 bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
1348 			    const union nf_inet_addr *daddr, __be16 dport);
1349 
1350 int ip_vs_use_count_inc(void);
1351 void ip_vs_use_count_dec(void);
1352 int ip_vs_register_nl_ioctl(void);
1353 void ip_vs_unregister_nl_ioctl(void);
1354 int ip_vs_control_init(void);
1355 void ip_vs_control_cleanup(void);
1356 struct ip_vs_dest *
1357 ip_vs_find_dest(struct net *net, int svc_af, int dest_af,
1358 		const union nf_inet_addr *daddr, __be16 dport,
1359 		const union nf_inet_addr *vaddr, __be16 vport,
1360 		__u16 protocol, __u32 fwmark, __u32 flags);
1361 void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
1362 
1363 static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
1364 {
1365 	atomic_inc(&dest->refcnt);
1366 }
1367 
1368 static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
1369 {
1370 	smp_mb__before_atomic();
1371 	atomic_dec(&dest->refcnt);
1372 }
1373 
1374 static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
1375 {
1376 	if (atomic_dec_return(&dest->refcnt) < 0)
1377 		kfree(dest);
1378 }
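
/* Illustrative pairing of the helpers above (a sketch, not a contract):
 * whoever stores a dest pointer takes a reference with ip_vs_dest_hold()
 * and drops it with ip_vs_dest_put() when the pointer is forgotten:
 *
 *	ip_vs_dest_hold(dest);
 *	cp->dest = dest;
 *	...
 *	cp->dest = NULL;
 *	ip_vs_dest_put(dest);
 */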
1379 
1380 /* IPVS sync daemon data and function prototypes
1381  * (from ip_vs_sync.c)
1382  */
1383 int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
1384 int stop_sync_thread(struct net *net, int state);
1385 void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
1386 
1387 /* IPVS rate estimator prototypes (from ip_vs_est.c) */
1388 void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
1389 void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
1390 void ip_vs_zero_estimator(struct ip_vs_stats *stats);
1391 void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
1392 			  struct ip_vs_stats *stats);
1393 
1394 /* Various IPVS packet transmitters (from ip_vs_xmit.c) */
1395 int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1396 		    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1397 int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1398 		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1399 int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1400 		   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1401 int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1402 		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1403 int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1404 		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1405 int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1406 		    struct ip_vs_protocol *pp, int offset,
1407 		    unsigned int hooknum, struct ip_vs_iphdr *iph);
1408 void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
1409 
1410 #ifdef CONFIG_IP_VS_IPV6
1411 int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1412 			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1413 int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1414 		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1415 int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1416 			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1417 int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1418 		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
1419 int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1420 		       struct ip_vs_protocol *pp, int offset,
1421 		       unsigned int hooknum, struct ip_vs_iphdr *iph);
1422 #endif
1423 
1424 #ifdef CONFIG_SYSCTL
1425 /* This is a simple mechanism to ignore packets when the system is
1426  * heavily loaded. Just set ip_vs_drop_rate to 'n' and we start to
1427  * drop 1 out of every n packets.
1428  */
1429 static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
1430 {
1431 	if (!ipvs->drop_rate)
1432 		return 0;
1433 	if (--ipvs->drop_counter > 0)
1434 		return 0;
1435 	ipvs->drop_counter = ipvs->drop_rate;
1436 	return 1;
1437 }
1438 #else
1439 static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
1440 #endif
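
/* Usage sketch (illustrative): an input-path hook can drop early when the
 * defense mechanism asks for it:
 *
 *	if (ip_vs_todrop(net_ipvs(net)))
 *		return NF_DROP;
 */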
1441 
1442 /* ip_vs_fwd_tag returns the forwarding tag of the connection */
1443 #define IP_VS_FWD_METHOD(cp)  (cp->flags & IP_VS_CONN_F_FWD_MASK)
1444 
1445 static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
1446 {
1447 	char fwd;
1448 
1449 	switch (IP_VS_FWD_METHOD(cp)) {
1450 	case IP_VS_CONN_F_MASQ:
1451 		fwd = 'M'; break;
1452 	case IP_VS_CONN_F_LOCALNODE:
1453 		fwd = 'L'; break;
1454 	case IP_VS_CONN_F_TUNNEL:
1455 		fwd = 'T'; break;
1456 	case IP_VS_CONN_F_DROUTE:
1457 		fwd = 'R'; break;
1458 	case IP_VS_CONN_F_BYPASS:
1459 		fwd = 'B'; break;
1460 	default:
1461 		fwd = '?'; break;
1462 	}
1463 	return fwd;
1464 }
1465 
1466 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
1467 		    struct ip_vs_conn *cp, int dir);
1468 
1469 #ifdef CONFIG_IP_VS_IPV6
1470 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
1471 		       struct ip_vs_conn *cp, int dir);
1472 #endif
1473 
1474 __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
1475 
1476 static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
1477 {
1478 	__be32 diff[2] = { ~old, new };
1479 
1480 	return csum_partial(diff, sizeof(diff), oldsum);
1481 }
1482 
1483 #ifdef CONFIG_IP_VS_IPV6
1484 static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
1485 					__wsum oldsum)
1486 {
1487 	__be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
1488 			    new[3],  new[2],  new[1],  new[0] };
1489 
1490 	return csum_partial(diff, sizeof(diff), oldsum);
1491 }
1492 #endif
1493 
1494 static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
1495 {
1496 	__be16 diff[2] = { ~old, new };
1497 
1498 	return csum_partial(diff, sizeof(diff), oldsum);
1499 }
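
/* Incremental checksum sketch (illustrative) for rewriting a 16-bit port
 * during NAT: feed the complemented old checksum through the diff helper
 * and fold the result back:
 *
 *	tcph->check = csum_fold(ip_vs_check_diff2(oldport, newport,
 *					~csum_unfold(tcph->check)));
 */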
1500 
1501 /* Forget current conntrack (unconfirmed) and attach notrack entry */
1502 static inline void ip_vs_notrack(struct sk_buff *skb)
1503 {
1504 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1505 	enum ip_conntrack_info ctinfo;
1506 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1507 
1508 	if (!ct || !nf_ct_is_untracked(ct)) {
1509 		nf_conntrack_put(skb->nfct);
1510 		skb->nfct = &nf_ct_untracked_get()->ct_general;
1511 		skb->nfctinfo = IP_CT_NEW;
1512 		nf_conntrack_get(skb->nfct);
1513 	}
1514 #endif
1515 }
1516 
1517 #ifdef CONFIG_IP_VS_NFCT
1518 /* Netfilter connection tracking
1519  * (from ip_vs_nfct.c)
1520  */
1521 static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
1522 {
1523 #ifdef CONFIG_SYSCTL
1524 	return ipvs->sysctl_conntrack;
1525 #else
1526 	return 0;
1527 #endif
1528 }
1529 
1530 void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
1531 			    int outin);
1532 int ip_vs_confirm_conntrack(struct sk_buff *skb);
1533 void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
1534 			       struct ip_vs_conn *cp, u_int8_t proto,
1535 			       const __be16 port, int from_rs);
1536 void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
1537 
1538 #else
1539 
1540 static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
1541 {
1542 	return 0;
1543 }
1544 
1545 static inline void ip_vs_update_conntrack(struct sk_buff *skb,
1546 					  struct ip_vs_conn *cp, int outin)
1547 {
1548 }
1549 
1550 static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
1551 {
1552 	return NF_ACCEPT;
1553 }
1554 
1555 static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
1556 {
1557 }
1558 #endif /* CONFIG_IP_VS_NFCT */
1559 
1560 static inline int
1561 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
1562 {
1563 	/* We assume the overhead of processing an active connection is 256
1564 	 * times higher than that of an inactive connection on average. (This
1565 	 * factor of 256 may not be accurate and may be changed later.) For
1566 	 * now the overhead is estimated with the following formula:
1567 	 *		  dest->activeconns * 256 + dest->inactconns
1568 	 */
1569 	return (atomic_read(&dest->activeconns) << 8) +
1570 		atomic_read(&dest->inactconns);
1571 }
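
/* Scheduler sketch (illustrative, roughly the comparison a weighted
 * least-connection scheduler makes): pick the dest with the smallest
 * overhead/weight ratio, compared without division as cross products:
 *
 *	if (ip_vs_dest_conn_overhead(least) * atomic_read(&dest->weight) >
 *	    ip_vs_dest_conn_overhead(dest) * atomic_read(&least->weight))
 *		least = dest;
 */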
1572 
1573 #endif	/* _NET_IP_VS_H */
1574