/*
 * IP Virtual Server
 * data structure and functionality definitions
 */

#ifndef _NET_IP_VS_H
#define _NET_IP_VS_H

#include <linux/ip_vs.h>		/* definitions shared with userland */

#include <asm/types.h>			/* for __uXX types */

#include <linux/list.h>			/* for struct list_head */
#include <linux/spinlock.h>		/* for struct rwlock_t */
#include <linux/atomic.h>		/* for struct atomic_t */
#include <linux/compiler.h>
#include <linux/timer.h>
#include <linux/bug.h>

#include <net/checksum.h>
#include <linux/netfilter.h>		/* for union nf_inet_addr */
#include <linux/ip.h>
#include <linux/ipv6.h>			/* for struct ipv6hdr */
#include <net/ipv6.h>
#if IS_ENABLED(CONFIG_IP_VS_IPV6)
#include <linux/netfilter_ipv6/ip6_tables.h>
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/net_namespace.h>		/* Net namespace */

/*
 * Generic access to the ipvs struct
 */
static inline struct netns_ipvs *net_ipvs(struct net *net)
{
	return net->ipvs;
}

/*
 * Get the net ptr from an skb in traffic cases;
 * use skb_sknet() when the call is from userland (ioctl or netlink).
 */
static inline struct net *skb_net(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_NS
#ifdef CONFIG_IP_VS_DEBUG
	/*
	 * This is used for debug only.
	 * Start with the most likely hit,
	 * end with BUG.
	 */
	if (likely(skb->dev && skb->dev->nd_net))
		return dev_net(skb->dev);
	if (skb_dst(skb) && skb_dst(skb)->dev)
		return dev_net(skb_dst(skb)->dev);
	WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
	     __func__, __LINE__);
	if (likely(skb->sk && skb->sk->sk_net))
		return sock_net(skb->sk);
	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
	       __func__, __LINE__);
	BUG();
#else
	return dev_net(skb->dev ? : skb_dst(skb)->dev);
#endif
#else
	return &init_net;
#endif
}

static inline struct net *skb_sknet(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_NS
#ifdef CONFIG_IP_VS_DEBUG
	/* Start with the most likely hit */
	if (likely(skb->sk && skb->sk->sk_net))
		return sock_net(skb->sk);
	WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
	     __func__, __LINE__);
	if (likely(skb->dev && skb->dev->nd_net))
		return dev_net(skb->dev);
	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
	       __func__, __LINE__);
	BUG();
#else
	return sock_net(skb->sk);
#endif
#else
	return &init_net;
#endif
}
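/* Illustrative sketch (not part of this header): picking between the two
 * helpers above.  Packet-path code (netfilter hooks) derives the netns from
 * the skb's device via skb_net(), while code reached from userland
 * (ioctl/netlink handlers) goes through the socket via skb_sknet().  The
 * local variable names below are hypothetical:
 *
 *	// in a packet hook
 *	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
 *
 *	// in a netlink/ioctl handler
 *	struct netns_ipvs *ipvs = net_ipvs(skb_sknet(skb));
 */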
/* This one is needed for single_open_net since net is stored directly in
 * private, not as a struct, i.e. seq_file_net() can't be used.
 */
static inline struct net *seq_file_single_net(struct seq_file *seq)
{
#ifdef CONFIG_NET_NS
	return (struct net *)seq->private;
#else
	return &init_net;
#endif
}

/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;

struct ip_vs_iphdr {
	__u32 len;	/* IPv4: simply where L4 starts;
			 * IPv6: where the L4 transport header starts */
	__u32 thoff_reasm; /* Transport Header Offset in nfct_reasm skb */
	__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag) */
	__s16 protocol;
	__s32 flags;
	union nf_inet_addr saddr;
	union nf_inet_addr daddr;
};

/* Dependency to module: nf_defrag_ipv6 */
#if defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
{
	return skb->nfct_reasm;
}
static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
				     int len, void *buffer,
				     const struct ip_vs_iphdr *ipvsh)
{
	if (unlikely(ipvsh->fragoffs && skb_nfct_reasm(skb)))
		return skb_header_pointer(skb_nfct_reasm(skb),
					  ipvsh->thoff_reasm, len, buffer);

	return skb_header_pointer(skb, offset, len, buffer);
}
#else
static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
{
	return NULL;
}
static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
				     int len, void *buffer,
				     const struct ip_vs_iphdr *ipvsh)
{
	return skb_header_pointer(skb, offset, len, buffer);
}
#endif

static inline void
ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
{
	const struct iphdr *iph = nh;

	iphdr->len	= iph->ihl * 4;
	iphdr->fragoffs	= 0;
	iphdr->protocol	= iph->protocol;
	iphdr->saddr.ip	= iph->saddr;
	iphdr->daddr.ip	= iph->daddr;
}

/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
 * IPv6 requires some extra work, as finding the proper header position
 * depends on the IPv6 extension headers.
 */
static inline void
ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		const struct ipv6hdr *iph =
			(struct ipv6hdr *)skb_network_header(skb);
		iphdr->saddr.in6 = iph->saddr;
		iphdr->daddr.in6 = iph->daddr;
		/* ipv6_find_hdr() updates len, flags, thoff_reasm */
		iphdr->thoff_reasm = 0;
		iphdr->len	= 0;
		iphdr->flags	= 0;
		iphdr->protocol	= ipv6_find_hdr(skb, &iphdr->len, -1,
						&iphdr->fragoffs,
						&iphdr->flags);
		/* get proto from re-assembled packet and its offset */
		if (skb_nfct_reasm(skb))
			iphdr->protocol = ipv6_find_hdr(skb_nfct_reasm(skb),
							&iphdr->thoff_reasm,
							-1, NULL, NULL);

	} else
#endif
	{
		const struct iphdr *iph =
			(struct iphdr *)skb_network_header(skb);
		iphdr->len	= iph->ihl * 4;
		iphdr->fragoffs	= 0;
		iphdr->protocol	= iph->protocol;
		iphdr->saddr.ip	= iph->saddr;
		iphdr->daddr.ip	= iph->daddr;
	}
}
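/* Illustrative sketch (an assumption, not something declared in this file):
 * how a caller typically prepares an ip_vs_iphdr before connection lookup
 * or scheduling.  The surrounding function name is hypothetical:
 *
 *	static int example_handle(int af, struct sk_buff *skb)
 *	{
 *		struct ip_vs_iphdr iph;
 *
 *		ip_vs_fill_iph_skb(af, skb, &iph);
 *		if (iph.fragoffs)
 *			return 0;	// not the first IPv6 fragment
 *		// iph.protocol, iph.len and iph.saddr/daddr are now usable
 *		return 1;
 *	}
 */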
/* This function is a faster version of ip_vs_fill_iph_skb().
 * Here we only populate {s,d}addr (and avoid calling ipv6_find_hdr()).
 * This is used by some of the ip_vs_*_schedule() functions.
 * (Mostly done to avoid ABI breakage of external schedulers)
 */
static inline void
ip_vs_fill_iph_addr_only(int af, const struct sk_buff *skb,
			 struct ip_vs_iphdr *iphdr)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		const struct ipv6hdr *iph =
			(struct ipv6hdr *)skb_network_header(skb);
		iphdr->saddr.in6 = iph->saddr;
		iphdr->daddr.in6 = iph->daddr;
	} else
#endif
	{
		const struct iphdr *iph =
			(struct iphdr *)skb_network_header(skb);
		iphdr->saddr.ip = iph->saddr;
		iphdr->daddr.ip = iph->daddr;
	}
}

static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
				   const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		dst->in6 = src->in6;
	else
#endif
	dst->ip = src->ip;
}

static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst,
				  const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		dst->in6 = src->in6;
		return;
	}
#endif
	dst->ip = src->ip;
	dst->all[1] = 0;
	dst->all[2] = 0;
	dst->all[3] = 0;
}

static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
				   const union nf_inet_addr *b)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		return ipv6_addr_equal(&a->in6, &b->in6);
#endif
	return a->ip == b->ip;
}

#ifdef CONFIG_IP_VS_DEBUG
#include <linux/net.h>

extern int ip_vs_get_debug_level(void);

static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
					 const union nf_inet_addr *addr,
					 int *idx)
{
	int len;
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]",
			       &addr->in6) + 1;
	else
#endif
		len = snprintf(&buf[*idx], buf_len - *idx, "%pI4",
			       &addr->ip) + 1;

	*idx += len;
	BUG_ON(*idx > buf_len + 1);
	return &buf[*idx - len];
}

#define IP_VS_DBG_BUF(level, msg, ...)					\
	do {								\
		char ip_vs_dbg_buf[160];				\
		int ip_vs_dbg_idx = 0;					\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_ERR_BUF(msg...)						\
	do {								\
		char ip_vs_dbg_buf[160];				\
		int ip_vs_dbg_idx = 0;					\
		pr_err(msg);						\
	} while (0)

/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
#define IP_VS_DBG_ADDR(af, addr)					\
	ip_vs_dbg_addr(af, ip_vs_dbg_buf,				\
		       sizeof(ip_vs_dbg_buf), addr,			\
		       &ip_vs_dbg_idx)

#define IP_VS_DBG(level, msg, ...)					\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_DBG_RL(msg, ...)						\
	do {								\
		if (net_ratelimit())					\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)			\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			pp->debug_packet(af, pp, skb, ofs, msg);	\
	} while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)			\
	do {								\
		if (level <= ip_vs_get_debug_level() &&			\
		    net_ratelimit())					\
			pp->debug_packet(af, pp, skb, ofs, msg);	\
	} while (0)
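/* Illustrative use of the macros above (a sketch; ip_vs_control_del()
 * further down in this header shows the real pattern): IP_VS_DBG_ADDR()
 * may only appear inside IP_VS_DBG_BUF()/IP_VS_ERR_BUF(), because it writes
 * into the ip_vs_dbg_buf/ip_vs_dbg_idx locals those macros declare:
 *
 *	IP_VS_DBG_BUF(7, "lookup %s:%d\n",
 *		      IP_VS_DBG_ADDR(af, &addr), ntohs(port));
 */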
#else	/* NO DEBUGGING at ALL */
#define IP_VS_DBG_BUF(level, msg...)  do {} while (0)
#define IP_VS_ERR_BUF(msg...)  do {} while (0)
#define IP_VS_DBG(level, msg...)  do {} while (0)
#define IP_VS_DBG_RL(msg...)  do {} while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
#endif

#define IP_VS_BUG() BUG()
#define IP_VS_ERR_RL(msg, ...)						\
	do {								\
		if (net_ratelimit())					\
			pr_err(msg, ##__VA_ARGS__);			\
	} while (0)

#ifdef CONFIG_IP_VS_DEBUG
#define EnterFunction(level)						\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG				\
			       pr_fmt("Enter: %s, %s line %i\n"),	\
			       __func__, __FILE__, __LINE__);		\
	} while (0)
#define LeaveFunction(level)						\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG				\
			       pr_fmt("Leave: %s, %s line %i\n"),	\
			       __func__, __FILE__, __LINE__);		\
	} while (0)
#else
#define EnterFunction(level)   do {} while (0)
#define LeaveFunction(level)   do {} while (0)
#endif

#define	IP_VS_WAIT_WHILE(expr)	while (expr) { cpu_relax(); }


/*
 *	The port number of FTP service (in network order).
 */
#define FTPPORT  cpu_to_be16(21)
#define FTPDATA  cpu_to_be16(20)

/*
 *	TCP State Values
 */
enum {
	IP_VS_TCP_S_NONE = 0,
	IP_VS_TCP_S_ESTABLISHED,
	IP_VS_TCP_S_SYN_SENT,
	IP_VS_TCP_S_SYN_RECV,
	IP_VS_TCP_S_FIN_WAIT,
	IP_VS_TCP_S_TIME_WAIT,
	IP_VS_TCP_S_CLOSE,
	IP_VS_TCP_S_CLOSE_WAIT,
	IP_VS_TCP_S_LAST_ACK,
	IP_VS_TCP_S_LISTEN,
	IP_VS_TCP_S_SYNACK,
	IP_VS_TCP_S_LAST
};

/*
 *	UDP State Values
 */
enum {
	IP_VS_UDP_S_NORMAL,
	IP_VS_UDP_S_LAST,
};

/*
 *	ICMP State Values
 */
enum {
	IP_VS_ICMP_S_NORMAL,
	IP_VS_ICMP_S_LAST,
};

/*
 *	SCTP State Values
 */
enum ip_vs_sctp_states {
	IP_VS_SCTP_S_NONE,
	IP_VS_SCTP_S_INIT_CLI,
	IP_VS_SCTP_S_INIT_SER,
	IP_VS_SCTP_S_INIT_ACK_CLI,
	IP_VS_SCTP_S_INIT_ACK_SER,
	IP_VS_SCTP_S_ECHO_CLI,
	IP_VS_SCTP_S_ECHO_SER,
	IP_VS_SCTP_S_ESTABLISHED,
	IP_VS_SCTP_S_SHUT_CLI,
	IP_VS_SCTP_S_SHUT_SER,
	IP_VS_SCTP_S_SHUT_ACK_CLI,
	IP_VS_SCTP_S_SHUT_ACK_SER,
	IP_VS_SCTP_S_CLOSED,
	IP_VS_SCTP_S_LAST
};
/*
 *	Delta sequence info structure
 *	Each ip_vs_conn has 2 (output AND input seq. changes).
 *	Only used in the VS/NAT.
 */
struct ip_vs_seq {
	__u32			init_seq;	/* Add delta from this seq */
	__u32			delta;		/* Delta in sequence numbers */
	__u32			previous_delta;	/* Delta in sequence numbers
						 * before last resized pkt */
};

/*
 * counters per cpu
 */
struct ip_vs_counters {
	__u32			conns;		/* connections scheduled */
	__u32			inpkts;		/* incoming packets */
	__u32			outpkts;	/* outgoing packets */
	__u64			inbytes;	/* incoming bytes */
	__u64			outbytes;	/* outgoing bytes */
};
/*
 * Stats per cpu
 */
struct ip_vs_cpu_stats {
	struct ip_vs_counters	ustats;
	struct u64_stats_sync	syncp;
};

/*
 *	IPVS statistics objects
 */
struct ip_vs_estimator {
	struct list_head	list;

	u64			last_inbytes;
	u64			last_outbytes;
	u32			last_conns;
	u32			last_inpkts;
	u32			last_outpkts;

	u32			cps;
	u32			inpps;
	u32			outpps;
	u32			inbps;
	u32			outbps;
};

struct ip_vs_stats {
	struct ip_vs_stats_user	ustats;		/* statistics */
	struct ip_vs_estimator	est;		/* estimator */
	struct ip_vs_cpu_stats __percpu	*cpustats;	/* per cpu counters */
	spinlock_t		lock;		/* spin lock */
	struct ip_vs_stats_user	ustats0;	/* reset values */
};

struct dst_entry;
struct iphdr;
struct ip_vs_conn;
struct ip_vs_app;
struct sk_buff;
struct ip_vs_proto_data;

struct ip_vs_protocol {
	struct ip_vs_protocol	*next;
	char			*name;
	u16			protocol;
	u16			num_states;
	int			dont_defrag;

	void (*init)(struct ip_vs_protocol *pp);

	void (*exit)(struct ip_vs_protocol *pp);

	int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);

	void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);

	int (*conn_schedule)(int af, struct sk_buff *skb,
			     struct ip_vs_proto_data *pd,
			     int *verdict, struct ip_vs_conn **cpp,
			     struct ip_vs_iphdr *iph);

	struct ip_vs_conn *
	(*conn_in_get)(int af,
		       const struct sk_buff *skb,
		       const struct ip_vs_iphdr *iph,
		       int inverse);

	struct ip_vs_conn *
	(*conn_out_get)(int af,
			const struct sk_buff *skb,
			const struct ip_vs_iphdr *iph,
			int inverse);

	int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

	int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

	int (*csum_check)(int af, struct sk_buff *skb,
			  struct ip_vs_protocol *pp);

	const char *(*state_name)(int state);

	void (*state_transition)(struct ip_vs_conn *cp, int direction,
				 const struct sk_buff *skb,
				 struct ip_vs_proto_data *pd);

	int (*register_app)(struct net *net, struct ip_vs_app *inc);

	void (*unregister_app)(struct net *net, struct ip_vs_app *inc);

	int (*app_conn_bind)(struct ip_vs_conn *cp);

	void (*debug_packet)(int af, struct ip_vs_protocol *pp,
			     const struct sk_buff *skb,
			     int offset,
			     const char *msg);

	void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
};
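/* Illustrative sketch of how a transport handler fills the ops table above.
 * This mirrors, but does not copy, what handlers such as ip_vs_protocol_udp
 * (declared later in this header) provide; every value shown here is an
 * assumption for illustration only:
 *
 *	static struct ip_vs_protocol example_proto = {
 *		.name		= "EXAMPLE",
 *		.protocol	= IPPROTO_UDP,
 *		.num_states	= IP_VS_UDP_S_LAST,
 *		.dont_defrag	= 0,
 *		.conn_schedule	= example_conn_schedule,
 *		.conn_in_get	= ip_vs_conn_in_get_proto,
 *		.conn_out_get	= ip_vs_conn_out_get_proto,
 *		.state_transition = example_state_transition,
 *	};
 */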
/*
 * protocol data per netns
 */
struct ip_vs_proto_data {
	struct ip_vs_proto_data	*next;
	struct ip_vs_protocol	*pp;
	int			*timeout_table;	/* protocol timeout table */
	atomic_t		appcnt;		/* counter of proto app incs. */
	struct tcp_states_t	*tcp_state_table;
};

extern struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
						      unsigned short proto);

struct ip_vs_conn_param {
	struct net			*net;
	const union nf_inet_addr	*caddr;
	const union nf_inet_addr	*vaddr;
	__be16				cport;
	__be16				vport;
	__u16				protocol;
	u16				af;

	const struct ip_vs_pe		*pe;
	char				*pe_data;
	__u8				pe_data_len;
};

/*
 *	IP_VS structure allocated for each dynamically scheduled connection
 */
struct ip_vs_conn {
	struct hlist_node	c_list;		/* hashed list heads */
	/* Protocol, addresses and port numbers */
	__be16			cport;
	__be16			dport;
	__be16			vport;
	u16			af;		/* address family */
	union nf_inet_addr	caddr;		/* client address */
	union nf_inet_addr	vaddr;		/* virtual address */
	union nf_inet_addr	daddr;		/* destination address */
	volatile __u32		flags;		/* status flags */
	__u16			protocol;	/* Which protocol (TCP/UDP) */
#ifdef CONFIG_NET_NS
	struct net		*net;		/* Name space */
#endif

	/* counter and timer */
	atomic_t		refcnt;		/* reference count */
	struct timer_list	timer;		/* Expiration timer */
	volatile unsigned long	timeout;	/* timeout */

	/* Flags and state transition */
	spinlock_t		lock;		/* lock for state transition */
	volatile __u16		state;		/* state info */
	volatile __u16		old_state;	/* old state, to be used for
						 * state transition triggered
						 * synchronization
						 */
	__u32			fwmark;		/* Firewall mark from skb */
	unsigned long		sync_endtime;	/* jiffies + sent_retries */

	/* Control members */
	struct ip_vs_conn	*control;	/* Master control connection */
	atomic_t		n_control;	/* Number of controlled ones */
	struct ip_vs_dest	*dest;		/* real server */
	atomic_t		in_pkts;	/* incoming packet counter */

	/* Packet transmitter for different forwarding methods.  If it
	 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
	 * otherwise this must be changed to a sk_buff **.
	 * NF_ACCEPT can be returned when destination is local.
	 */
	int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
			   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);

	/* Note: we can group the following members into a structure,
	 * in order to save more space, and the following members are
	 * only used in VS/NAT anyway */
	struct ip_vs_app	*app;		/* bound ip_vs_app object */
	void			*app_data;	/* Application private data */
	struct ip_vs_seq	in_seq;		/* incoming seq. struct */
	struct ip_vs_seq	out_seq;	/* outgoing seq. struct */

	const struct ip_vs_pe	*pe;
	char			*pe_data;
	__u8			pe_data_len;

	struct rcu_head		rcu_head;
};

/*
 *  To save some memory in conn table when name space is disabled.
 */
static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
{
#ifdef CONFIG_NET_NS
	return cp->net;
#else
	return &init_net;
#endif
}

static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
{
#ifdef CONFIG_NET_NS
	cp->net = net;
#endif
}

static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
				    struct net *net)
{
#ifdef CONFIG_NET_NS
	return cp->net == net;
#else
	return 1;
#endif
}
/*
 *	Extended internal versions of struct ip_vs_service_user and
 *	ip_vs_dest_user for IPv6 support.
 *
 *	We need these to conveniently pass around service and destination
 *	options, but unfortunately, we also need to keep the old definitions to
 *	maintain userspace backwards compatibility for the setsockopt interface.
 */
struct ip_vs_service_user_kern {
	/* virtual service addresses */
	u16			af;
	u16			protocol;
	union nf_inet_addr	addr;		/* virtual ip address */
	u16			port;
	u32			fwmark;		/* firewall mark of service */

	/* virtual service options */
	char			*sched_name;
	char			*pe_name;
	unsigned int		flags;		/* virtual service flags */
	unsigned int		timeout;	/* persistent timeout in sec */
	u32			netmask;	/* persistent netmask */
};


struct ip_vs_dest_user_kern {
	/* destination server address */
	union nf_inet_addr	addr;
	u16			port;

	/* real server options */
	unsigned int		conn_flags;	/* connection flags */
	int			weight;		/* destination weight */

	/* thresholds for active connections */
	u32			u_threshold;	/* upper threshold */
	u32			l_threshold;	/* lower threshold */
};


/*
 *	The information about the virtual service offered to the net
 *	and the forwarding entries
 */
struct ip_vs_service {
	struct list_head	s_list;	  /* for normal service table */
	struct list_head	f_list;	  /* for fwmark-based service table */
	atomic_t		refcnt;	  /* reference counter */
	atomic_t		usecnt;	  /* use counter */

	u16			af;	  /* address family */
	__u16			protocol; /* which protocol (TCP/UDP) */
	union nf_inet_addr	addr;	  /* IP address for virtual service */
	__be16			port;	  /* port number for the service */
	__u32			fwmark;	  /* firewall mark of the service */
	unsigned int		flags;	  /* service status flags */
	unsigned int		timeout;  /* persistent timeout in ticks */
	__be32			netmask;  /* grouping granularity */
	struct net		*net;

	struct list_head	destinations;	/* real server d-linked list */
	__u32			num_dests;	/* number of servers */
	struct ip_vs_stats	stats;		/* statistics for the service */
	struct ip_vs_app	*inc;		/* bind conns to this app inc */

	/* for scheduling */
	struct ip_vs_scheduler	*scheduler;	/* bound scheduler object */
	rwlock_t		sched_lock;	/* lock sched_data */
	void			*sched_data;	/* scheduler application data */

	/* alternate persistence engine */
	struct ip_vs_pe		*pe;
};

/* Information for cached dst */
struct ip_vs_dest_dst {
	struct dst_entry	*dst_cache;	/* destination cache entry */
	u32			dst_cookie;
	union nf_inet_addr	dst_saddr;
	struct rcu_head		rcu_head;
};
/*
 *	The real server destination forwarding entry
 *	with ip address, port number, and so on.
 */
struct ip_vs_dest {
	struct list_head	n_list;	  /* for the dests in the service */
	struct hlist_node	d_list;	  /* for table with all the dests */

	u16			af;		/* address family */
	__be16			port;		/* port number of the server */
	union nf_inet_addr	addr;		/* IP address of the server */
	volatile unsigned int	flags;		/* dest status flags */
	atomic_t		conn_flags;	/* flags to copy to conn */
	atomic_t		weight;		/* server weight */

	atomic_t		refcnt;		/* reference counter */
	struct ip_vs_stats	stats;		/* statistics */

	/* connection counters and thresholds */
	atomic_t		activeconns;	/* active connections */
	atomic_t		inactconns;	/* inactive connections */
	atomic_t		persistconns;	/* persistent connections */
	__u32			u_threshold;	/* upper threshold */
	__u32			l_threshold;	/* lower threshold */

	/* for destination cache */
	spinlock_t		dst_lock;	/* lock of dst_cache */
	struct ip_vs_dest_dst __rcu *dest_dst;	/* cached dst info */

	/* for virtual service */
	struct ip_vs_service	*svc;		/* service it belongs to */
	__u16			protocol;	/* which protocol (TCP/UDP) */
	__be16			vport;		/* virtual port number */
	union nf_inet_addr	vaddr;		/* virtual IP address */
	__u32			vfwmark;	/* firewall mark of service */

	struct rcu_head		rcu_head;
	unsigned int		in_rs_table:1;	/* we are in rs_table */
};


/*
 *	The scheduler object
 */
struct ip_vs_scheduler {
	struct list_head	n_list;		/* d-linked list head */
	char			*name;		/* scheduler name */
	atomic_t		refcnt;		/* reference counter */
	struct module		*module;	/* THIS_MODULE/NULL */

	/* scheduler initializing service */
	int (*init_service)(struct ip_vs_service *svc);
	/* scheduling service finish */
	int (*done_service)(struct ip_vs_service *svc);
	/* scheduler updating service */
	int (*update_service)(struct ip_vs_service *svc);
	/* dest is linked */
	int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
	/* dest is unlinked */
	int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
	/* dest is updated */
	int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);

	/* selecting a server from the given service */
	struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc,
				       const struct sk_buff *skb);
};

/* The persistence engine object */
struct ip_vs_pe {
	struct list_head	n_list;		/* d-linked list head */
	char			*name;		/* scheduler name */
	atomic_t		refcnt;		/* reference counter */
	struct module		*module;	/* THIS_MODULE/NULL */

	/* get the connection template, if any */
	int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
	bool (*ct_match)(const struct ip_vs_conn_param *p,
			 struct ip_vs_conn *ct);
	u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
			   bool inverse);
	int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
};
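/* Illustrative sketch of a minimal scheduler built on the object above;
 * register_ip_vs_scheduler()/unregister_ip_vs_scheduler() are declared later
 * in this header.  Names prefixed "example_" are hypothetical, and the
 * selection logic is deliberately trivial:
 *
 *	static struct ip_vs_dest *example_schedule(struct ip_vs_service *svc,
 *						   const struct sk_buff *skb)
 *	{
 *		struct ip_vs_dest *dest;
 *
 *		list_for_each_entry(dest, &svc->destinations, n_list)
 *			if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
 *			    atomic_read(&dest->weight) > 0)
 *				return dest;
 *		ip_vs_scheduler_err(svc, "no destination available");
 *		return NULL;
 *	}
 *
 *	static struct ip_vs_scheduler example_scheduler = {
 *		.name		= "example",
 *		.refcnt		= ATOMIC_INIT(0),
 *		.module		= THIS_MODULE,
 *		.n_list		= LIST_HEAD_INIT(example_scheduler.n_list),
 *		.schedule	= example_schedule,
 *	};
 *
 *	// module init/exit would then call
 *	// register_ip_vs_scheduler(&example_scheduler) and
 *	// unregister_ip_vs_scheduler(&example_scheduler).
 */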
/*
 *	The application module object (a.k.a. app incarnation)
 */
struct ip_vs_app {
	struct list_head	a_list;		/* member in app list */
	int			type;		/* IP_VS_APP_TYPE_xxx */
	char			*name;		/* application module name */
	__u16			protocol;
	struct module		*module;	/* THIS_MODULE/NULL */
	struct list_head	incs_list;	/* list of incarnations */

	/* members for application incarnations */
	struct list_head	p_list;		/* member in proto app list */
	struct ip_vs_app	*app;		/* its real application */
	__be16			port;		/* port number in net order */
	atomic_t		usecnt;		/* usage counter */
	struct rcu_head		rcu_head;

	/*
	 * output hook: Process packet in inout direction, diff set for TCP.
	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
	 *	   2=Mangled but checksum was not updated
	 */
	int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
		       struct sk_buff *, int *diff);

	/*
	 * input hook: Process packet in outin direction, diff set for TCP.
	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
	 *	   2=Mangled but checksum was not updated
	 */
	int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
		      struct sk_buff *, int *diff);

	/* ip_vs_app initializer */
	int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);

	/* ip_vs_app finish */
	int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);


	/* not used now */
	int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *,
			 struct ip_vs_protocol *);

	void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);

	int			*timeout_table;
	int			*timeouts;
	int			timeouts_size;

	int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
			     int *verdict, struct ip_vs_conn **cpp);

	struct ip_vs_conn *
	(*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
		       const struct iphdr *iph, int inverse);

	struct ip_vs_conn *
	(*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
			const struct iphdr *iph, int inverse);

	int (*state_transition)(struct ip_vs_conn *cp, int direction,
				const struct sk_buff *skb,
				struct ip_vs_app *app);

	void (*timeout_change)(struct ip_vs_app *app, int flags);
};

struct ipvs_master_sync_state {
	struct list_head	sync_queue;
	struct ip_vs_sync_buff	*sync_buff;
	int			sync_queue_len;
	unsigned int		sync_queue_delay;
	struct task_struct	*master_thread;
	struct delayed_work	master_wakeup_work;
	struct netns_ipvs	*ipvs;
};

/* IPVS in network namespace */
struct netns_ipvs {
	int			gen;		/* Generation */
	int			enable;		/* enable like nf_hooks do */
	/*
	 *	Hash table: for real service lookups
	 */
	#define IP_VS_RTAB_BITS 4
	#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
	#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)

	struct hlist_head	rs_table[IP_VS_RTAB_SIZE];
	/* ip_vs_app */
	struct list_head	app_list;
	/* ip_vs_proto */
	#define IP_VS_PROTO_TAB_SIZE	32	/* must be power of 2 */
	struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
	/* ip_vs_proto_tcp */
#ifdef CONFIG_IP_VS_PROTO_TCP
	#define	TCP_APP_TAB_BITS	4
	#define	TCP_APP_TAB_SIZE	(1 << TCP_APP_TAB_BITS)
	#define	TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)
	struct list_head	tcp_apps[TCP_APP_TAB_SIZE];
#endif
	/* ip_vs_proto_udp */
#ifdef CONFIG_IP_VS_PROTO_UDP
	#define	UDP_APP_TAB_BITS	4
	#define	UDP_APP_TAB_SIZE	(1 << UDP_APP_TAB_BITS)
	#define	UDP_APP_TAB_MASK	(UDP_APP_TAB_SIZE - 1)
	struct list_head	udp_apps[UDP_APP_TAB_SIZE];
#endif
	/* ip_vs_proto_sctp */
#ifdef CONFIG_IP_VS_PROTO_SCTP
	#define SCTP_APP_TAB_BITS	4
	#define SCTP_APP_TAB_SIZE	(1 << SCTP_APP_TAB_BITS)
	#define SCTP_APP_TAB_MASK	(SCTP_APP_TAB_SIZE - 1)
	/* Hash table for SCTP application incarnations	 */
	struct list_head	sctp_apps[SCTP_APP_TAB_SIZE];
#endif
	/* ip_vs_conn */
	atomic_t		conn_count;	/* connection counter */

	/* ip_vs_ctl */
	struct ip_vs_stats	tot_stats;	/* Statistics & est. */

	int			num_services;	/* no of virtual services */

	/* Trash for destinations */
	struct list_head	dest_trash;
	/* Service counters */
	atomic_t		ftpsvc_counter;
	atomic_t		nullsvc_counter;

#ifdef CONFIG_SYSCTL
	/* 1/rate drop and drop-entry variables */
	struct delayed_work	defense_work;	/* Work handler */
	int			drop_rate;
	int			drop_counter;
	atomic_t		dropentry;
	/* locks in ctl.c */
	spinlock_t		dropentry_lock;	 /* drop entry handling */
	spinlock_t		droppacket_lock; /* drop packet handling */
	spinlock_t		securetcp_lock;	 /* state and timeout tables */

	/* sys-ctl struct */
	struct ctl_table_header	*sysctl_hdr;
	struct ctl_table	*sysctl_tbl;
#endif

	/* sysctl variables */
	int			sysctl_amemthresh;
	int			sysctl_am_droprate;
	int			sysctl_drop_entry;
	int			sysctl_drop_packet;
	int			sysctl_secure_tcp;
#ifdef CONFIG_IP_VS_NFCT
	int			sysctl_conntrack;
#endif
	int			sysctl_snat_reroute;
	int			sysctl_sync_ver;
	int			sysctl_sync_ports;
	int			sysctl_sync_qlen_max;
	int			sysctl_sync_sock_size;
	int			sysctl_cache_bypass;
	int			sysctl_expire_nodest_conn;
	int			sysctl_expire_quiescent_template;
	int			sysctl_sync_threshold[2];
	unsigned int		sysctl_sync_refresh_period;
	int			sysctl_sync_retries;
	int			sysctl_nat_icmp_send;
	int			sysctl_pmtu_disc;
	int			sysctl_backup_only;

	/* ip_vs_lblc */
	int			sysctl_lblc_expiration;
	struct ctl_table_header	*lblc_ctl_header;
	struct ctl_table	*lblc_ctl_table;
	/* ip_vs_lblcr */
	int			sysctl_lblcr_expiration;
	struct ctl_table_header	*lblcr_ctl_header;
	struct ctl_table	*lblcr_ctl_table;
	/* ip_vs_est */
	struct list_head	est_list;	/* estimator list */
	spinlock_t		est_lock;
	struct timer_list	est_timer;	/* Estimation timer */
	/* ip_vs_sync */
	spinlock_t		sync_lock;
	struct ipvs_master_sync_state *ms;
	spinlock_t		sync_buff_lock;
	struct task_struct	**backup_threads;
	int			threads_mask;
	int			send_mesg_maxlen;
	int			recv_mesg_maxlen;
	volatile int		sync_state;
	volatile int		master_syncid;
	volatile int		backup_syncid;
	struct mutex		sync_mutex;
	/* multicast interface name */
	char			master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
	char			backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
	/* net name space ptr */
	struct net		*net;		/* Needed by timer routines */
};

#define DEFAULT_SYNC_THRESHOLD	3
#define DEFAULT_SYNC_PERIOD	50
#define DEFAULT_SYNC_VER	1
#define DEFAULT_SYNC_REFRESH_PERIOD	(0U * HZ)
#define DEFAULT_SYNC_RETRIES		0
#define IPVS_SYNC_WAKEUP_RATE	8
#define IPVS_SYNC_QLEN_MAX	(IPVS_SYNC_WAKEUP_RATE * 4)
#define IPVS_SYNC_SEND_DELAY	(HZ / 50)
#define IPVS_SYNC_CHECK_PERIOD	HZ
#define IPVS_SYNC_FLUSH_TIME	(HZ * 2)
#define IPVS_SYNC_PORTS_MAX	(1 << 6)

#ifdef CONFIG_SYSCTL
static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_threshold[0];
}

static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
	return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
}

static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
	return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_retries;
}

static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_ver;
}

static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
	return ACCESS_ONCE(ipvs->sysctl_sync_ports);
}

static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_qlen_max;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_sock_size;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_pmtu_disc;
}

static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
	return ipvs->sync_state & IP_VS_STATE_BACKUP &&
	       ipvs->sysctl_backup_only;
}

#else

static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_THRESHOLD;
}

static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_PERIOD;
}

static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_REFRESH_PERIOD;
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_RETRIES & 3;
}

static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_VER;
}

static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
	return IPVS_SYNC_QLEN_MAX;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
	return 0;
}

#endif

/*
 *	IPVS core functions
 *	(from ip_vs_core.c)
 */
extern const char *ip_vs_proto_name(unsigned int proto);
extern void ip_vs_init_hash_table(struct list_head *table, int rows);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))

#define IP_VS_APP_TYPE_FTP	1

/*
 *	ip_vs_conn handling functions
 *	(from ip_vs_conn.c)
 */

enum {
	IP_VS_DIR_INPUT = 0,
	IP_VS_DIR_OUTPUT,
	IP_VS_DIR_INPUT_ONLY,
	IP_VS_DIR_LAST,
};

static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
					 const union nf_inet_addr *caddr,
					 __be16 cport,
					 const union nf_inet_addr *vaddr,
					 __be16 vport,
					 struct ip_vs_conn_param *p)
{
	p->net = net;
	p->af = af;
	p->protocol = protocol;
	p->caddr = caddr;
	p->cport = cport;
	p->vaddr = vaddr;
	p->vport = vport;
	p->pe = NULL;
	p->pe_data = NULL;
}
struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
					   const struct ip_vs_iphdr *iph,
					   int inverse);

struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
					    const struct ip_vs_iphdr *iph,
					    int inverse);

/* Get reference to gain full access to conn.
 * By default, RCU read-side critical sections have access only to
 * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference.
 */
static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
{
	return atomic_inc_not_zero(&cp->refcnt);
}

/* put back the conn without restarting its timer */
static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&cp->refcnt);
}
extern void ip_vs_conn_put(struct ip_vs_conn *cp);
extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);

struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
				  const union nf_inet_addr *daddr,
				  __be16 dport, unsigned int flags,
				  struct ip_vs_dest *dest, __u32 fwmark);
extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);

extern const char *ip_vs_state_name(__u16 proto, int state);

extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
extern int ip_vs_check_template(struct ip_vs_conn *ct);
extern void ip_vs_random_dropentry(struct net *net);
extern int ip_vs_conn_init(void);
extern void ip_vs_conn_cleanup(void);

static inline void ip_vs_control_del(struct ip_vs_conn *cp)
{
	struct ip_vs_conn *ctl_cp = cp->control;
	if (!ctl_cp) {
		IP_VS_ERR_BUF("request control DEL for uncontrolled: "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		return;
	}

	IP_VS_DBG_BUF(7, "DELeting control for: "
		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
		      ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
		      ntohs(ctl_cp->cport));

	cp->control = NULL;
	if (atomic_read(&ctl_cp->n_control) == 0) {
		IP_VS_ERR_BUF("BUG control DEL with n=0 : "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		return;
	}
	atomic_dec(&ctl_cp->n_control);
}

static inline void
ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
{
	if (cp->control) {
		IP_VS_ERR_BUF("request control ADD for already controlled: "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		ip_vs_control_del(cp);
	}

	IP_VS_DBG_BUF(7, "ADDing control for: "
		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
		      ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
		      ntohs(ctl_cp->cport));

	cp->control = ctl_cp;
	atomic_inc(&ctl_cp->n_control);
}
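/* Illustrative sketch tying together ip_vs_conn_fill_param(), the lookup
 * helpers and the reference counting rules above.  The lookup helpers hand
 * back a referenced connection (taken internally with __ip_vs_conn_get()),
 * which the caller later drops with ip_vs_conn_put().  The surrounding
 * function and its arguments are hypothetical:
 *
 *	static void example_lookup(struct net *net, int af, __u16 proto,
 *				   const union nf_inet_addr *caddr, __be16 cport,
 *				   const union nf_inet_addr *vaddr, __be16 vport)
 *	{
 *		struct ip_vs_conn_param p;
 *		struct ip_vs_conn *cp;
 *
 *		ip_vs_conn_fill_param(net, af, proto, caddr, cport,
 *				      vaddr, vport, &p);
 *		cp = ip_vs_conn_in_get(&p);	// referenced conn or NULL
 *		if (!cp)
 *			return;
 *		// ... use cp ...
 *		ip_vs_conn_put(cp);		// drop the reference when done
 *	}
 */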
/*
 * IPVS netns init & cleanup functions
 */
extern int ip_vs_estimator_net_init(struct net *net);
extern int ip_vs_control_net_init(struct net *net);
extern int ip_vs_protocol_net_init(struct net *net);
extern int ip_vs_app_net_init(struct net *net);
extern int ip_vs_conn_net_init(struct net *net);
extern int ip_vs_sync_net_init(struct net *net);
extern void ip_vs_conn_net_cleanup(struct net *net);
extern void ip_vs_app_net_cleanup(struct net *net);
extern void ip_vs_protocol_net_cleanup(struct net *net);
extern void ip_vs_control_net_cleanup(struct net *net);
extern void ip_vs_estimator_net_cleanup(struct net *net);
extern void ip_vs_sync_net_cleanup(struct net *net);
extern void ip_vs_service_net_cleanup(struct net *net);

/*
 *	IPVS application functions
 *	(from ip_vs_app.c)
 */
#define IP_VS_APP_MAX_PORTS  8
extern struct ip_vs_app *register_ip_vs_app(struct net *net,
					    struct ip_vs_app *app);
extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
				  __u16 proto, __u16 port);
extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
extern void ip_vs_app_inc_put(struct ip_vs_app *inc);

extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);

void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
void ip_vs_unbind_pe(struct ip_vs_service *svc);
int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);

/*
 * Use a #define to avoid all of module.h just for these trivial ops
 */
#define ip_vs_pe_get(pe)			\
	if (pe && pe->module)			\
		__module_get(pe->module);

#define ip_vs_pe_put(pe)			\
	if (pe && pe->module)			\
		module_put(pe->module);

/*
 *	IPVS protocol functions (from ip_vs_proto.c)
 */
extern int ip_vs_protocol_init(void);
extern void ip_vs_protocol_cleanup(void);
extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
extern int *ip_vs_create_timeout_table(int *table, int size);
extern int
ip_vs_set_state_timeout(int *table, int num, const char *const *names,
			const char *name, int to);
extern void
ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
			  const struct sk_buff *skb,
			  int offset, const char *msg);

extern struct ip_vs_protocol ip_vs_protocol_tcp;
extern struct ip_vs_protocol ip_vs_protocol_udp;
extern struct ip_vs_protocol ip_vs_protocol_icmp;
extern struct ip_vs_protocol ip_vs_protocol_esp;
extern struct ip_vs_protocol ip_vs_protocol_ah;
extern struct ip_vs_protocol ip_vs_protocol_sctp;

/*
 *	Registering/unregistering scheduler functions
 *	(from ip_vs_sched.c)
 */
extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
extern int ip_vs_bind_scheduler(struct ip_vs_service *svc,
				struct ip_vs_scheduler *scheduler);
extern int ip_vs_unbind_scheduler(struct ip_vs_service *svc);
extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
extern struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
	       struct ip_vs_proto_data *pd, int *ignored,
	       struct ip_vs_iphdr *iph);
extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
		       struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);

extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);


/*
 *	IPVS control data and functions (from ip_vs_ctl.c)
 */
extern struct ip_vs_stats ip_vs_stats;
extern int sysctl_ip_vs_sync_ver;

extern struct ip_vs_service *
ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
		  const union nf_inet_addr *vaddr, __be16 vport);

static inline void ip_vs_service_put(struct ip_vs_service *svc)
{
	atomic_dec(&svc->usecnt);
}

extern bool
ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
		       const union nf_inet_addr *daddr, __be16 dport);

extern int ip_vs_use_count_inc(void);
extern void ip_vs_use_count_dec(void);
extern int ip_vs_register_nl_ioctl(void);
extern void ip_vs_unregister_nl_ioctl(void);
extern int ip_vs_control_init(void);
extern void ip_vs_control_cleanup(void);
extern struct ip_vs_dest *
ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
		__be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
		__u16 protocol, __u32 fwmark, __u32 flags);
extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);


/*
 *	IPVS sync daemon data and function prototypes
 *	(from ip_vs_sync.c)
 */
extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
			     __u8 syncid);
extern int stop_sync_thread(struct net *net, int state);
extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);


/*
 *	IPVS rate estimator prototypes (from ip_vs_est.c)
 */
extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
extern void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
				 struct ip_vs_stats *stats);

/*
 *	Various IPVS packet transmitters (from ip_vs_xmit.c)
 */
extern int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
			   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
extern int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
			     struct ip_vs_protocol *pp,
			     struct ip_vs_iphdr *iph);
extern int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
			  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
extern int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
			     struct ip_vs_protocol *pp,
			     struct ip_vs_iphdr *iph);
extern int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
extern int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
			   struct ip_vs_protocol *pp, int offset,
			   unsigned int hooknum, struct ip_vs_iphdr *iph);
extern void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
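/* Illustrative note (an assumption about how these are wired together, not
 * something declared in this header): a connection's packet_xmit callback is
 * chosen from the transmitters above according to its forwarding method
 * flags, roughly:
 *
 *	switch (cp->flags & IP_VS_CONN_F_FWD_MASK) {
 *	case IP_VS_CONN_F_MASQ:		// NAT
 *		cp->packet_xmit = ip_vs_nat_xmit;    break;
 *	case IP_VS_CONN_F_TUNNEL:	// IPIP tunnel
 *		cp->packet_xmit = ip_vs_tunnel_xmit; break;
 *	case IP_VS_CONN_F_DROUTE:	// direct routing
 *		cp->packet_xmit = ip_vs_dr_xmit;     break;
 *	case IP_VS_CONN_F_BYPASS:	// cache bypass
 *		cp->packet_xmit = ip_vs_bypass_xmit; break;
 *	case IP_VS_CONN_F_LOCALNODE:	// local delivery
 *		cp->packet_xmit = ip_vs_null_xmit;   break;
 *	}
 */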
#ifdef CONFIG_IP_VS_IPV6
extern int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
				struct ip_vs_protocol *pp,
				struct ip_vs_iphdr *iph);
extern int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
			     struct ip_vs_protocol *pp,
			     struct ip_vs_iphdr *iph);
extern int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
				struct ip_vs_protocol *pp,
				struct ip_vs_iphdr *iph);
extern int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
			    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
extern int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
			      struct ip_vs_protocol *pp, int offset,
			      unsigned int hooknum, struct ip_vs_iphdr *iph);
#endif

#ifdef CONFIG_SYSCTL
/*
 *	This is a simple mechanism to ignore packets when
 *	we are loaded. Just set ip_vs_drop_rate to 'n' and
 *	we start to drop 1/rate of the packets
 */

static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
	if (!ipvs->drop_rate)
		return 0;
	if (--ipvs->drop_counter > 0)
		return 0;
	ipvs->drop_counter = ipvs->drop_rate;
	return 1;
}
#else
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif

/*
 *	ip_vs_fwd_tag returns the forwarding tag of the connection
 */
#define IP_VS_FWD_METHOD(cp)  (cp->flags & IP_VS_CONN_F_FWD_MASK)

static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
{
	char fwd;

	switch (IP_VS_FWD_METHOD(cp)) {
	case IP_VS_CONN_F_MASQ:
		fwd = 'M'; break;
	case IP_VS_CONN_F_LOCALNODE:
		fwd = 'L'; break;
	case IP_VS_CONN_F_TUNNEL:
		fwd = 'T'; break;
	case IP_VS_CONN_F_DROUTE:
		fwd = 'R'; break;
	case IP_VS_CONN_F_BYPASS:
		fwd = 'B'; break;
	default:
		fwd = '?'; break;
	}
	return fwd;
}

extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
			   struct ip_vs_conn *cp, int dir);

#ifdef CONFIG_IP_VS_IPV6
extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
			      struct ip_vs_conn *cp, int dir);
#endif

extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);

static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
{
	__be32 diff[2] = { ~old, new };

	return csum_partial(diff, sizeof(diff), oldsum);
}

#ifdef CONFIG_IP_VS_IPV6
static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
					__wsum oldsum)
{
	__be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
			    new[3],  new[2],  new[1],  new[0] };

	return csum_partial(diff, sizeof(diff), oldsum);
}
#endif

static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
{
	__be16 diff[2] = { ~old, new };

	return csum_partial(diff, sizeof(diff), oldsum);
}
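/* Illustrative sketch of the intended use of the ip_vs_check_diff*() helpers
 * above: when NAT rewrites an address or port, the L4 checksum is updated
 * incrementally from the old/new values instead of being recomputed over the
 * whole payload.  Variable names here are hypothetical:
 *
 *	uhdr->check = csum_fold(ip_vs_check_diff4(old_addr, new_addr,
 *			ip_vs_check_diff2(old_port, new_port,
 *					  ~csum_unfold(uhdr->check))));
 */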
/*
 *	Forget current conntrack (unconfirmed) and attach notrack entry
 */
static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	if (!ct || !nf_ct_is_untracked(ct)) {
		nf_conntrack_put(skb->nfct);
		skb->nfct = &nf_ct_untracked_get()->ct_general;
		skb->nfctinfo = IP_CT_NEW;
		nf_conntrack_get(skb->nfct);
	}
#endif
}

#ifdef CONFIG_IP_VS_NFCT
/*
 *	Netfilter connection tracking
 *	(from ip_vs_nfct.c)
 */
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
#ifdef CONFIG_SYSCTL
	return ipvs->sysctl_conntrack;
#else
	return 0;
#endif
}

extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
				   int outin);
extern int ip_vs_confirm_conntrack(struct sk_buff *skb);
extern void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
				      struct ip_vs_conn *cp, u_int8_t proto,
				      const __be16 port, int from_rs);
extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);

#else

static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline void ip_vs_update_conntrack(struct sk_buff *skb,
					  struct ip_vs_conn *cp, int outin)
{
}

static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
{
	return NF_ACCEPT;
}

static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
{
}
/* CONFIG_IP_VS_NFCT */
#endif

static inline unsigned int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
	/*
	 * We think the overhead of processing active connections is 256
	 * times higher than that of inactive connections on average. (This
	 * factor of 256 might not be accurate; it may be changed later.) We
	 * use the following formula to estimate the overhead now:
	 *		  dest->activeconns*256 + dest->inactconns
	 */
	return (atomic_read(&dest->activeconns) << 8) +
		atomic_read(&dest->inactconns);
}

#endif	/* _NET_IP_VS_H */