/* SPDX-License-Identifier: GPL-2.0 */
/* IP Virtual Server
 * data structure and functionality definitions
 */

#ifndef _NET_IP_VS_H
#define _NET_IP_VS_H

#include <linux/ip_vs.h>                /* definitions shared with userland */

#include <asm/types.h>                  /* for __uXX types */

#include <linux/list.h>                 /* for struct list_head */
#include <linux/spinlock.h>             /* for rwlock_t */
#include <linux/atomic.h>               /* for atomic_t */
#include <linux/refcount.h>             /* for refcount_t */

#include <linux/compiler.h>
#include <linux/timer.h>
#include <linux/bug.h>

#include <net/checksum.h>
#include <linux/netfilter.h>            /* for union nf_inet_addr */
#include <linux/ip.h>
#include <linux/ipv6.h>                 /* for struct ipv6hdr */
#include <net/ipv6.h>
#if IS_ENABLED(CONFIG_IP_VS_IPV6)
#include <linux/netfilter_ipv6/ip6_tables.h>
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/net_namespace.h>          /* net namespace */

#define IP_VS_HDR_INVERSE       1
#define IP_VS_HDR_ICMP          2

/* Generic access of ipvs struct */
static inline struct netns_ipvs *net_ipvs(struct net *net)
{
        return net->ipvs;
}

/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;

struct ip_vs_iphdr {
        int hdr_flags;  /* ipvs flags */
        __u32 off;      /* Where the IPv4 or IPv6 header starts */
        __u32 len;      /* IPv4: simply where L4 starts
                         * IPv6: where the L4 transport header starts */
        __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag) */
        __s16 protocol;
        __s32 flags;
        union nf_inet_addr saddr;
        union nf_inet_addr daddr;
};

static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
                                     int len, void *buffer)
{
        return skb_header_pointer(skb, offset, len, buffer);
}
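/* Usage sketch (illustrative only, not part of this header's API): pulling a
 * transport header through frag_safe_skb_hp(), which copies into the local
 * buffer only when the requested bytes are not linear in the skb.  The helper
 * name "example_peek_udp_dport", its "l4_off" argument and the <linux/udp.h>
 * include are assumptions made up for the example.
 *
 *      static int example_peek_udp_dport(const struct sk_buff *skb, int l4_off)
 *      {
 *              struct udphdr _udph;
 *              const struct udphdr *uh;
 *
 *              uh = frag_safe_skb_hp(skb, l4_off, sizeof(_udph), &_udph);
 *              if (!uh)
 *                      return -EINVAL;
 *              return ntohs(uh->dest);
 *      }
 */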
/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
 * IPv6 requires some extra work, as finding the proper header position
 * depends on the IPv6 extension headers.
 */
static inline int
ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset,
                       int hdr_flags, struct ip_vs_iphdr *iphdr)
{
        iphdr->hdr_flags = hdr_flags;
        iphdr->off = offset;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
                struct ipv6hdr _iph;
                const struct ipv6hdr *iph = skb_header_pointer(
                        skb, offset, sizeof(_iph), &_iph);
                if (!iph)
                        return 0;

                iphdr->saddr.in6 = iph->saddr;
                iphdr->daddr.in6 = iph->daddr;
                /* ipv6_find_hdr() updates len, flags */
                iphdr->len = offset;
                iphdr->flags = 0;
                iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
                                                &iphdr->fragoffs,
                                                &iphdr->flags);
                if (iphdr->protocol < 0)
                        return 0;
        } else
#endif
        {
                struct iphdr _iph;
                const struct iphdr *iph = skb_header_pointer(
                        skb, offset, sizeof(_iph), &_iph);
                if (!iph)
                        return 0;

                iphdr->len = offset + iph->ihl * 4;
                iphdr->fragoffs = 0;
                iphdr->protocol = iph->protocol;
                iphdr->saddr.ip = iph->saddr;
                iphdr->daddr.ip = iph->daddr;
        }

        return 1;
}

static inline int
ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset,
                        bool inverse, struct ip_vs_iphdr *iphdr)
{
        int hdr_flags = IP_VS_HDR_ICMP;

        if (inverse)
                hdr_flags |= IP_VS_HDR_INVERSE;

        return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr);
}

static inline int
ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse,
                   struct ip_vs_iphdr *iphdr)
{
        int hdr_flags = 0;

        if (inverse)
                hdr_flags |= IP_VS_HDR_INVERSE;

        return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb),
                                      hdr_flags, iphdr);
}

static inline bool
ip_vs_iph_inverse(const struct ip_vs_iphdr *iph)
{
        return !!(iph->hdr_flags & IP_VS_HDR_INVERSE);
}

static inline bool
ip_vs_iph_icmp(const struct ip_vs_iphdr *iph)
{
        return !!(iph->hdr_flags & IP_VS_HDR_ICMP);
}

static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
                                   const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                dst->in6 = src->in6;
        else
#endif
        dst->ip = src->ip;
}

static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst,
                                  const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
                dst->in6 = src->in6;
                return;
        }
#endif
        dst->ip = src->ip;
        dst->all[1] = 0;
        dst->all[2] = 0;
        dst->all[3] = 0;
}

static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
                                   const union nf_inet_addr *b)
{
#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                return ipv6_addr_equal(&a->in6, &b->in6);
#endif
        return a->ip == b->ip;
}
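/* Usage sketch (illustrative only): filling an ip_vs_iphdr from a packet and
 * comparing its destination against a virtual address with the helpers above.
 * "example_match_vaddr" and its "vaddr" argument are made-up names.
 *
 *      static bool example_match_vaddr(int af, const struct sk_buff *skb,
 *                                      const union nf_inet_addr *vaddr)
 *      {
 *              struct ip_vs_iphdr iph;
 *
 *              if (!ip_vs_fill_iph_skb(af, skb, false, &iph))
 *                      return false;
 *              return ip_vs_addr_equal(af, &iph.daddr, vaddr);
 *      }
 */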
#ifdef CONFIG_IP_VS_DEBUG
#include <linux/net.h>

int ip_vs_get_debug_level(void);

static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
                                         const union nf_inet_addr *addr,
                                         int *idx)
{
        int len;
#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]",
                               &addr->in6) + 1;
        else
#endif
                len = snprintf(&buf[*idx], buf_len - *idx, "%pI4",
                               &addr->ip) + 1;

        *idx += len;
        BUG_ON(*idx > buf_len + 1);
        return &buf[*idx - len];
}

#define IP_VS_DBG_BUF(level, msg, ...)                                  \
        do {                                                            \
                char ip_vs_dbg_buf[160];                                \
                int ip_vs_dbg_idx = 0;                                  \
                if (level <= ip_vs_get_debug_level())                   \
                        printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);  \
        } while (0)
#define IP_VS_ERR_BUF(msg...)                                           \
        do {                                                            \
                char ip_vs_dbg_buf[160];                                \
                int ip_vs_dbg_idx = 0;                                  \
                pr_err(msg);                                            \
        } while (0)

/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF() macros */
#define IP_VS_DBG_ADDR(af, addr)                                        \
        ip_vs_dbg_addr(af, ip_vs_dbg_buf,                               \
                       sizeof(ip_vs_dbg_buf), addr,                     \
                       &ip_vs_dbg_idx)

#define IP_VS_DBG(level, msg, ...)                                      \
        do {                                                            \
                if (level <= ip_vs_get_debug_level())                   \
                        printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);  \
        } while (0)
#define IP_VS_DBG_RL(msg, ...)                                          \
        do {                                                            \
                if (net_ratelimit())                                    \
                        printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);  \
        } while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)                     \
        do {                                                            \
                if (level <= ip_vs_get_debug_level())                   \
                        pp->debug_packet(af, pp, skb, ofs, msg);        \
        } while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)                  \
        do {                                                            \
                if (level <= ip_vs_get_debug_level() &&                 \
                    net_ratelimit())                                    \
                        pp->debug_packet(af, pp, skb, ofs, msg);        \
        } while (0)
#else   /* NO DEBUGGING at ALL */
#define IP_VS_DBG_BUF(level, msg...)  do {} while (0)
#define IP_VS_ERR_BUF(msg...)  do {} while (0)
#define IP_VS_DBG(level, msg...)  do {} while (0)
#define IP_VS_DBG_RL(msg...)  do {} while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)     do {} while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)  do {} while (0)
#endif

#define IP_VS_BUG() BUG()
#define IP_VS_ERR_RL(msg, ...)                                          \
        do {                                                            \
                if (net_ratelimit())                                    \
                        pr_err(msg, ##__VA_ARGS__);                     \
        } while (0)

#ifdef CONFIG_IP_VS_DEBUG
#define EnterFunction(level)                                            \
        do {                                                            \
                if (level <= ip_vs_get_debug_level())                   \
                        printk(KERN_DEBUG                               \
                               pr_fmt("Enter: %s, %s line %i\n"),       \
                               __func__, __FILE__, __LINE__);           \
        } while (0)
#define LeaveFunction(level)                                            \
        do {                                                            \
                if (level <= ip_vs_get_debug_level())                   \
                        printk(KERN_DEBUG                               \
                               pr_fmt("Leave: %s, %s line %i\n"),       \
                               __func__, __FILE__, __LINE__);           \
        } while (0)
#else
#define EnterFunction(level)   do {} while (0)
#define LeaveFunction(level)   do {} while (0)
#endif
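/* Usage sketch (illustrative only): IP_VS_DBG_ADDR() may only be expanded
 * inside IP_VS_DBG_BUF()/IP_VS_ERR_BUF(), because it writes into the
 * ip_vs_dbg_buf/ip_vs_dbg_idx locals those macros declare.  "cp" is an
 * assumed struct ip_vs_conn pointer (the type is defined further down).
 *
 *      IP_VS_DBG_BUF(7, "lookup %s:%d -> %s:%d\n",
 *                    IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
 *                    IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport));
 */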
/* The port number of FTP service (in network order). */
#define FTPPORT cpu_to_be16(21)
#define FTPDATA cpu_to_be16(20)

/* TCP State Values */
enum {
        IP_VS_TCP_S_NONE = 0,
        IP_VS_TCP_S_ESTABLISHED,
        IP_VS_TCP_S_SYN_SENT,
        IP_VS_TCP_S_SYN_RECV,
        IP_VS_TCP_S_FIN_WAIT,
        IP_VS_TCP_S_TIME_WAIT,
        IP_VS_TCP_S_CLOSE,
        IP_VS_TCP_S_CLOSE_WAIT,
        IP_VS_TCP_S_LAST_ACK,
        IP_VS_TCP_S_LISTEN,
        IP_VS_TCP_S_SYNACK,
        IP_VS_TCP_S_LAST
};

/* UDP State Values */
enum {
        IP_VS_UDP_S_NORMAL,
        IP_VS_UDP_S_LAST,
};

/* ICMP State Values */
enum {
        IP_VS_ICMP_S_NORMAL,
        IP_VS_ICMP_S_LAST,
};

/* SCTP State Values */
enum ip_vs_sctp_states {
        IP_VS_SCTP_S_NONE,
        IP_VS_SCTP_S_INIT1,
        IP_VS_SCTP_S_INIT,
        IP_VS_SCTP_S_COOKIE_SENT,
        IP_VS_SCTP_S_COOKIE_REPLIED,
        IP_VS_SCTP_S_COOKIE_WAIT,
        IP_VS_SCTP_S_COOKIE,
        IP_VS_SCTP_S_COOKIE_ECHOED,
        IP_VS_SCTP_S_ESTABLISHED,
        IP_VS_SCTP_S_SHUTDOWN_SENT,
        IP_VS_SCTP_S_SHUTDOWN_RECEIVED,
        IP_VS_SCTP_S_SHUTDOWN_ACK_SENT,
        IP_VS_SCTP_S_REJECTED,
        IP_VS_SCTP_S_CLOSED,
        IP_VS_SCTP_S_LAST
};

/* Connection templates use bits from state */
#define IP_VS_CTPL_S_NONE       0x0000
#define IP_VS_CTPL_S_ASSURED    0x0001
#define IP_VS_CTPL_S_LAST       0x0002

/* Delta sequence info structure
 * Each ip_vs_conn has 2 (output AND input seq. changes).
 * Only used in the VS/NAT.
 */
struct ip_vs_seq {
        __u32 init_seq;         /* Add delta from this seq */
        __u32 delta;            /* Delta in sequence numbers */
        __u32 previous_delta;   /* Delta in sequence numbers
                                 * before last resized pkt */
};

/* counters per cpu */
struct ip_vs_counters {
        __u64 conns;            /* connections scheduled */
        __u64 inpkts;           /* incoming packets */
        __u64 outpkts;          /* outgoing packets */
        __u64 inbytes;          /* incoming bytes */
        __u64 outbytes;         /* outgoing bytes */
};
/* Stats per cpu */
struct ip_vs_cpu_stats {
        struct ip_vs_counters cnt;
        struct u64_stats_sync syncp;
};

/* IPVS statistics objects */
struct ip_vs_estimator {
        struct list_head list;

        u64 last_inbytes;
        u64 last_outbytes;
        u64 last_conns;
        u64 last_inpkts;
        u64 last_outpkts;

        u64 cps;
        u64 inpps;
        u64 outpps;
        u64 inbps;
        u64 outbps;
};

/* IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user */
struct ip_vs_kstats {
        u64 conns;      /* connections scheduled */
        u64 inpkts;     /* incoming packets */
        u64 outpkts;    /* outgoing packets */
        u64 inbytes;    /* incoming bytes */
        u64 outbytes;   /* outgoing bytes */

        u64 cps;        /* current connection rate */
        u64 inpps;      /* current in packet rate */
        u64 outpps;     /* current out packet rate */
        u64 inbps;      /* current in byte rate */
        u64 outbps;     /* current out byte rate */
};

struct ip_vs_stats {
        struct ip_vs_kstats kstats;                     /* kernel statistics */
        struct ip_vs_estimator est;                     /* estimator */
        struct ip_vs_cpu_stats __percpu *cpustats;      /* per cpu counters */
        spinlock_t lock;                                /* spin lock */
        struct ip_vs_kstats kstats0;                    /* reset values */
};
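/* Usage sketch (illustrative only): folding the per-cpu counters of an
 * ip_vs_stats object into one value.  Each 64-bit read is protected by the
 * per-cpu u64_stats_sync sequence counter; "example_sum_inbytes" is a made-up
 * helper and assumes <linux/u64_stats_sync.h> and <linux/percpu.h> are
 * available.
 *
 *      static u64 example_sum_inbytes(struct ip_vs_stats *stats)
 *      {
 *              u64 sum = 0;
 *              int cpu;
 *
 *              for_each_possible_cpu(cpu) {
 *                      struct ip_vs_cpu_stats *s;
 *                      unsigned int start;
 *                      u64 bytes;
 *
 *                      s = per_cpu_ptr(stats->cpustats, cpu);
 *                      do {
 *                              start = u64_stats_fetch_begin(&s->syncp);
 *                              bytes = s->cnt.inbytes;
 *                      } while (u64_stats_fetch_retry(&s->syncp, start));
 *                      sum += bytes;
 *              }
 *              return sum;
 *      }
 */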
struct dst_entry;
struct iphdr;
struct ip_vs_conn;
struct ip_vs_app;
struct sk_buff;
struct ip_vs_proto_data;

struct ip_vs_protocol {
        struct ip_vs_protocol *next;
        char *name;
        u16 protocol;
        u16 num_states;
        int dont_defrag;

        void (*init)(struct ip_vs_protocol *pp);

        void (*exit)(struct ip_vs_protocol *pp);

        int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);

        void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);

        int (*conn_schedule)(struct netns_ipvs *ipvs,
                             int af, struct sk_buff *skb,
                             struct ip_vs_proto_data *pd,
                             int *verdict, struct ip_vs_conn **cpp,
                             struct ip_vs_iphdr *iph);

        struct ip_vs_conn *
        (*conn_in_get)(struct netns_ipvs *ipvs,
                       int af,
                       const struct sk_buff *skb,
                       const struct ip_vs_iphdr *iph);

        struct ip_vs_conn *
        (*conn_out_get)(struct netns_ipvs *ipvs,
                        int af,
                        const struct sk_buff *skb,
                        const struct ip_vs_iphdr *iph);

        int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
                            struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

        int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
                            struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

        int (*csum_check)(int af, struct sk_buff *skb,
                          struct ip_vs_protocol *pp);

        const char *(*state_name)(int state);

        void (*state_transition)(struct ip_vs_conn *cp, int direction,
                                 const struct sk_buff *skb,
                                 struct ip_vs_proto_data *pd);

        int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);

        void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);

        int (*app_conn_bind)(struct ip_vs_conn *cp);

        void (*debug_packet)(int af, struct ip_vs_protocol *pp,
                             const struct sk_buff *skb,
                             int offset,
                             const char *msg);

        void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
};
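/* Definition sketch (illustrative only): how a transport handler fills the
 * ops table above; compare ip_vs_protocol_udp and ip_vs_protocol_tcp, which
 * are declared near the end of this header.  The "example_*" callbacks are
 * assumed to exist elsewhere and their bodies are omitted here;
 * ip_vs_conn_in_get_proto()/ip_vs_conn_out_get_proto() are declared below.
 *
 *      static struct ip_vs_protocol example_protocol = {
 *              .name                   = "EXAMPLE",
 *              .protocol               = IPPROTO_UDP,
 *              .num_states             = IP_VS_UDP_S_LAST,
 *              .dont_defrag            = 0,
 *              .conn_schedule          = example_conn_schedule,
 *              .conn_in_get            = ip_vs_conn_in_get_proto,
 *              .conn_out_get           = ip_vs_conn_out_get_proto,
 *              .state_name             = example_state_name,
 *              .state_transition       = example_state_transition,
 *      };
 */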
/* protocol data per netns */
struct ip_vs_proto_data {
        struct ip_vs_proto_data *next;
        struct ip_vs_protocol *pp;
        int *timeout_table;             /* protocol timeout table */
        atomic_t appcnt;                /* counter of proto app incs. */
        struct tcp_states_t *tcp_state_table;
};

struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs,
                                              unsigned short proto);

struct ip_vs_conn_param {
        struct netns_ipvs *ipvs;
        const union nf_inet_addr *caddr;
        const union nf_inet_addr *vaddr;
        __be16 cport;
        __be16 vport;
        __u16 protocol;
        u16 af;

        const struct ip_vs_pe *pe;
        char *pe_data;
        __u8 pe_data_len;
};

/* IP_VS structure allocated for each dynamically scheduled connection */
struct ip_vs_conn {
        struct hlist_node c_list;       /* hashed list heads */
        /* Protocol, addresses and port numbers */
        __be16 cport;
        __be16 dport;
        __be16 vport;
        u16 af;                         /* address family */
        union nf_inet_addr caddr;       /* client address */
        union nf_inet_addr vaddr;       /* virtual address */
        union nf_inet_addr daddr;       /* destination address */
        volatile __u32 flags;           /* status flags */
        __u16 protocol;                 /* Which protocol (TCP/UDP) */
        __u16 daf;                      /* Address family of the dest */
        struct netns_ipvs *ipvs;

        /* counter and timer */
        refcount_t refcnt;              /* reference count */
        struct timer_list timer;        /* Expiration timer */
        volatile unsigned long timeout; /* timeout */

        /* Flags and state transition */
        spinlock_t lock;                /* lock for state transition */
        volatile __u16 state;           /* state info */
        volatile __u16 old_state;       /* old state, to be used for
                                         * state-transition-triggered
                                         * synchronization
                                         */
        __u32 fwmark;                   /* Firewall mark from the skb */
        unsigned long sync_endtime;     /* jiffies + sent_retries */

        /* Control members */
        struct ip_vs_conn *control;     /* Master control connection */
        atomic_t n_control;             /* Number of controlled ones */
        struct ip_vs_dest *dest;        /* real server */
        atomic_t in_pkts;               /* incoming packet counter */

        /* Packet transmitter for different forwarding methods.  If it
         * mangles the packet, it must return NF_DROP or better NF_STOLEN,
         * otherwise this must be changed to a sk_buff **.
         * NF_ACCEPT can be returned when destination is local.
         */
        int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
                           struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);

        /* Note: we can group the following members into a structure,
         * in order to save more space, and the following members are
         * only used in VS/NAT anyway.
         */
        struct ip_vs_app *app;          /* bound ip_vs_app object */
        void *app_data;                 /* Application private data */
        struct ip_vs_seq in_seq;        /* incoming seq. struct */
        struct ip_vs_seq out_seq;       /* outgoing seq. struct */

        const struct ip_vs_pe *pe;
        char *pe_data;
        __u8 pe_data_len;

        struct rcu_head rcu_head;
};
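/* Callback sketch (illustrative only): the shape of a packet_xmit handler
 * honouring the return convention documented above - NF_ACCEPT when the
 * packet is left for local delivery, NF_STOLEN once it has been consumed.
 * "example_xmit" is made up; the real transmitters are the ip_vs_*_xmit()
 * functions declared near the end of this header.
 *
 *      static int example_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 *                              struct ip_vs_protocol *pp,
 *                              struct ip_vs_iphdr *iph)
 *      {
 *              if ((cp->flags & IP_VS_CONN_F_FWD_MASK) ==
 *                  IP_VS_CONN_F_LOCALNODE)
 *                      return NF_ACCEPT;
 *              kfree_skb(skb);
 *              return NF_STOLEN;
 *      }
 */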
573 */ 574 struct ip_vs_service_user_kern { 575 /* virtual service addresses */ 576 u16 af; 577 u16 protocol; 578 union nf_inet_addr addr; /* virtual ip address */ 579 __be16 port; 580 u32 fwmark; /* firwall mark of service */ 581 582 /* virtual service options */ 583 char *sched_name; 584 char *pe_name; 585 unsigned int flags; /* virtual service flags */ 586 unsigned int timeout; /* persistent timeout in sec */ 587 __be32 netmask; /* persistent netmask or plen */ 588 }; 589 590 591 struct ip_vs_dest_user_kern { 592 /* destination server address */ 593 union nf_inet_addr addr; 594 __be16 port; 595 596 /* real server options */ 597 unsigned int conn_flags; /* connection flags */ 598 int weight; /* destination weight */ 599 600 /* thresholds for active connections */ 601 u32 u_threshold; /* upper threshold */ 602 u32 l_threshold; /* lower threshold */ 603 604 /* Address family of addr */ 605 u16 af; 606 }; 607 608 609 /* 610 * The information about the virtual service offered to the net and the 611 * forwarding entries. 612 */ 613 struct ip_vs_service { 614 struct hlist_node s_list; /* for normal service table */ 615 struct hlist_node f_list; /* for fwmark-based service table */ 616 atomic_t refcnt; /* reference counter */ 617 618 u16 af; /* address family */ 619 __u16 protocol; /* which protocol (TCP/UDP) */ 620 union nf_inet_addr addr; /* IP address for virtual service */ 621 __be16 port; /* port number for the service */ 622 __u32 fwmark; /* firewall mark of the service */ 623 unsigned int flags; /* service status flags */ 624 unsigned int timeout; /* persistent timeout in ticks */ 625 __be32 netmask; /* grouping granularity, mask/plen */ 626 struct netns_ipvs *ipvs; 627 628 struct list_head destinations; /* real server d-linked list */ 629 __u32 num_dests; /* number of servers */ 630 struct ip_vs_stats stats; /* statistics for the service */ 631 632 /* for scheduling */ 633 struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */ 634 spinlock_t sched_lock; /* lock sched_data */ 635 void *sched_data; /* scheduler application data */ 636 637 /* alternate persistence engine */ 638 struct ip_vs_pe __rcu *pe; 639 int conntrack_afmask; 640 641 struct rcu_head rcu_head; 642 }; 643 644 /* Information for cached dst */ 645 struct ip_vs_dest_dst { 646 struct dst_entry *dst_cache; /* destination cache entry */ 647 u32 dst_cookie; 648 union nf_inet_addr dst_saddr; 649 struct rcu_head rcu_head; 650 }; 651 652 /* The real server destination forwarding entry with ip address, port number, 653 * and so on. 
654 */ 655 struct ip_vs_dest { 656 struct list_head n_list; /* for the dests in the service */ 657 struct hlist_node d_list; /* for table with all the dests */ 658 659 u16 af; /* address family */ 660 __be16 port; /* port number of the server */ 661 union nf_inet_addr addr; /* IP address of the server */ 662 volatile unsigned int flags; /* dest status flags */ 663 atomic_t conn_flags; /* flags to copy to conn */ 664 atomic_t weight; /* server weight */ 665 atomic_t last_weight; /* server latest weight */ 666 667 refcount_t refcnt; /* reference counter */ 668 struct ip_vs_stats stats; /* statistics */ 669 unsigned long idle_start; /* start time, jiffies */ 670 671 /* connection counters and thresholds */ 672 atomic_t activeconns; /* active connections */ 673 atomic_t inactconns; /* inactive connections */ 674 atomic_t persistconns; /* persistent connections */ 675 __u32 u_threshold; /* upper threshold */ 676 __u32 l_threshold; /* lower threshold */ 677 678 /* for destination cache */ 679 spinlock_t dst_lock; /* lock of dst_cache */ 680 struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */ 681 682 /* for virtual service */ 683 struct ip_vs_service __rcu *svc; /* service it belongs to */ 684 __u16 protocol; /* which protocol (TCP/UDP) */ 685 __be16 vport; /* virtual port number */ 686 union nf_inet_addr vaddr; /* virtual IP address */ 687 __u32 vfwmark; /* firewall mark of service */ 688 689 struct list_head t_list; /* in dest_trash */ 690 unsigned int in_rs_table:1; /* we are in rs_table */ 691 }; 692 693 /* The scheduler object */ 694 struct ip_vs_scheduler { 695 struct list_head n_list; /* d-linked list head */ 696 char *name; /* scheduler name */ 697 atomic_t refcnt; /* reference counter */ 698 struct module *module; /* THIS_MODULE/NULL */ 699 700 /* scheduler initializing service */ 701 int (*init_service)(struct ip_vs_service *svc); 702 /* scheduling service finish */ 703 void (*done_service)(struct ip_vs_service *svc); 704 /* dest is linked */ 705 int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest); 706 /* dest is unlinked */ 707 int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest); 708 /* dest is updated */ 709 int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest); 710 711 /* selecting a server from the given service */ 712 struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc, 713 const struct sk_buff *skb, 714 struct ip_vs_iphdr *iph); 715 }; 716 717 /* The persistence engine object */ 718 struct ip_vs_pe { 719 struct list_head n_list; /* d-linked list head */ 720 char *name; /* scheduler name */ 721 atomic_t refcnt; /* reference counter */ 722 struct module *module; /* THIS_MODULE/NULL */ 723 724 /* get the connection template, if any */ 725 int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb); 726 bool (*ct_match)(const struct ip_vs_conn_param *p, 727 struct ip_vs_conn *ct); 728 u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval, 729 bool inverse); 730 int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf); 731 /* create connections for real-server outgoing packets */ 732 struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc, 733 struct ip_vs_dest *dest, 734 struct sk_buff *skb, 735 const struct ip_vs_iphdr *iph, 736 __be16 dport, __be16 cport); 737 }; 738 739 /* The application module object (a.k.a. 
/* The persistence engine object */
struct ip_vs_pe {
        struct list_head n_list;        /* d-linked list head */
        char *name;                     /* scheduler name */
        atomic_t refcnt;                /* reference counter */
        struct module *module;          /* THIS_MODULE/NULL */

        /* get the connection template, if any */
        int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
        bool (*ct_match)(const struct ip_vs_conn_param *p,
                         struct ip_vs_conn *ct);
        u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
                           bool inverse);
        int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
        /* create connections for real-server outgoing packets */
        struct ip_vs_conn *(*conn_out)(struct ip_vs_service *svc,
                                       struct ip_vs_dest *dest,
                                       struct sk_buff *skb,
                                       const struct ip_vs_iphdr *iph,
                                       __be16 dport, __be16 cport);
};

/* The application module object (a.k.a. app incarnation) */
struct ip_vs_app {
        struct list_head a_list;        /* member in app list */
        int type;                       /* IP_VS_APP_TYPE_xxx */
        char *name;                     /* application module name */
        __u16 protocol;
        struct module *module;          /* THIS_MODULE/NULL */
        struct list_head incs_list;     /* list of incarnations */

        /* members for application incarnations */
        struct list_head p_list;        /* member in proto app list */
        struct ip_vs_app *app;          /* its real application */
        __be16 port;                    /* port number in net order */
        atomic_t usecnt;                /* usage counter */
        struct rcu_head rcu_head;

        /* output hook: Process packet in inout direction, diff set for TCP.
         * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
         *         2=Mangled but checksum was not updated
         */
        int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
                       struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);

        /* input hook: Process packet in outin direction, diff set for TCP.
         * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
         *         2=Mangled but checksum was not updated
         */
        int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
                      struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);

        /* ip_vs_app initializer */
        int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);

        /* ip_vs_app finish */
        int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);


        /* not used now */
        int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *,
                         struct ip_vs_protocol *);

        void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);

        int *timeout_table;
        int *timeouts;
        int timeouts_size;

        int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
                             int *verdict, struct ip_vs_conn **cpp);

        struct ip_vs_conn *
        (*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
                       const struct iphdr *iph, int inverse);

        struct ip_vs_conn *
        (*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
                        const struct iphdr *iph, int inverse);

        int (*state_transition)(struct ip_vs_conn *cp, int direction,
                                const struct sk_buff *skb,
                                struct ip_vs_app *app);

        void (*timeout_change)(struct ip_vs_app *app, int flags);
};

struct ipvs_master_sync_state {
        struct list_head sync_queue;
        struct ip_vs_sync_buff *sync_buff;
        unsigned long sync_queue_len;
        unsigned int sync_queue_delay;
        struct task_struct *master_thread;
        struct delayed_work master_wakeup_work;
        struct netns_ipvs *ipvs;
};

/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)

struct ipvs_sync_daemon_cfg {
        union nf_inet_addr mcast_group;
        int syncid;
        u16 sync_maxlen;
        u16 mcast_port;
        u8 mcast_af;
        u8 mcast_ttl;
        /* multicast interface name */
        char mcast_ifn[IP_VS_IFNAME_MAXLEN];
};
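/* Configuration sketch (illustrative only): the kind of values a master sync
 * daemon bound to "eth0" with syncid 10 would hand to start_sync_thread(),
 * which is declared later in this header.  All literal values here (multicast
 * group 224.0.0.81, port 8848, payload size 1472) are assumptions for the
 * example, not mandated by this header.
 *
 *      struct ipvs_sync_daemon_cfg cfg = {
 *              .mcast_group.ip = htonl(0xe0000051),
 *              .syncid         = 10,
 *              .sync_maxlen    = 1472,
 *              .mcast_port     = 8848,
 *              .mcast_af       = AF_INET,
 *              .mcast_ttl      = 1,
 *              .mcast_ifn      = "eth0",
 *      };
 *      int err = start_sync_thread(ipvs, &cfg, IP_VS_STATE_MASTER);
 */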
/* IPVS in network namespace */
struct netns_ipvs {
        int gen;                        /* Generation */
        int enable;                     /* enable like nf_hooks do */
        /* Hash table: for real service lookups */
#define IP_VS_RTAB_BITS 4
#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)

        struct hlist_head rs_table[IP_VS_RTAB_SIZE];
        /* ip_vs_app */
        struct list_head app_list;
        /* ip_vs_proto */
#define IP_VS_PROTO_TAB_SIZE 32         /* must be power of 2 */
        struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
        /* ip_vs_proto_tcp */
#ifdef CONFIG_IP_VS_PROTO_TCP
#define TCP_APP_TAB_BITS 4
#define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
#define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
        struct list_head tcp_apps[TCP_APP_TAB_SIZE];
#endif
        /* ip_vs_proto_udp */
#ifdef CONFIG_IP_VS_PROTO_UDP
#define UDP_APP_TAB_BITS 4
#define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
#define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
        struct list_head udp_apps[UDP_APP_TAB_SIZE];
#endif
        /* ip_vs_proto_sctp */
#ifdef CONFIG_IP_VS_PROTO_SCTP
#define SCTP_APP_TAB_BITS 4
#define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
#define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
        /* Hash table for SCTP application incarnations */
        struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
#endif
        /* ip_vs_conn */
        atomic_t conn_count;            /* connection counter */

        /* ip_vs_ctl */
        struct ip_vs_stats tot_stats;   /* Statistics & est. */

        int num_services;               /* no of virtual services */

        /* Trash for destinations */
        struct list_head dest_trash;
        spinlock_t dest_trash_lock;
        struct timer_list dest_trash_timer;     /* expiration timer */
        /* Service counters */
        atomic_t ftpsvc_counter;
        atomic_t nullsvc_counter;
        atomic_t conn_out_counter;

#ifdef CONFIG_SYSCTL
        /* 1/rate drop and drop-entry variables */
        struct delayed_work defense_work;       /* Work handler */
        int drop_rate;
        int drop_counter;
        atomic_t dropentry;
        /* locks in ctl.c */
        spinlock_t dropentry_lock;      /* drop entry handling */
        spinlock_t droppacket_lock;     /* drop packet handling */
        spinlock_t securetcp_lock;      /* state and timeout tables */

        /* sys-ctl struct */
        struct ctl_table_header *sysctl_hdr;
        struct ctl_table *sysctl_tbl;
#endif

        /* sysctl variables */
        int sysctl_amemthresh;
        int sysctl_am_droprate;
        int sysctl_drop_entry;
        int sysctl_drop_packet;
        int sysctl_secure_tcp;
#ifdef CONFIG_IP_VS_NFCT
        int sysctl_conntrack;
#endif
        int sysctl_snat_reroute;
        int sysctl_sync_ver;
        int sysctl_sync_ports;
        int sysctl_sync_persist_mode;
        unsigned long sysctl_sync_qlen_max;
        int sysctl_sync_sock_size;
        int sysctl_cache_bypass;
        int sysctl_expire_nodest_conn;
        int sysctl_sloppy_tcp;
        int sysctl_sloppy_sctp;
        int sysctl_expire_quiescent_template;
        int sysctl_sync_threshold[2];
        unsigned int sysctl_sync_refresh_period;
        int sysctl_sync_retries;
        int sysctl_nat_icmp_send;
        int sysctl_pmtu_disc;
        int sysctl_backup_only;
        int sysctl_conn_reuse_mode;
        int sysctl_schedule_icmp;
        int sysctl_ignore_tunneled;

        /* ip_vs_lblc */
        int sysctl_lblc_expiration;
        struct ctl_table_header *lblc_ctl_header;
        struct ctl_table *lblc_ctl_table;
        /* ip_vs_lblcr */
        int sysctl_lblcr_expiration;
        struct ctl_table_header *lblcr_ctl_header;
        struct ctl_table *lblcr_ctl_table;
        /* ip_vs_est */
        struct list_head est_list;      /* estimator list */
        spinlock_t est_lock;
        struct timer_list est_timer;    /* Estimation timer */
        /* ip_vs_sync */
        spinlock_t sync_lock;
        struct ipvs_master_sync_state *ms;
        spinlock_t sync_buff_lock;
        struct task_struct **backup_threads;
        int threads_mask;
        volatile int sync_state;
        struct mutex sync_mutex;
        struct ipvs_sync_daemon_cfg mcfg;       /* Master Configuration */
        struct ipvs_sync_daemon_cfg bcfg;       /* Backup Configuration */
        /* net name space ptr */
        struct net *net;                /* Needed by timer routines */
        /* Number of heterogeneous destinations, needed because heterogeneous
         * destinations are not supported when synchronization is enabled.
         */
        unsigned int mixed_address_family_dests;
};
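/* Usage sketch (illustrative only): hook-side code reaches the per-netns IPVS
 * state through net_ipvs() (defined at the top of this header) and backs off
 * while the netns instance is not enabled yet.  "example_conn_count" is a
 * made-up helper.
 *
 *      static int example_conn_count(struct net *net)
 *      {
 *              struct netns_ipvs *ipvs = net_ipvs(net);
 *
 *              if (!ipvs->enable)
 *                      return 0;
 *              return atomic_read(&ipvs->conn_count);
 *      }
 */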
#define DEFAULT_SYNC_THRESHOLD  3
#define DEFAULT_SYNC_PERIOD     50
#define DEFAULT_SYNC_VER        1
#define DEFAULT_SLOPPY_TCP      0
#define DEFAULT_SLOPPY_SCTP     0
#define DEFAULT_SYNC_REFRESH_PERIOD     (0U * HZ)
#define DEFAULT_SYNC_RETRIES            0
#define IPVS_SYNC_WAKEUP_RATE   8
#define IPVS_SYNC_QLEN_MAX      (IPVS_SYNC_WAKEUP_RATE * 4)
#define IPVS_SYNC_SEND_DELAY    (HZ / 50)
#define IPVS_SYNC_CHECK_PERIOD  HZ
#define IPVS_SYNC_FLUSH_TIME    (HZ * 2)
#define IPVS_SYNC_PORTS_MAX     (1 << 6)

#ifdef CONFIG_SYSCTL

static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_sync_threshold[0];
}

static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
        return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
}

static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
        return READ_ONCE(ipvs->sysctl_sync_refresh_period);
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_sync_retries;
}

static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_sync_ver;
}

static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_sloppy_tcp;
}

static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_sloppy_sctp;
}

static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
        return READ_ONCE(ipvs->sysctl_sync_ports);
}

static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_sync_persist_mode;
}

static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_sync_qlen_max;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_sync_sock_size;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_pmtu_disc;
}

static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
        return ipvs->sync_state & IP_VS_STATE_BACKUP &&
               ipvs->sysctl_backup_only;
}

static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_conn_reuse_mode;
}

static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_schedule_icmp;
}

static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_ignore_tunneled;
}

static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
{
        return ipvs->sysctl_cache_bypass;
}

#else

static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
        return DEFAULT_SYNC_THRESHOLD;
}

static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
        return DEFAULT_SYNC_PERIOD;
}

static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
        return DEFAULT_SYNC_REFRESH_PERIOD;
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
        return DEFAULT_SYNC_RETRIES & 3;
}
static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
        return DEFAULT_SYNC_VER;
}

static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
{
        return DEFAULT_SLOPPY_TCP;
}

static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
{
        return DEFAULT_SLOPPY_SCTP;
}

static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
        return 1;
}

static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
{
        return 0;
}

static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
        return IPVS_SYNC_QLEN_MAX;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
        return 0;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
        return 1;
}

static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
        return 0;
}

static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
{
        return 1;
}

static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
        return 0;
}

static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
{
        return 0;
}

static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
{
        return 0;
}

#endif

/* IPVS core functions
 * (from ip_vs_core.c)
 */
const char *ip_vs_proto_name(unsigned int proto);
void ip_vs_init_hash_table(struct list_head *table, int rows);
struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
                                      struct ip_vs_dest *dest,
                                      struct sk_buff *skb,
                                      const struct ip_vs_iphdr *iph,
                                      __be16 dport,
                                      __be16 cport);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))

#define IP_VS_APP_TYPE_FTP      1

/* ip_vs_conn handling functions
 * (from ip_vs_conn.c)
 */
enum {
        IP_VS_DIR_INPUT = 0,
        IP_VS_DIR_OUTPUT,
        IP_VS_DIR_INPUT_ONLY,
        IP_VS_DIR_LAST,
};

static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af,
                                         int protocol,
                                         const union nf_inet_addr *caddr,
                                         __be16 cport,
                                         const union nf_inet_addr *vaddr,
                                         __be16 vport,
                                         struct ip_vs_conn_param *p)
{
        p->ipvs = ipvs;
        p->af = af;
        p->protocol = protocol;
        p->caddr = caddr;
        p->cport = cport;
        p->vaddr = vaddr;
        p->vport = vport;
        p->pe = NULL;
        p->pe_data = NULL;
}

struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
                                           const struct sk_buff *skb,
                                           const struct ip_vs_iphdr *iph);

struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
                                            const struct sk_buff *skb,
                                            const struct ip_vs_iphdr *iph);
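/* Lookup sketch (illustrative only): building an ip_vs_conn_param for the
 * client->virtual tuple and probing the connection table.  The reference
 * taken by ip_vs_conn_in_get() is dropped with ip_vs_conn_put(), declared
 * just below.  "example_conn_exists" and its arguments are made-up names.
 *
 *      static bool example_conn_exists(struct netns_ipvs *ipvs, int af,
 *                                      __u16 protocol,
 *                                      const union nf_inet_addr *caddr,
 *                                      __be16 cport,
 *                                      const union nf_inet_addr *vaddr,
 *                                      __be16 vport)
 *      {
 *              struct ip_vs_conn_param p;
 *              struct ip_vs_conn *cp;
 *
 *              ip_vs_conn_fill_param(ipvs, af, protocol, caddr, cport,
 *                                    vaddr, vport, &p);
 *              cp = ip_vs_conn_in_get(&p);
 *              if (!cp)
 *                      return false;
 *              ip_vs_conn_put(cp);
 *              return true;
 *      }
 */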
1208 */ 1209 static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp) 1210 { 1211 return refcount_inc_not_zero(&cp->refcnt); 1212 } 1213 1214 /* put back the conn without restarting its timer */ 1215 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) 1216 { 1217 smp_mb__before_atomic(); 1218 refcount_dec(&cp->refcnt); 1219 } 1220 void ip_vs_conn_put(struct ip_vs_conn *cp); 1221 void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); 1222 1223 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af, 1224 const union nf_inet_addr *daddr, 1225 __be16 dport, unsigned int flags, 1226 struct ip_vs_dest *dest, __u32 fwmark); 1227 void ip_vs_conn_expire_now(struct ip_vs_conn *cp); 1228 1229 const char *ip_vs_state_name(const struct ip_vs_conn *cp); 1230 1231 void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); 1232 int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest); 1233 void ip_vs_random_dropentry(struct netns_ipvs *ipvs); 1234 int ip_vs_conn_init(void); 1235 void ip_vs_conn_cleanup(void); 1236 1237 static inline void ip_vs_control_del(struct ip_vs_conn *cp) 1238 { 1239 struct ip_vs_conn *ctl_cp = cp->control; 1240 if (!ctl_cp) { 1241 IP_VS_ERR_BUF("request control DEL for uncontrolled: " 1242 "%s:%d to %s:%d\n", 1243 IP_VS_DBG_ADDR(cp->af, &cp->caddr), 1244 ntohs(cp->cport), 1245 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), 1246 ntohs(cp->vport)); 1247 1248 return; 1249 } 1250 1251 IP_VS_DBG_BUF(7, "DELeting control for: " 1252 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n", 1253 IP_VS_DBG_ADDR(cp->af, &cp->caddr), 1254 ntohs(cp->cport), 1255 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr), 1256 ntohs(ctl_cp->cport)); 1257 1258 cp->control = NULL; 1259 if (atomic_read(&ctl_cp->n_control) == 0) { 1260 IP_VS_ERR_BUF("BUG control DEL with n=0 : " 1261 "%s:%d to %s:%d\n", 1262 IP_VS_DBG_ADDR(cp->af, &cp->caddr), 1263 ntohs(cp->cport), 1264 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), 1265 ntohs(cp->vport)); 1266 1267 return; 1268 } 1269 atomic_dec(&ctl_cp->n_control); 1270 } 1271 1272 static inline void 1273 ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) 1274 { 1275 if (cp->control) { 1276 IP_VS_ERR_BUF("request control ADD for already controlled: " 1277 "%s:%d to %s:%d\n", 1278 IP_VS_DBG_ADDR(cp->af, &cp->caddr), 1279 ntohs(cp->cport), 1280 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), 1281 ntohs(cp->vport)); 1282 1283 ip_vs_control_del(cp); 1284 } 1285 1286 IP_VS_DBG_BUF(7, "ADDing control for: " 1287 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n", 1288 IP_VS_DBG_ADDR(cp->af, &cp->caddr), 1289 ntohs(cp->cport), 1290 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr), 1291 ntohs(ctl_cp->cport)); 1292 1293 cp->control = ctl_cp; 1294 atomic_inc(&ctl_cp->n_control); 1295 } 1296 1297 /* Mark our template as assured */ 1298 static inline void 1299 ip_vs_control_assure_ct(struct ip_vs_conn *cp) 1300 { 1301 struct ip_vs_conn *ct = cp->control; 1302 1303 if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) && 1304 (ct->flags & IP_VS_CONN_F_TEMPLATE)) 1305 ct->state |= IP_VS_CTPL_S_ASSURED; 1306 } 1307 1308 /* IPVS netns init & cleanup functions */ 1309 int ip_vs_estimator_net_init(struct netns_ipvs *ipvs); 1310 int ip_vs_control_net_init(struct netns_ipvs *ipvs); 1311 int ip_vs_protocol_net_init(struct netns_ipvs *ipvs); 1312 int ip_vs_app_net_init(struct netns_ipvs *ipvs); 1313 int ip_vs_conn_net_init(struct netns_ipvs *ipvs); 1314 int ip_vs_sync_net_init(struct netns_ipvs *ipvs); 1315 void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs); 1316 void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs); 1317 
void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs);

/* IPVS application functions
 * (from ip_vs_app.c)
 */
#define IP_VS_APP_MAX_PORTS 8
struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs,
                                     struct ip_vs_app *app);
void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
void ip_vs_unbind_app(struct ip_vs_conn *cp);
int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app,
                           __u16 proto, __u16 port);
int ip_vs_app_inc_get(struct ip_vs_app *inc);
void ip_vs_app_inc_put(struct ip_vs_app *inc);

int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb,
                      struct ip_vs_iphdr *ipvsh);
int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb,
                     struct ip_vs_iphdr *ipvsh);
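/* Registration sketch (illustrative only): how an application helper wires
 * itself up per netns - register the ip_vs_app object, then one incarnation
 * per handled port, loosely following what the FTP helper does.  The
 * "example_*" names and the port literal are assumptions.
 *
 *      static struct ip_vs_app example_app = {
 *              .name           = "example",
 *              .type           = IP_VS_APP_TYPE_FTP,
 *              .protocol       = IPPROTO_TCP,
 *              .module         = THIS_MODULE,
 *              .incs_list      = LIST_HEAD_INIT(example_app.incs_list),
 *              .pkt_out        = example_pkt_out,
 *              .pkt_in         = example_pkt_in,
 *      };
 *
 *      app = register_ip_vs_app(ipvs, &example_app);
 *      if (IS_ERR(app))
 *              return PTR_ERR(app);
 *      ret = register_ip_vs_app_inc(ipvs, app, app->protocol, 21);
 */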
int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);

/* Use a #define to avoid all of module.h just for these trivial ops */
#define ip_vs_pe_get(pe)                        \
        if (pe && pe->module)                   \
                __module_get(pe->module);

#define ip_vs_pe_put(pe)                        \
        if (pe && pe->module)                   \
                module_put(pe->module);

/* IPVS protocol functions (from ip_vs_proto.c) */
int ip_vs_protocol_init(void);
void ip_vs_protocol_cleanup(void);
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
int *ip_vs_create_timeout_table(int *table, int size);
void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
                               const struct sk_buff *skb, int offset,
                               const char *msg);

extern struct ip_vs_protocol ip_vs_protocol_tcp;
extern struct ip_vs_protocol ip_vs_protocol_udp;
extern struct ip_vs_protocol ip_vs_protocol_icmp;
extern struct ip_vs_protocol ip_vs_protocol_esp;
extern struct ip_vs_protocol ip_vs_protocol_ah;
extern struct ip_vs_protocol ip_vs_protocol_sctp;

/* Registering/unregistering scheduler functions
 * (from ip_vs_sched.c)
 */
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int ip_vs_bind_scheduler(struct ip_vs_service *svc,
                         struct ip_vs_scheduler *scheduler);
void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
                            struct ip_vs_scheduler *sched);
struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
               struct ip_vs_proto_data *pd, int *ignored,
               struct ip_vs_iphdr *iph);
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
                struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);

void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);

/* IPVS control data and functions (from ip_vs_ctl.c) */
extern struct ip_vs_stats ip_vs_stats;
extern int sysctl_ip_vs_sync_ver;

struct ip_vs_service *
ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark,
                   __u16 protocol, const union nf_inet_addr *vaddr,
                   __be16 vport);

bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
                            const union nf_inet_addr *daddr, __be16 dport);

struct ip_vs_dest *
ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
                        const union nf_inet_addr *daddr, __be16 dport);

int ip_vs_use_count_inc(void);
void ip_vs_use_count_dec(void);
int ip_vs_register_nl_ioctl(void);
void ip_vs_unregister_nl_ioctl(void);
int ip_vs_control_init(void);
void ip_vs_control_cleanup(void);
struct ip_vs_dest *
ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
                const union nf_inet_addr *daddr, __be16 dport,
                const union nf_inet_addr *vaddr, __be16 vport,
                __u16 protocol, __u32 fwmark, __u32 flags);
void ip_vs_try_bind_dest(struct ip_vs_conn *cp);

static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
{
        refcount_inc(&dest->refcnt);
}

static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
{
        smp_mb__before_atomic();
        refcount_dec(&dest->refcnt);
}

static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
{
        if (refcount_dec_and_test(&dest->refcnt))
                kfree(dest);
}
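/* Refcount sketch (illustrative only): a caller that caches a struct
 * ip_vs_dest pointer beyond an RCU read-side section pins it with
 * ip_vs_dest_hold() and releases it with ip_vs_dest_put() when the cached
 * pointer is dropped.  "example_cache_dest"/"example_uncache_dest" are
 * made-up helpers.
 *
 *      static void example_cache_dest(struct ip_vs_conn *cp,
 *                                     struct ip_vs_dest *dest)
 *      {
 *              ip_vs_dest_hold(dest);
 *              cp->dest = dest;
 *      }
 *
 *      static void example_uncache_dest(struct ip_vs_conn *cp)
 *      {
 *              struct ip_vs_dest *dest = cp->dest;
 *
 *              cp->dest = NULL;
 *              if (dest)
 *                      ip_vs_dest_put(dest);
 *      }
 */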
/* IPVS sync daemon data and function prototypes
 * (from ip_vs_sync.c)
 */
int start_sync_thread(struct netns_ipvs *ipvs,
                      struct ipvs_sync_daemon_cfg *cfg, int state);
int stop_sync_thread(struct netns_ipvs *ipvs, int state);
void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts);

/* IPVS rate estimator prototypes (from ip_vs_est.c) */
void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);

/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                    struct ip_vs_protocol *pp, int offset,
                    unsigned int hooknum, struct ip_vs_iphdr *iph);
void ip_vs_dest_dst_rcu_free(struct rcu_head *head);

#ifdef CONFIG_IP_VS_IPV6
int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                         struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                         struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                       struct ip_vs_protocol *pp, int offset,
                       unsigned int hooknum, struct ip_vs_iphdr *iph);
#endif

#ifdef CONFIG_SYSCTL
/* This is a simple mechanism to ignore packets when
 * we are overloaded. Just set ip_vs_drop_rate to 'n' and
 * we start to drop 1/rate of the packets.
 */
static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
        if (!ipvs->drop_rate)
                return 0;
        if (--ipvs->drop_counter > 0)
                return 0;
        ipvs->drop_counter = ipvs->drop_rate;
        return 1;
}
#else
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif

/* ip_vs_fwd_tag returns the forwarding tag of the connection */
#define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)

static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
{
        char fwd;

        switch (IP_VS_FWD_METHOD(cp)) {
        case IP_VS_CONN_F_MASQ:
                fwd = 'M'; break;
        case IP_VS_CONN_F_LOCALNODE:
                fwd = 'L'; break;
        case IP_VS_CONN_F_TUNNEL:
                fwd = 'T'; break;
        case IP_VS_CONN_F_DROUTE:
                fwd = 'R'; break;
        case IP_VS_CONN_F_BYPASS:
                fwd = 'B'; break;
        default:
                fwd = '?'; break;
        }
        return fwd;
}

void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
                    struct ip_vs_conn *cp, int dir);

#ifdef CONFIG_IP_VS_IPV6
void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
                       struct ip_vs_conn *cp, int dir);
#endif

__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);

static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
{
        __be32 diff[2] = { ~old, new };

        return csum_partial(diff, sizeof(diff), oldsum);
}

#ifdef CONFIG_IP_VS_IPV6
static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
                                        __wsum oldsum)
{
        __be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
                           new[3], new[2], new[1], new[0] };

        return csum_partial(diff, sizeof(diff), oldsum);
}
#endif

static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
{
        __be16 diff[2] = { ~old, new };

        return csum_partial(diff, sizeof(diff), oldsum);
}

/* Forget current conntrack (unconfirmed) and attach notrack entry */
static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

        if (ct) {
                nf_conntrack_put(&ct->ct_general);
                nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
        }
#endif
}

#ifdef CONFIG_IP_VS_NFCT
/* Netfilter connection tracking
 * (from ip_vs_nfct.c)
 */
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
#ifdef CONFIG_SYSCTL
        return ipvs->sysctl_conntrack;
#else
        return 0;
#endif
}

void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
                            int outin);
int ip_vs_confirm_conntrack(struct sk_buff *skb);
void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
                               struct ip_vs_conn *cp, u_int8_t proto,
                               const __be16 port, int from_rs);
void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);

#else
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
        return 0;
}

static inline void ip_vs_update_conntrack(struct sk_buff *skb,
                                          struct ip_vs_conn *cp, int outin)
{
}

static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
{
        return NF_ACCEPT;
}

static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
{
}
#endif /* CONFIG_IP_VS_NFCT */

/* Really using conntrack? */
static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
                                             struct sk_buff *skb)
{
#ifdef CONFIG_IP_VS_NFCT
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;

        if (!(cp->flags & IP_VS_CONN_F_NFCT))
                return false;
        ct = nf_ct_get(skb, &ctinfo);
        if (ct)
                return true;
#endif
        return false;
}

static inline int ip_vs_register_conntrack(struct ip_vs_service *svc)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        int afmask = (svc->af == AF_INET6) ? 2 : 1;
        int ret = 0;

        if (!(svc->conntrack_afmask & afmask)) {
                ret = nf_ct_netns_get(svc->ipvs->net, svc->af);
                if (ret >= 0)
                        svc->conntrack_afmask |= afmask;
        }
        return ret;
#else
        return 0;
#endif
}

static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        int afmask = (svc->af == AF_INET6) ? 2 : 1;

        if (svc->conntrack_afmask & afmask) {
                nf_ct_netns_put(svc->ipvs->net, svc->af);
                svc->conntrack_afmask &= ~afmask;
        }
#endif
}

static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
        /* We think the overhead of processing active connections is 256
         * times higher than that of inactive connections on average. (This
         * 256 times might not be accurate, we will change it later.) We
         * use the following formula to estimate the overhead now:
         *                dest->activeconns*256 + dest->inactconns
         */
        return (atomic_read(&dest->activeconns) << 8) +
                atomic_read(&dest->inactconns);
}

#endif  /* _NET_IP_VS_H */