/* SPDX-License-Identifier: GPL-2.0 */
/* IP Virtual Server
 * data structure and functionality definitions
 */

#ifndef _NET_IP_VS_H
#define _NET_IP_VS_H

#include <linux/ip_vs.h>		/* definitions shared with userland */

#include <asm/types.h>			/* for __uXX types */

#include <linux/list.h>			/* for struct list_head */
#include <linux/spinlock.h>		/* for struct rwlock_t */
#include <linux/atomic.h>		/* for struct atomic_t */
#include <linux/refcount.h>		/* for struct refcount_t */

#include <linux/compiler.h>
#include <linux/timer.h>
#include <linux/bug.h>

#include <net/checksum.h>
#include <linux/netfilter.h>		/* for union nf_inet_addr */
#include <linux/ip.h>
#include <linux/ipv6.h>			/* for struct ipv6hdr */
#include <net/ipv6.h>
#if IS_ENABLED(CONFIG_IP_VS_IPV6)
#include <linux/netfilter_ipv6/ip6_tables.h>
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/net_namespace.h>		/* Net namespace */

#define IP_VS_HDR_INVERSE	1
#define IP_VS_HDR_ICMP		2

/* Generic access of ipvs struct */
static inline struct netns_ipvs *net_ipvs(struct net *net)
{
	return net->ipvs;
}

/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;

struct ip_vs_iphdr {
	int hdr_flags;	/* ipvs flags */
	__u32 off;	/* Where IPv4 or IPv6 header starts */
	__u32 len;	/* IPv4 simply where L4 starts
			 * IPv6 where L4 Transport Header starts */
	__u16 fragoffs;	/* IPv6 fragment offset, 0 if first frag (or not frag) */
	__s16 protocol;
	__s32 flags;
	union nf_inet_addr saddr;
	union nf_inet_addr daddr;
};

static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
				     int len, void *buffer)
{
	return skb_header_pointer(skb, offset, len, buffer);
}
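/* Illustrative sketch: frag_safe_skb_hp() is a thin wrapper around
 * skb_header_pointer(), typically used to pull a transport header into a
 * stack buffer once iph->len (the L4 offset) is known.  The locals below
 * ("iph", "_tcph", "th") are hypothetical.
 */
#if 0
	struct tcphdr _tcph, *th;

	th = frag_safe_skb_hp(skb, iph->len, sizeof(_tcph), &_tcph);
	if (!th)
		return 0;	/* header not available, e.g. truncated packet */
#endif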
/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
 * IPv6 requires some extra work, as finding the proper header position
 * depends on the IPv6 extension headers.
 */
static inline int
ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset,
		       int hdr_flags, struct ip_vs_iphdr *iphdr)
{
	iphdr->hdr_flags = hdr_flags;
	iphdr->off = offset;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		struct ipv6hdr _iph;
		const struct ipv6hdr *iph = skb_header_pointer(
			skb, offset, sizeof(_iph), &_iph);
		if (!iph)
			return 0;

		iphdr->saddr.in6 = iph->saddr;
		iphdr->daddr.in6 = iph->daddr;
		/* ipv6_find_hdr() updates len, flags */
		iphdr->len = offset;
		iphdr->flags = 0;
		iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
						&iphdr->fragoffs,
						&iphdr->flags);
		if (iphdr->protocol < 0)
			return 0;
	} else
#endif
	{
		struct iphdr _iph;
		const struct iphdr *iph = skb_header_pointer(
			skb, offset, sizeof(_iph), &_iph);
		if (!iph)
			return 0;

		iphdr->len = offset + iph->ihl * 4;
		iphdr->fragoffs = 0;
		iphdr->protocol = iph->protocol;
		iphdr->saddr.ip = iph->saddr;
		iphdr->daddr.ip = iph->daddr;
	}

	return 1;
}

static inline int
ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset,
			bool inverse, struct ip_vs_iphdr *iphdr)
{
	int hdr_flags = IP_VS_HDR_ICMP;

	if (inverse)
		hdr_flags |= IP_VS_HDR_INVERSE;

	return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr);
}

static inline int
ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse,
		   struct ip_vs_iphdr *iphdr)
{
	int hdr_flags = 0;

	if (inverse)
		hdr_flags |= IP_VS_HDR_INVERSE;

	return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb),
				      hdr_flags, iphdr);
}

static inline bool
ip_vs_iph_inverse(const struct ip_vs_iphdr *iph)
{
	return !!(iph->hdr_flags & IP_VS_HDR_INVERSE);
}

static inline bool
ip_vs_iph_icmp(const struct ip_vs_iphdr *iph)
{
	return !!(iph->hdr_flags & IP_VS_HDR_ICMP);
}
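/* Minimal usage sketch, assuming a netfilter hook context where "skb" and
 * the address family are already known:
 */
#if 0
	struct ip_vs_iphdr iph;

	if (!ip_vs_fill_iph_skb(AF_INET, skb, false, &iph))
		return NF_ACCEPT;	/* headers could not be parsed */
	/* iph.protocol, iph.saddr, iph.daddr and iph.len (start of L4)
	 * now describe the packet for the rest of the IPVS path.
	 */
#endif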
static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
				   const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		dst->in6 = src->in6;
	else
#endif
	dst->ip = src->ip;
}

static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst,
				  const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		dst->in6 = src->in6;
		return;
	}
#endif
	dst->ip = src->ip;
	dst->all[1] = 0;
	dst->all[2] = 0;
	dst->all[3] = 0;
}

static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
				   const union nf_inet_addr *b)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		return ipv6_addr_equal(&a->in6, &b->in6);
#endif
	return a->ip == b->ip;
}

#ifdef CONFIG_IP_VS_DEBUG
#include <linux/net.h>

int ip_vs_get_debug_level(void);

static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
					 const union nf_inet_addr *addr,
					 int *idx)
{
	int len;
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]",
			       &addr->in6) + 1;
	else
#endif
		len = snprintf(&buf[*idx], buf_len - *idx, "%pI4",
			       &addr->ip) + 1;

	*idx += len;
	BUG_ON(*idx > buf_len + 1);
	return &buf[*idx - len];
}

#define IP_VS_DBG_BUF(level, msg, ...)					\
	do {								\
		char ip_vs_dbg_buf[160];				\
		int ip_vs_dbg_idx = 0;					\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_ERR_BUF(msg...)						\
	do {								\
		char ip_vs_dbg_buf[160];				\
		int ip_vs_dbg_idx = 0;					\
		pr_err(msg);						\
	} while (0)

/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
#define IP_VS_DBG_ADDR(af, addr)					\
	ip_vs_dbg_addr(af, ip_vs_dbg_buf,				\
		       sizeof(ip_vs_dbg_buf), addr,			\
		       &ip_vs_dbg_idx)

#define IP_VS_DBG(level, msg, ...)					\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_DBG_RL(msg, ...)						\
	do {								\
		if (net_ratelimit())					\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)			\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			pp->debug_packet(af, pp, skb, ofs, msg);	\
	} while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)			\
	do {								\
		if (level <= ip_vs_get_debug_level() &&			\
		    net_ratelimit())					\
			pp->debug_packet(af, pp, skb, ofs, msg);	\
	} while (0)
#else	/* NO DEBUGGING at ALL */
#define IP_VS_DBG_BUF(level, msg...)  do {} while (0)
#define IP_VS_ERR_BUF(msg...)  do {} while (0)
#define IP_VS_DBG(level, msg...)  do {} while (0)
#define IP_VS_DBG_RL(msg...)  do {} while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
#endif

#define IP_VS_BUG() BUG()
#define IP_VS_ERR_RL(msg, ...)						\
	do {								\
		if (net_ratelimit())					\
			pr_err(msg, ##__VA_ARGS__);			\
	} while (0)

#ifdef CONFIG_IP_VS_DEBUG
#define EnterFunction(level)						\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG				\
			       pr_fmt("Enter: %s, %s line %i\n"),	\
			       __func__, __FILE__, __LINE__);		\
	} while (0)
#define LeaveFunction(level)						\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG				\
			       pr_fmt("Leave: %s, %s line %i\n"),	\
			       __func__, __FILE__, __LINE__);		\
	} while (0)
#else
#define EnterFunction(level)	do {} while (0)
#define LeaveFunction(level)	do {} while (0)
#endif
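/* Usage sketch for the debug helpers above: IP_VS_DBG_ADDR() may only be
 * used inside IP_VS_DBG_BUF()/IP_VS_ERR_BUF(), because those macros declare
 * the ip_vs_dbg_buf/ip_vs_dbg_idx locals it writes into.  "af", "addr" and
 * "port" are hypothetical.
 */
#if 0
	IP_VS_DBG_BUF(6, "lookup service %s:%u\n",
		      IP_VS_DBG_ADDR(af, &addr), ntohs(port));
#endif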
/* The port number of FTP service (in network order). */
#define FTPPORT  cpu_to_be16(21)
#define FTPDATA  cpu_to_be16(20)

/* TCP State Values */
enum {
	IP_VS_TCP_S_NONE = 0,
	IP_VS_TCP_S_ESTABLISHED,
	IP_VS_TCP_S_SYN_SENT,
	IP_VS_TCP_S_SYN_RECV,
	IP_VS_TCP_S_FIN_WAIT,
	IP_VS_TCP_S_TIME_WAIT,
	IP_VS_TCP_S_CLOSE,
	IP_VS_TCP_S_CLOSE_WAIT,
	IP_VS_TCP_S_LAST_ACK,
	IP_VS_TCP_S_LISTEN,
	IP_VS_TCP_S_SYNACK,
	IP_VS_TCP_S_LAST
};

/* UDP State Values */
enum {
	IP_VS_UDP_S_NORMAL,
	IP_VS_UDP_S_LAST,
};

/* ICMP State Values */
enum {
	IP_VS_ICMP_S_NORMAL,
	IP_VS_ICMP_S_LAST,
};

/* SCTP State Values */
enum ip_vs_sctp_states {
	IP_VS_SCTP_S_NONE,
	IP_VS_SCTP_S_INIT1,
	IP_VS_SCTP_S_INIT,
	IP_VS_SCTP_S_COOKIE_SENT,
	IP_VS_SCTP_S_COOKIE_REPLIED,
	IP_VS_SCTP_S_COOKIE_WAIT,
	IP_VS_SCTP_S_COOKIE,
	IP_VS_SCTP_S_COOKIE_ECHOED,
	IP_VS_SCTP_S_ESTABLISHED,
	IP_VS_SCTP_S_SHUTDOWN_SENT,
	IP_VS_SCTP_S_SHUTDOWN_RECEIVED,
	IP_VS_SCTP_S_SHUTDOWN_ACK_SENT,
	IP_VS_SCTP_S_REJECTED,
	IP_VS_SCTP_S_CLOSED,
	IP_VS_SCTP_S_LAST
};

/* Connection templates use bits from state */
#define IP_VS_CTPL_S_NONE	0x0000
#define IP_VS_CTPL_S_ASSURED	0x0001
#define IP_VS_CTPL_S_LAST	0x0002

/* Delta sequence info structure
 * Each ip_vs_conn has 2 (output AND input seq. changes).
 * Only used in the VS/NAT.
 */
struct ip_vs_seq {
	__u32 init_seq;		/* Add delta from this seq */
	__u32 delta;		/* Delta in sequence numbers */
	__u32 previous_delta;	/* Delta in sequence numbers
				 * before last resized pkt */
};

/* counters per cpu */
struct ip_vs_counters {
	__u64 conns;		/* connections scheduled */
	__u64 inpkts;		/* incoming packets */
	__u64 outpkts;		/* outgoing packets */
	__u64 inbytes;		/* incoming bytes */
	__u64 outbytes;		/* outgoing bytes */
};
/* Stats per cpu */
struct ip_vs_cpu_stats {
	struct ip_vs_counters	cnt;
	struct u64_stats_sync	syncp;
};

/* IPVS statistics objects */
struct ip_vs_estimator {
	struct list_head	list;

	u64			last_inbytes;
	u64			last_outbytes;
	u64			last_conns;
	u64			last_inpkts;
	u64			last_outpkts;

	u64			cps;
	u64			inpps;
	u64			outpps;
	u64			inbps;
	u64			outbps;
};

/*
 * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user
 */
struct ip_vs_kstats {
	u64	conns;		/* connections scheduled */
	u64	inpkts;		/* incoming packets */
	u64	outpkts;	/* outgoing packets */
	u64	inbytes;	/* incoming bytes */
	u64	outbytes;	/* outgoing bytes */

	u64	cps;		/* current connection rate */
	u64	inpps;		/* current in packet rate */
	u64	outpps;		/* current out packet rate */
	u64	inbps;		/* current in byte rate */
	u64	outbps;		/* current out byte rate */
};

struct ip_vs_stats {
	struct ip_vs_kstats	kstats;		/* kernel statistics */
	struct ip_vs_estimator	est;		/* estimator */
	struct ip_vs_cpu_stats __percpu	*cpustats;	/* per cpu counters */
	spinlock_t		lock;		/* spin lock */
	struct ip_vs_kstats	kstats0;	/* reset values */
};
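/* Sketch of reading one CPU's counters consistently via the u64_stats
 * seqcount, in the style of the estimator/ctl code.  "stats" and "cpu" are
 * hypothetical, and older kernels use the *_irq variants of the fetch
 * helpers.
 */
#if 0
	struct ip_vs_cpu_stats *s = per_cpu_ptr(stats->cpustats, cpu);
	unsigned int start;
	u64 inpkts, inbytes;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		inpkts = s->cnt.inpkts;
		inbytes = s->cnt.inbytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
#endif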
struct dst_entry;
struct iphdr;
struct ip_vs_conn;
struct ip_vs_app;
struct sk_buff;
struct ip_vs_proto_data;

struct ip_vs_protocol {
	struct ip_vs_protocol	*next;
	char			*name;
	u16			protocol;
	u16			num_states;
	int			dont_defrag;

	void (*init)(struct ip_vs_protocol *pp);

	void (*exit)(struct ip_vs_protocol *pp);

	int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);

	void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);

	int (*conn_schedule)(struct netns_ipvs *ipvs,
			     int af, struct sk_buff *skb,
			     struct ip_vs_proto_data *pd,
			     int *verdict, struct ip_vs_conn **cpp,
			     struct ip_vs_iphdr *iph);

	struct ip_vs_conn *
	(*conn_in_get)(struct netns_ipvs *ipvs,
		       int af,
		       const struct sk_buff *skb,
		       const struct ip_vs_iphdr *iph);

	struct ip_vs_conn *
	(*conn_out_get)(struct netns_ipvs *ipvs,
			int af,
			const struct sk_buff *skb,
			const struct ip_vs_iphdr *iph);

	int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

	int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

	const char *(*state_name)(int state);

	void (*state_transition)(struct ip_vs_conn *cp, int direction,
				 const struct sk_buff *skb,
				 struct ip_vs_proto_data *pd);

	int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);

	void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);

	int (*app_conn_bind)(struct ip_vs_conn *cp);

	void (*debug_packet)(int af, struct ip_vs_protocol *pp,
			     const struct sk_buff *skb,
			     int offset,
			     const char *msg);

	void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
};
/* protocol data per netns */
struct ip_vs_proto_data {
	struct ip_vs_proto_data	*next;
	struct ip_vs_protocol	*pp;
	int			*timeout_table;	/* protocol timeout table */
	atomic_t		appcnt;		/* counter of proto app incs. */
	struct tcp_states_t	*tcp_state_table;
};

struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs,
					      unsigned short proto);

struct ip_vs_conn_param {
	struct netns_ipvs		*ipvs;
	const union nf_inet_addr	*caddr;
	const union nf_inet_addr	*vaddr;
	__be16				cport;
	__be16				vport;
	__u16				protocol;
	u16				af;

	const struct ip_vs_pe		*pe;
	char				*pe_data;
	__u8				pe_data_len;
};

/* IP_VS structure allocated for each dynamically scheduled connection */
struct ip_vs_conn {
	struct hlist_node	c_list;		/* hashed list heads */
	/* Protocol, addresses and port numbers */
	__be16			cport;
	__be16			dport;
	__be16			vport;
	u16			af;		/* address family */
	union nf_inet_addr	caddr;		/* client address */
	union nf_inet_addr	vaddr;		/* virtual address */
	union nf_inet_addr	daddr;		/* destination address */
	volatile __u32		flags;		/* status flags */
	__u16			protocol;	/* Which protocol (TCP/UDP) */
	__u16			daf;		/* Address family of the dest */
	struct netns_ipvs	*ipvs;

	/* counter and timer */
	refcount_t		refcnt;		/* reference count */
	struct timer_list	timer;		/* Expiration timer */
	volatile unsigned long	timeout;	/* timeout */

	/* Flags and state transition */
	spinlock_t		lock;		/* lock for state transition */
	volatile __u16		state;		/* state info */
	volatile __u16		old_state;	/* old state, to be used for
						 * state transition triggered
						 * synchronization
						 */
	__u32			fwmark;		/* Firewall mark from skb */
	unsigned long		sync_endtime;	/* jiffies + sent_retries */

	/* Control members */
	struct ip_vs_conn	*control;	/* Master control connection */
	atomic_t		n_control;	/* Number of controlled ones */
	struct ip_vs_dest	*dest;		/* real server */
	atomic_t		in_pkts;	/* incoming packet counter */

	/* Packet transmitter for different forwarding methods.  If it
	 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
	 * otherwise this must be changed to a sk_buff **.
	 * NF_ACCEPT can be returned when destination is local.
	 */
	int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
			   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);

	/* Note: we can group the following members into a structure,
	 * in order to save more space, and the following members are
	 * only used in VS/NAT anyway
	 */
	struct ip_vs_app	*app;		/* bound ip_vs_app object */
	void			*app_data;	/* Application private data */
	struct ip_vs_seq	in_seq;		/* incoming seq. struct */
	struct ip_vs_seq	out_seq;	/* outgoing seq. struct */

	const struct ip_vs_pe	*pe;
	char			*pe_data;
	__u8			pe_data_len;

	struct rcu_head		rcu_head;
};

/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
 * for IPv6 support.
 *
 * We need these to conveniently pass around service and destination
 * options, but unfortunately, we also need to keep the old definitions to
 * maintain userspace backwards compatibility for the setsockopt interface.
 */
struct ip_vs_service_user_kern {
	/* virtual service addresses */
	u16			af;
	u16			protocol;
	union nf_inet_addr	addr;		/* virtual ip address */
	__be16			port;
	u32			fwmark;		/* firewall mark of service */

	/* virtual service options */
	char			*sched_name;
	char			*pe_name;
	unsigned int		flags;		/* virtual service flags */
	unsigned int		timeout;	/* persistent timeout in sec */
	__be32			netmask;	/* persistent netmask or plen */
};


struct ip_vs_dest_user_kern {
	/* destination server address */
	union nf_inet_addr	addr;
	__be16			port;

	/* real server options */
	unsigned int		conn_flags;	/* connection flags */
	int			weight;		/* destination weight */

	/* thresholds for active connections */
	u32			u_threshold;	/* upper threshold */
	u32			l_threshold;	/* lower threshold */

	/* Address family of addr */
	u16			af;
};


/*
 * The information about the virtual service offered to the net and the
 * forwarding entries.
 */
struct ip_vs_service {
	struct hlist_node	s_list;    /* for normal service table */
	struct hlist_node	f_list;    /* for fwmark-based service table */
	atomic_t		refcnt;    /* reference counter */

	u16			af;        /* address family */
	__u16			protocol;  /* which protocol (TCP/UDP) */
	union nf_inet_addr	addr;	   /* IP address for virtual service */
	__be16			port;	   /* port number for the service */
	__u32			fwmark;	   /* firewall mark of the service */
	unsigned int		flags;	   /* service status flags */
	unsigned int		timeout;   /* persistent timeout in ticks */
	__be32			netmask;   /* grouping granularity, mask/plen */
	struct netns_ipvs	*ipvs;

	struct list_head	destinations;  /* real server d-linked list */
	__u32			num_dests;     /* number of servers */
	struct ip_vs_stats	stats;         /* statistics for the service */

	/* for scheduling */
	struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */
	spinlock_t		sched_lock;	/* lock sched_data */
	void			*sched_data;	/* scheduler application data */

	/* alternate persistence engine */
	struct ip_vs_pe __rcu	*pe;
	int			conntrack_afmask;

	struct rcu_head		rcu_head;
};

/* Information for cached dst */
struct ip_vs_dest_dst {
	struct dst_entry	*dst_cache;	/* destination cache entry */
	u32			dst_cookie;
	union nf_inet_addr	dst_saddr;
	struct rcu_head		rcu_head;
};
/* The real server destination forwarding entry with ip address, port number,
 * and so on.
 */
struct ip_vs_dest {
	struct list_head	n_list;   /* for the dests in the service */
	struct hlist_node	d_list;   /* for table with all the dests */

	u16			af;		/* address family */
	__be16			port;		/* port number of the server */
	union nf_inet_addr	addr;		/* IP address of the server */
	volatile unsigned int	flags;		/* dest status flags */
	atomic_t		conn_flags;	/* flags to copy to conn */
	atomic_t		weight;		/* server weight */
	atomic_t		last_weight;	/* server latest weight */

	refcount_t		refcnt;		/* reference counter */
	struct ip_vs_stats	stats;		/* statistics */
	unsigned long		idle_start;	/* start time, jiffies */

	/* connection counters and thresholds */
	atomic_t		activeconns;	/* active connections */
	atomic_t		inactconns;	/* inactive connections */
	atomic_t		persistconns;	/* persistent connections */
	__u32			u_threshold;	/* upper threshold */
	__u32			l_threshold;	/* lower threshold */

	/* for destination cache */
	spinlock_t		dst_lock;	/* lock of dst_cache */
	struct ip_vs_dest_dst __rcu *dest_dst;	/* cached dst info */

	/* for virtual service */
	struct ip_vs_service __rcu *svc;	/* service it belongs to */
	__u16			protocol;	/* which protocol (TCP/UDP) */
	__be16			vport;		/* virtual port number */
	union nf_inet_addr	vaddr;		/* virtual IP address */
	__u32			vfwmark;	/* firewall mark of service */

	struct list_head	t_list;		/* in dest_trash */
	unsigned int		in_rs_table:1;	/* we are in rs_table */
};

/* The scheduler object */
struct ip_vs_scheduler {
	struct list_head	n_list;		/* d-linked list head */
	char			*name;		/* scheduler name */
	atomic_t		refcnt;		/* reference counter */
	struct module		*module;	/* THIS_MODULE/NULL */

	/* scheduler initializing service */
	int (*init_service)(struct ip_vs_service *svc);
	/* scheduling service finish */
	void (*done_service)(struct ip_vs_service *svc);
	/* dest is linked */
	int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
	/* dest is unlinked */
	int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
	/* dest is updated */
	int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);

	/* selecting a server from the given service */
	struct ip_vs_dest *(*schedule)(struct ip_vs_service *svc,
				       const struct sk_buff *skb,
				       struct ip_vs_iphdr *iph);
};
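/* Sketch of a minimal scheduler module built on the ip_vs_scheduler
 * interface above; all names are hypothetical.  Real schedulers usually
 * also provide init_service/add_dest etc., and register the object with
 * register_ip_vs_scheduler() (declared later in this header).
 */
#if 0
static struct ip_vs_dest *example_schedule(struct ip_vs_service *svc,
					   const struct sk_buff *skb,
					   struct ip_vs_iphdr *iph)
{
	struct ip_vs_dest *dest;

	/* pick the first usable destination */
	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
		if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
		    atomic_read(&dest->weight) > 0)
			return dest;
	}
	ip_vs_scheduler_err(svc, "no destination available");
	return NULL;
}

static struct ip_vs_scheduler example_scheduler = {
	.name		= "example",
	.refcnt		= ATOMIC_INIT(0),
	.module		= THIS_MODULE,
	.n_list		= LIST_HEAD_INIT(example_scheduler.n_list),
	.schedule	= example_schedule,
};
#endif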
/* The persistence engine object */
struct ip_vs_pe {
	struct list_head	n_list;		/* d-linked list head */
	char			*name;		/* pe name */
	atomic_t		refcnt;		/* reference counter */
	struct module		*module;	/* THIS_MODULE/NULL */

	/* get the connection template, if any */
	int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
	bool (*ct_match)(const struct ip_vs_conn_param *p,
			 struct ip_vs_conn *ct);
	u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
			   bool inverse);
	int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
	/* create connections for real-server outgoing packets */
	struct ip_vs_conn *(*conn_out)(struct ip_vs_service *svc,
				       struct ip_vs_dest *dest,
				       struct sk_buff *skb,
				       const struct ip_vs_iphdr *iph,
				       __be16 dport, __be16 cport);
};

/* The application module object (a.k.a. app incarnation) */
struct ip_vs_app {
	struct list_head	a_list;		/* member in app list */
	int			type;		/* IP_VS_APP_TYPE_xxx */
	char			*name;		/* application module name */
	__u16			protocol;
	struct module		*module;	/* THIS_MODULE/NULL */
	struct list_head	incs_list;	/* list of incarnations */

	/* members for application incarnations */
	struct list_head	p_list;		/* member in proto app list */
	struct ip_vs_app	*app;		/* its real application */
	__be16			port;		/* port number in net order */
	atomic_t		usecnt;		/* usage counter */
	struct rcu_head		rcu_head;

	/* output hook: Process packet in inout direction, diff set for TCP.
	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
	 *	   2=Mangled but checksum was not updated
	 */
	int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
		       struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);

	/* input hook: Process packet in outin direction, diff set for TCP.
	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
	 *	   2=Mangled but checksum was not updated
	 */
	int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
		      struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);

	/* ip_vs_app initializer */
	int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);

	/* ip_vs_app finish */
	int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);


	/* not used now */
	int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *,
			 struct ip_vs_protocol *);

	void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);

	int			*timeout_table;
	int			*timeouts;
	int			timeouts_size;

	int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
			     int *verdict, struct ip_vs_conn **cpp);

	struct ip_vs_conn *
	(*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
		       const struct iphdr *iph, int inverse);

	struct ip_vs_conn *
	(*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
			const struct iphdr *iph, int inverse);

	int (*state_transition)(struct ip_vs_conn *cp, int direction,
				const struct sk_buff *skb,
				struct ip_vs_app *app);

	void (*timeout_change)(struct ip_vs_app *app, int flags);
};

struct ipvs_master_sync_state {
	struct list_head	sync_queue;
	struct ip_vs_sync_buff	*sync_buff;
	unsigned long		sync_queue_len;
	unsigned int		sync_queue_delay;
	struct task_struct	*master_thread;
	struct delayed_work	master_wakeup_work;
	struct netns_ipvs	*ipvs;
};

/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD		(120 * HZ)

struct ipvs_sync_daemon_cfg {
	union nf_inet_addr	mcast_group;
	int			syncid;
	u16			sync_maxlen;
	u16			mcast_port;
	u8			mcast_af;
	u8			mcast_ttl;
	/* multicast interface name */
	char			mcast_ifn[IP_VS_IFNAME_MAXLEN];
};

/* IPVS in network namespace */
struct netns_ipvs {
	int			gen;		/* Generation */
	int			enable;		/* enable like nf_hooks do */
	/* Hash table: for real service lookups */
	#define IP_VS_RTAB_BITS 4
	#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
	#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)

	struct hlist_head	rs_table[IP_VS_RTAB_SIZE];
	/* ip_vs_app */
	struct list_head	app_list;
	/* ip_vs_proto */
	#define IP_VS_PROTO_TAB_SIZE	32	/* must be power of 2 */
	struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
	/* ip_vs_proto_tcp */
#ifdef CONFIG_IP_VS_PROTO_TCP
	#define TCP_APP_TAB_BITS	4
	#define TCP_APP_TAB_SIZE	(1 << TCP_APP_TAB_BITS)
	#define TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)
	struct list_head	tcp_apps[TCP_APP_TAB_SIZE];
#endif
	/* ip_vs_proto_udp */
#ifdef CONFIG_IP_VS_PROTO_UDP
	#define UDP_APP_TAB_BITS	4
	#define UDP_APP_TAB_SIZE	(1 << UDP_APP_TAB_BITS)
	#define UDP_APP_TAB_MASK	(UDP_APP_TAB_SIZE - 1)
	struct list_head	udp_apps[UDP_APP_TAB_SIZE];
#endif
	/* ip_vs_proto_sctp */
#ifdef CONFIG_IP_VS_PROTO_SCTP
	#define SCTP_APP_TAB_BITS	4
	#define SCTP_APP_TAB_SIZE	(1 << SCTP_APP_TAB_BITS)
	#define SCTP_APP_TAB_MASK	(SCTP_APP_TAB_SIZE - 1)
	/* Hash table for SCTP application incarnations */
	struct list_head	sctp_apps[SCTP_APP_TAB_SIZE];
#endif
	/* ip_vs_conn */
	atomic_t		conn_count;	/* connection counter */

	/* ip_vs_ctl */
	struct ip_vs_stats	tot_stats;	/* Statistics & est. */

	int			num_services;	/* no of virtual services */

	/* Trash for destinations */
	struct list_head	dest_trash;
	spinlock_t		dest_trash_lock;
	struct timer_list	dest_trash_timer;  /* expiration timer */
	/* Service counters */
	atomic_t		ftpsvc_counter;
	atomic_t		nullsvc_counter;
	atomic_t		conn_out_counter;

#ifdef CONFIG_SYSCTL
	/* 1/rate drop and drop-entry variables */
	struct delayed_work	defense_work;	/* Work handler */
	int			drop_rate;
	int			drop_counter;
	atomic_t		dropentry;
	/* locks in ctl.c */
	spinlock_t		dropentry_lock;	 /* drop entry handling */
	spinlock_t		droppacket_lock; /* drop packet handling */
	spinlock_t		securetcp_lock;	 /* state and timeout tables */

	/* sys-ctl struct */
	struct ctl_table_header	*sysctl_hdr;
	struct ctl_table	*sysctl_tbl;
#endif

	/* sysctl variables */
	int			sysctl_amemthresh;
	int			sysctl_am_droprate;
	int			sysctl_drop_entry;
	int			sysctl_drop_packet;
	int			sysctl_secure_tcp;
#ifdef CONFIG_IP_VS_NFCT
	int			sysctl_conntrack;
#endif
	int			sysctl_snat_reroute;
	int			sysctl_sync_ver;
	int			sysctl_sync_ports;
	int			sysctl_sync_persist_mode;
	unsigned long		sysctl_sync_qlen_max;
	int			sysctl_sync_sock_size;
	int			sysctl_cache_bypass;
	int			sysctl_expire_nodest_conn;
	int			sysctl_sloppy_tcp;
	int			sysctl_sloppy_sctp;
	int			sysctl_expire_quiescent_template;
	int			sysctl_sync_threshold[2];
	unsigned int		sysctl_sync_refresh_period;
	int			sysctl_sync_retries;
	int			sysctl_nat_icmp_send;
	int			sysctl_pmtu_disc;
	int			sysctl_backup_only;
	int			sysctl_conn_reuse_mode;
	int			sysctl_schedule_icmp;
	int			sysctl_ignore_tunneled;

	/* ip_vs_lblc */
	int			sysctl_lblc_expiration;
	struct ctl_table_header	*lblc_ctl_header;
	struct ctl_table	*lblc_ctl_table;
	/* ip_vs_lblcr */
	int			sysctl_lblcr_expiration;
	struct ctl_table_header	*lblcr_ctl_header;
	struct ctl_table	*lblcr_ctl_table;
	/* ip_vs_est */
	struct list_head	est_list;	/* estimator list */
	spinlock_t		est_lock;
	struct timer_list	est_timer;	/* Estimation timer */
	/* ip_vs_sync */
	spinlock_t		sync_lock;
	struct ipvs_master_sync_state *ms;
	spinlock_t		sync_buff_lock;
	struct task_struct	**backup_threads;
	int			threads_mask;
	volatile int		sync_state;
	struct mutex		sync_mutex;
	struct ipvs_sync_daemon_cfg	mcfg;	/* Master Configuration */
	struct ipvs_sync_daemon_cfg	bcfg;	/* Backup Configuration */
	/* net name space ptr */
	struct net		*net;		/* Needed by timer routines */
	/* Number of heterogeneous destinations, needed because heterogeneous
	 * dests are not supported when synchronization is enabled.
	 */
	unsigned int		mixed_address_family_dests;
};

#define DEFAULT_SYNC_THRESHOLD	3
#define DEFAULT_SYNC_PERIOD	50
#define DEFAULT_SYNC_VER	1
#define DEFAULT_SLOPPY_TCP	0
#define DEFAULT_SLOPPY_SCTP	0
#define DEFAULT_SYNC_REFRESH_PERIOD	(0U * HZ)
#define DEFAULT_SYNC_RETRIES		0
#define IPVS_SYNC_WAKEUP_RATE	8
#define IPVS_SYNC_QLEN_MAX	(IPVS_SYNC_WAKEUP_RATE * 4)
#define IPVS_SYNC_SEND_DELAY	(HZ / 50)
#define IPVS_SYNC_CHECK_PERIOD	HZ
#define IPVS_SYNC_FLUSH_TIME	(HZ * 2)
#define IPVS_SYNC_PORTS_MAX	(1 << 6)

#ifdef CONFIG_SYSCTL

static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_threshold[0];
}

static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
}
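/* The sync_threshold sysctl is a pair: element [0] is the packet threshold
 * after which a connection is first synced to the backup, element [1] the
 * period used for further syncs.  A rough usage sketch ("cp" and "pkts" are
 * hypothetical; the real decision logic lives in the sync code):
 */
#if 0
	int pkts = atomic_read(&cp->in_pkts);

	if (pkts >= sysctl_sync_threshold(ipvs) &&
	    pkts % sysctl_sync_period(ipvs) == 0)
		ip_vs_sync_conn(ipvs, cp, pkts);
#endif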
static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_sync_refresh_period);
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_retries;
}

static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_ver;
}

static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sloppy_tcp;
}

static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sloppy_sctp;
}

static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_sync_ports);
}

static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_persist_mode;
}

static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_qlen_max;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_sock_size;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_pmtu_disc;
}

static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
	return ipvs->sync_state & IP_VS_STATE_BACKUP &&
	       ipvs->sysctl_backup_only;
}

static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_conn_reuse_mode;
}

static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_schedule_icmp;
}

static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_ignore_tunneled;
}

static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_cache_bypass;
}

#else

static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_THRESHOLD;
}

static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_PERIOD;
}

static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_REFRESH_PERIOD;
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_RETRIES & 3;
}

static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_VER;
}

static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
{
	return DEFAULT_SLOPPY_TCP;
}

static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
{
	return DEFAULT_SLOPPY_SCTP;
}

static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
	return IPVS_SYNC_QLEN_MAX;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
{
	return 0;
}

#endif

/* IPVS core functions
 * (from ip_vs_core.c)
 */
const char *ip_vs_proto_name(unsigned int proto);
void ip_vs_init_hash_table(struct list_head *table, int rows);
struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
				      struct ip_vs_dest *dest,
				      struct sk_buff *skb,
				      const struct ip_vs_iphdr *iph,
				      __be16 dport,
				      __be16 cport);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))

#define IP_VS_APP_TYPE_FTP	1

/* ip_vs_conn handling functions
 * (from ip_vs_conn.c)
 */
enum {
	IP_VS_DIR_INPUT = 0,
	IP_VS_DIR_OUTPUT,
	IP_VS_DIR_INPUT_ONLY,
	IP_VS_DIR_LAST,
};

static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af, int protocol,
					 const union nf_inet_addr *caddr,
					 __be16 cport,
					 const union nf_inet_addr *vaddr,
					 __be16 vport,
					 struct ip_vs_conn_param *p)
{
	p->ipvs = ipvs;
	p->af = af;
	p->protocol = protocol;
	p->caddr = caddr;
	p->cport = cport;
	p->vaddr = vaddr;
	p->vport = vport;
	p->pe = NULL;
	p->pe_data = NULL;
}

struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
					   const struct sk_buff *skb,
					   const struct ip_vs_iphdr *iph);

struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
					    const struct sk_buff *skb,
					    const struct ip_vs_iphdr *iph);

/* Get reference to gain full access to conn.
 * By default, RCU read-side critical sections have access only to
 * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference.
 */
static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
{
	return refcount_inc_not_zero(&cp->refcnt);
}

/* put back the conn without restarting its timer */
static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
{
	smp_mb__before_atomic();
	refcount_dec(&cp->refcnt);
}
void ip_vs_conn_put(struct ip_vs_conn *cp);
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);

struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
				  const union nf_inet_addr *daddr,
				  __be16 dport, unsigned int flags,
				  struct ip_vs_dest *dest, __u32 fwmark);
void ip_vs_conn_expire_now(struct ip_vs_conn *cp);

const char *ip_vs_state_name(const struct ip_vs_conn *cp);

void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
int ip_vs_conn_init(void);
void ip_vs_conn_cleanup(void);
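/* Lookup sketch: fill an ip_vs_conn_param for the client->virtual tuple and
 * look the connection up in the "in" direction ("caddr", "vaddr" and the
 * ports are hypothetical).
 */
#if 0
	struct ip_vs_conn_param p;
	struct ip_vs_conn *cp;

	ip_vs_conn_fill_param(ipvs, AF_INET, IPPROTO_TCP,
			      &caddr, cport, &vaddr, vport, &p);
	cp = ip_vs_conn_in_get(&p);
	if (cp) {
		/* ... use the connection ... */
		__ip_vs_conn_put(cp);	/* drop the reference, keep the timer */
	}
#endif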
static inline void ip_vs_control_del(struct ip_vs_conn *cp)
{
	struct ip_vs_conn *ctl_cp = cp->control;
	if (!ctl_cp) {
		IP_VS_ERR_BUF("request control DEL for uncontrolled: "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		return;
	}

	IP_VS_DBG_BUF(7, "DELeting control for: "
		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
		      ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
		      ntohs(ctl_cp->cport));

	cp->control = NULL;
	if (atomic_read(&ctl_cp->n_control) == 0) {
		IP_VS_ERR_BUF("BUG control DEL with n=0 : "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		return;
	}
	atomic_dec(&ctl_cp->n_control);
}

static inline void
ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
{
	if (cp->control) {
		IP_VS_ERR_BUF("request control ADD for already controlled: "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		ip_vs_control_del(cp);
	}

	IP_VS_DBG_BUF(7, "ADDing control for: "
		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
		      ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
		      ntohs(ctl_cp->cport));

	cp->control = ctl_cp;
	atomic_inc(&ctl_cp->n_control);
}

/* Mark our template as assured */
static inline void
ip_vs_control_assure_ct(struct ip_vs_conn *cp)
{
	struct ip_vs_conn *ct = cp->control;

	if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) &&
	    (ct->flags & IP_VS_CONN_F_TEMPLATE))
		ct->state |= IP_VS_CTPL_S_ASSURED;
}

/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
int ip_vs_control_net_init(struct netns_ipvs *ipvs);
int ip_vs_protocol_net_init(struct netns_ipvs *ipvs);
int ip_vs_app_net_init(struct netns_ipvs *ipvs);
int ip_vs_conn_net_init(struct netns_ipvs *ipvs);
int ip_vs_sync_net_init(struct netns_ipvs *ipvs);
void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs);

/* IPVS application functions
 * (from ip_vs_app.c)
 */
#define IP_VS_APP_MAX_PORTS  8
struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
void ip_vs_unbind_app(struct ip_vs_conn *cp);
int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
			   __u16 port);
int ip_vs_app_inc_get(struct ip_vs_app *inc);
void ip_vs_app_inc_put(struct ip_vs_app *inc);

int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb,
		      struct ip_vs_iphdr *ipvsh);
int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb,
		     struct ip_vs_iphdr *ipvsh);

int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);

/* Use a #define to avoid all of module.h just for these trivial ops */
#define ip_vs_pe_get(pe)			\
	if (pe && pe->module)			\
		__module_get(pe->module);

#define ip_vs_pe_put(pe)			\
	if (pe && pe->module)			\
		module_put(pe->module);

/* IPVS protocol functions (from ip_vs_proto.c) */
int ip_vs_protocol_init(void);
void ip_vs_protocol_cleanup(void);
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
int *ip_vs_create_timeout_table(int *table, int size);
void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
			       const struct sk_buff *skb, int offset,
			       const char *msg);

extern struct ip_vs_protocol ip_vs_protocol_tcp;
extern struct ip_vs_protocol ip_vs_protocol_udp;
extern struct ip_vs_protocol ip_vs_protocol_icmp;
extern struct ip_vs_protocol ip_vs_protocol_esp;
extern struct ip_vs_protocol ip_vs_protocol_ah;
extern struct ip_vs_protocol ip_vs_protocol_sctp;

/* Registering/unregistering scheduler functions
 * (from ip_vs_sched.c)
 */
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int ip_vs_bind_scheduler(struct ip_vs_service *svc,
			 struct ip_vs_scheduler *scheduler);
void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
			    struct ip_vs_scheduler *sched);
struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
	       struct ip_vs_proto_data *pd, int *ignored,
	       struct ip_vs_iphdr *iph);
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);

void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
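/* A scheduler module like the sketch further above would simply register
 * and unregister its ip_vs_scheduler from module init/exit (cf. the rr/wlc
 * schedulers); "example_scheduler" is the hypothetical object from that
 * sketch.
 */
#if 0
static int __init example_init(void)
{
	return register_ip_vs_scheduler(&example_scheduler);
}

static void __exit example_cleanup(void)
{
	unregister_ip_vs_scheduler(&example_scheduler);
}
#endif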
/* IPVS control data and functions (from ip_vs_ctl.c) */
extern struct ip_vs_stats ip_vs_stats;
extern int sysctl_ip_vs_sync_ver;

struct ip_vs_service *
ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
		   const union nf_inet_addr *vaddr, __be16 vport);

bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
			    const union nf_inet_addr *daddr, __be16 dport);

struct ip_vs_dest *
ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
			const union nf_inet_addr *daddr, __be16 dport);

int ip_vs_use_count_inc(void);
void ip_vs_use_count_dec(void);
int ip_vs_register_nl_ioctl(void);
void ip_vs_unregister_nl_ioctl(void);
int ip_vs_control_init(void);
void ip_vs_control_cleanup(void);
struct ip_vs_dest *
ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
		const union nf_inet_addr *daddr, __be16 dport,
		const union nf_inet_addr *vaddr, __be16 vport,
		__u16 protocol, __u32 fwmark, __u32 flags);
void ip_vs_try_bind_dest(struct ip_vs_conn *cp);

static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
{
	refcount_inc(&dest->refcnt);
}

static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
{
	smp_mb__before_atomic();
	refcount_dec(&dest->refcnt);
}

static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
{
	if (refcount_dec_and_test(&dest->refcnt))
		kfree(dest);
}

/* IPVS sync daemon data and function prototypes
 * (from ip_vs_sync.c)
 */
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *cfg,
		      int state);
int stop_sync_thread(struct netns_ipvs *ipvs, int state);
void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts);

/* IPVS rate estimator prototypes (from ip_vs_est.c) */
void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);

/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		    struct ip_vs_protocol *pp, int offset,
		    unsigned int hooknum, struct ip_vs_iphdr *iph);
void ip_vs_dest_dst_rcu_free(struct rcu_head *head);

#ifdef CONFIG_IP_VS_IPV6
int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		       struct ip_vs_protocol *pp, int offset,
		       unsigned int hooknum, struct ip_vs_iphdr *iph);
#endif

#ifdef CONFIG_SYSCTL
/* This is a simple mechanism to ignore packets when
 * we are under load.  Just set ip_vs_drop_rate to 'n' and
 * we start to drop 1/rate of the packets.
 */
static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
	if (!ipvs->drop_rate)
		return 0;
	if (--ipvs->drop_counter > 0)
		return 0;
	ipvs->drop_counter = ipvs->drop_rate;
	return 1;
}
#else
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif
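/* Worked example for ip_vs_todrop(), assuming ipvs->drop_rate == 3 and a
 * freshly reloaded ipvs->drop_counter == 3:
 */
#if 0
	ip_vs_todrop(ipvs);	/* counter 3 -> 2, returns 0: keep packet */
	ip_vs_todrop(ipvs);	/* counter 2 -> 1, returns 0: keep packet */
	ip_vs_todrop(ipvs);	/* counter 1 -> 0, returns 1: drop packet,
				 * counter reloaded to drop_rate (3) */
#endif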
/* ip_vs_fwd_tag returns the forwarding tag of the connection */
#define IP_VS_FWD_METHOD(cp)  (cp->flags & IP_VS_CONN_F_FWD_MASK)

static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
{
	char fwd;

	switch (IP_VS_FWD_METHOD(cp)) {
	case IP_VS_CONN_F_MASQ:
		fwd = 'M'; break;
	case IP_VS_CONN_F_LOCALNODE:
		fwd = 'L'; break;
	case IP_VS_CONN_F_TUNNEL:
		fwd = 'T'; break;
	case IP_VS_CONN_F_DROUTE:
		fwd = 'R'; break;
	case IP_VS_CONN_F_BYPASS:
		fwd = 'B'; break;
	default:
		fwd = '?'; break;
	}
	return fwd;
}

void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
		    struct ip_vs_conn *cp, int dir);

#ifdef CONFIG_IP_VS_IPV6
void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
		       struct ip_vs_conn *cp, int dir);
#endif

__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);

static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
{
	__be32 diff[2] = { ~old, new };

	return csum_partial(diff, sizeof(diff), oldsum);
}

#ifdef CONFIG_IP_VS_IPV6
static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
					__wsum oldsum)
{
	__be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
			   new[3], new[2], new[1], new[0] };

	return csum_partial(diff, sizeof(diff), oldsum);
}
#endif

static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
{
	__be16 diff[2] = { ~old, new };

	return csum_partial(diff, sizeof(diff), oldsum);
}
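/* Sketch of an incremental L4 checksum fix-up with the helpers above, in
 * the style of the TCP NAT handlers: fold the old address and port out and
 * the new ones in without touching the payload ("tcph", "oldip", "newip",
 * "oldport" and "newport" are hypothetical).
 */
#if 0
	tcph->check =
		csum_fold(ip_vs_check_diff4(oldip, newip,
				ip_vs_check_diff2(oldport, newport,
						  ~csum_unfold(tcph->check))));
#endif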
/* Forget current conntrack (unconfirmed) and attach notrack entry */
static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	if (ct) {
		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
	}
#endif
}

#ifdef CONFIG_IP_VS_NFCT
/* Netfilter connection tracking
 * (from ip_vs_nfct.c)
 */
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
#ifdef CONFIG_SYSCTL
	return ipvs->sysctl_conntrack;
#else
	return 0;
#endif
}

void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
			    int outin);
int ip_vs_confirm_conntrack(struct sk_buff *skb);
void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
			       struct ip_vs_conn *cp, u_int8_t proto,
			       const __be16 port, int from_rs);
void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);

#else

static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline void ip_vs_update_conntrack(struct sk_buff *skb,
					  struct ip_vs_conn *cp, int outin)
{
}

static inline int ip_vs_confirm_conntrack(struct sk_buff *skb)
{
	return NF_ACCEPT;
}

static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
{
}
#endif /* CONFIG_IP_VS_NFCT */

/* Really using conntrack? */
static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
					     struct sk_buff *skb)
{
#ifdef CONFIG_IP_VS_NFCT
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	if (!(cp->flags & IP_VS_CONN_F_NFCT))
		return false;
	ct = nf_ct_get(skb, &ctinfo);
	if (ct)
		return true;
#endif
	return false;
}

static inline int ip_vs_register_conntrack(struct ip_vs_service *svc)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	int afmask = (svc->af == AF_INET6) ? 2 : 1;
	int ret = 0;

	if (!(svc->conntrack_afmask & afmask)) {
		ret = nf_ct_netns_get(svc->ipvs->net, svc->af);
		if (ret >= 0)
			svc->conntrack_afmask |= afmask;
	}
	return ret;
#else
	return 0;
#endif
}

static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	int afmask = (svc->af == AF_INET6) ? 2 : 1;

	if (svc->conntrack_afmask & afmask) {
		nf_ct_netns_put(svc->ipvs->net, svc->af);
		svc->conntrack_afmask &= ~afmask;
	}
#endif
}

static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
	/* We think the overhead of processing active connections is 256
	 * times higher than that of inactive connections on average. (This
	 * 256 times might not be accurate, we will change it later.) We
	 * use the following formula to estimate the overhead now:
	 *		  dest->activeconns*256 + dest->inactconns
	 */
	return (atomic_read(&dest->activeconns) << 8) +
		atomic_read(&dest->inactconns);
}

#endif	/* _NET_IP_VS_H */