// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/siphash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <uapi/linux/netfilter/nf_nat.h>

#include "nf_internals.h"

static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static unsigned int nat_net_id __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static siphash_key_t nf_nat_hash_rnd __read_mostly;

struct nf_nat_lookup_hook_priv {
	struct nf_hook_entries __rcu *entries;

	struct rcu_head rcu_head;
};

struct nf_nat_hooks_net {
	struct nf_hook_ops *nat_hook_ops;
	unsigned int users;
};

struct nat_net {
	struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};

#ifdef CONFIG_XFRM
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi4 *fl4 = &fl->u.ip4;

	if (ct->status & statusbit) {
		fl4->daddr = t->dst.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl4->saddr = t->src.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_sport = t->src.u.all;
	}
}

static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi6 *fl6 = &fl->u.ip6;

	if (ct->status & statusbit) {
		fl6->daddr = t->dst.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl6->saddr = t->src.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_sport = t->src.u.all;
	}
#endif
}
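
/* Illustrative walk-through of the statusbit logic in the helpers above and
 * the dispatcher below: for a connection that was only source-NATed
 * (IPS_SRC_NAT set in ct->status), a packet in the ORIGINAL direction
 * arrives with statusbit == IPS_DST_NAT, so the daddr/dport branch is
 * skipped; the helper then flips statusbit with IPS_NAT_MASK, sees
 * IPS_SRC_NAT set, and fills the flow's saddr/sport from the conntrack
 * tuple, i.e. the pre-NAT source, presumably so that xfrm policy lookups
 * match on the original addresses.
 */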

static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	switch (family) {
	case NFPROTO_IPV4:
		nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl);
		return;
	case NFPROTO_IPV6:
		nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl);
		return;
	}
}
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;
	struct {
		struct nf_conntrack_man src;
		u32 net_mix;
		u32 protonum;
	} __aligned(SIPHASH_ALIGNMENT) combined;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	memset(&combined, 0, sizeof(combined));

	/* Original src, to ensure we map it consistently if possible. */
	combined.src = tuple->src;
	combined.net_mix = net_hash_mix(n);
	combined.protonum = tuple->dst.protonum;

	hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);

	return reciprocal_scale(hash, nf_nat_htable_size);
}

/* Is this tuple already taken? (not by us) */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuple(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}

static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
				 const struct nf_nat_range2 *range)
{
	if (t->src.l3num == NFPROTO_IPV4)
		return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
		       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);

	return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
	       ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}

/* Is the manipulable part of the tuple between min and max inclusive? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
			     enum nf_nat_manip_type maniptype,
			     const union nf_conntrack_man_proto *min,
			     const union nf_conntrack_man_proto *max)
{
	__be16 port;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
		       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
	case IPPROTO_GRE: /* all fall through */
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_DCCP:
	case IPPROTO_SCTP:
		if (maniptype == NF_NAT_MANIP_SRC)
			port = tuple->src.u.all;
		else
			port = tuple->dst.u.all;

		return ntohs(port) >= ntohs(min->all) &&
		       ntohs(port) <= ntohs(max->all);
	default:
		return true;
	}
}
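
/* Example for l4proto_in_range(): with min->all == htons(1000) and
 * max->all == htons(1999), an NF_NAT_MANIP_SRC check on a TCP tuple with
 * source port 1500 is in range, while source port 2000 is not.  For
 * ICMP/ICMPv6 the echo id plays the role of the port.
 */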

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !nf_nat_inet_in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
		return 1;

	return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
				&range->min_proto, &range->max_proto);
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range2 *range)
{
	unsigned int h = hash_by_src(net, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuple(result,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range))
				return 1;
		}
	}
	return 0;
}
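
/* Example of the mapping reuse above (addresses illustrative): if
 * connections from 10.0.0.5 were already SNATed to 192.0.2.10,
 * find_appropriate_src() proposes the same external address for a new
 * connection from 10.0.0.5, and the caller keeps it as long as the
 * resulting tuple is free and still fits the current range.
 */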

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance.  Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
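
/* Worked example for find_best_ips_proto() above: for an IPv4 range of
 * 10.0.0.1-10.0.0.14, max is 0, so a single iteration computes dist = 14
 * and picks htonl(minip + reciprocal_scale(j, 14)), i.e. one of the 14
 * addresses, keyed by the hash of the source address.  With
 * NF_NAT_RANGE_PERSISTENT the destination is left out of the hash, so a
 * client keeps the same external address across all destinations.
 */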

/* Alter the per-proto part of the tuple (depending on maniptype), to
 * give a unique tuple in the given range if possible.
 *
 * Per-protocol part of tuple is initialized to the incoming packet.
 */
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
					const struct nf_nat_range2 *range,
					enum nf_nat_manip_type maniptype,
					const struct nf_conn *ct)
{
	unsigned int range_size, min, max, i, attempts;
	__be16 *keyptr;
	u16 off;
	static const unsigned int max_attempts = 128;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		/* id is same for either direction... */
		keyptr = &tuple->src.u.icmp.id;
		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 0;
			range_size = 65536;
		} else {
			min = ntohs(range->min_proto.icmp.id);
			range_size = ntohs(range->max_proto.icmp.id) -
				     ntohs(range->min_proto.icmp.id) + 1;
		}
		goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
	case IPPROTO_GRE:
		/* If there is no master conntrack we are not PPTP,
		 * do not change tuples.
		 */
		if (!ct->master)
			return;

		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.gre.key;
		else
			keyptr = &tuple->dst.u.gre.key;

		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 1;
			range_size = 65535;
		} else {
			min = ntohs(range->min_proto.gre.key);
			range_size = ntohs(range->max_proto.gre.key) - min + 1;
		}
		goto find_free_id;
#endif
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_TCP:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.all;
		else
			keyptr = &tuple->dst.u.all;

		break;
	default:
		return;
	}

	/* If no range specified... */
	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
		/* If it's dst rewrite, can't change port */
		if (maniptype == NF_NAT_MANIP_DST)
			return;

		if (ntohs(*keyptr) < 1024) {
			/* Loose convention: ports >= 512 are used for
			 * credential passing.
			 */
			if (ntohs(*keyptr) < 512) {
				min = 1;
				range_size = 511 - min + 1;
			} else {
				min = 600;
				range_size = 1023 - min + 1;
			}
		} else {
			min = 1024;
			range_size = 65535 - 1024 + 1;
		}
	} else {
		min = ntohs(range->min_proto.all);
		max = ntohs(range->max_proto.all);
		if (unlikely(max < min))
			swap(max, min);
		range_size = max - min + 1;
	}

find_free_id:
	if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
		off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
	else
		off = prandom_u32();

	attempts = range_size;
	if (attempts > max_attempts)
		attempts = max_attempts;

	/* We are in softirq; doing a search of the entire range risks
	 * soft lockup when all tuples are already used.
	 *
	 * If we can't find any free port from the first offset, pick a new
	 * one and try again, with an ever smaller search window.
	 */
another_round:
	for (i = 0; i < attempts; i++, off++) {
		*keyptr = htons(min + off % range_size);
		if (!nf_nat_used_tuple(tuple, ct))
			return;
	}

	if (attempts >= range_size || attempts < 16)
		return;
	attempts /= 2;
	off = prandom_u32();
	goto another_round;
}
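
/* nf_nat_l4proto_unique_tuple() above bounds its work: with the full
 * 1024-65535 port range, range_size is 64512, but only 128 ports starting
 * at a random offset are probed per round; on failure it retries from a
 * fresh random offset with 64, 32, 16 and finally 8 attempts before giving
 * up, leaving any remaining clash to be caught in __nf_conntrack_confirm.
 */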
538 */ 539 540 /* Only bother mapping if it's not already in range and unique */ 541 if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) { 542 if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) { 543 if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) && 544 l4proto_in_range(tuple, maniptype, 545 &range->min_proto, 546 &range->max_proto) && 547 (range->min_proto.all == range->max_proto.all || 548 !nf_nat_used_tuple(tuple, ct))) 549 return; 550 } else if (!nf_nat_used_tuple(tuple, ct)) { 551 return; 552 } 553 } 554 555 /* Last chance: get protocol to try to obtain unique tuple. */ 556 nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct); 557 } 558 559 struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct) 560 { 561 struct nf_conn_nat *nat = nfct_nat(ct); 562 if (nat) 563 return nat; 564 565 if (!nf_ct_is_confirmed(ct)) 566 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC); 567 568 return nat; 569 } 570 EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add); 571 572 unsigned int 573 nf_nat_setup_info(struct nf_conn *ct, 574 const struct nf_nat_range2 *range, 575 enum nf_nat_manip_type maniptype) 576 { 577 struct net *net = nf_ct_net(ct); 578 struct nf_conntrack_tuple curr_tuple, new_tuple; 579 580 /* Can't setup nat info for confirmed ct. */ 581 if (nf_ct_is_confirmed(ct)) 582 return NF_ACCEPT; 583 584 WARN_ON(maniptype != NF_NAT_MANIP_SRC && 585 maniptype != NF_NAT_MANIP_DST); 586 587 if (WARN_ON(nf_nat_initialized(ct, maniptype))) 588 return NF_DROP; 589 590 /* What we've got will look like inverse of reply. Normally 591 * this is what is in the conntrack, except for prior 592 * manipulations (future optimization: if num_manips == 0, 593 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) 594 */ 595 nf_ct_invert_tuple(&curr_tuple, 596 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 597 598 get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype); 599 600 if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) { 601 struct nf_conntrack_tuple reply; 602 603 /* Alter conntrack table so will recognize replies. */ 604 nf_ct_invert_tuple(&reply, &new_tuple); 605 nf_conntrack_alter_reply(ct, &reply); 606 607 /* Non-atomic: we own this at the moment. */ 608 if (maniptype == NF_NAT_MANIP_SRC) 609 ct->status |= IPS_SRC_NAT; 610 else 611 ct->status |= IPS_DST_NAT; 612 613 if (nfct_help(ct) && !nfct_seqadj(ct)) 614 if (!nfct_seqadj_ext_add(ct)) 615 return NF_DROP; 616 } 617 618 if (maniptype == NF_NAT_MANIP_SRC) { 619 unsigned int srchash; 620 spinlock_t *lock; 621 622 srchash = hash_by_src(net, 623 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 624 lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS]; 625 spin_lock_bh(lock); 626 hlist_add_head_rcu(&ct->nat_bysource, 627 &nf_nat_bysource[srchash]); 628 spin_unlock_bh(lock); 629 } 630 631 /* It's done. */ 632 if (maniptype == NF_NAT_MANIP_DST) 633 ct->status |= IPS_DST_NAT_DONE; 634 else 635 ct->status |= IPS_SRC_NAT_DONE; 636 637 return NF_ACCEPT; 638 } 639 EXPORT_SYMBOL(nf_nat_setup_info); 640 641 static unsigned int 642 __nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip) 643 { 644 /* Force range to this IP; let proto decide mapping for 645 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). 646 * Use reply in case it's already been mangled (eg local packet). 647 */ 648 union nf_inet_addr ip = 649 (manip == NF_NAT_MANIP_SRC ? 

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range2 range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int verdict = NF_ACCEPT;
	unsigned long statusbit;

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit)
		verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);

	return verdict;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
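
/* Example of the statusbit inversion in nf_nat_packet() above: for an
 * SNATed connection, an ORIGINAL-direction packet at POST_ROUTING
 * (mtype SRC, IPS_SRC_NAT set) gets its source rewritten, while a
 * REPLY-direction packet at PRE_ROUTING (mtype DST, inverted back to
 * IPS_SRC_NAT) gets its destination rewritten; the same status bit thus
 * drives both halves of the mapping.
 */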
"SRC" : "DST", 748 ct, ct->status); 749 if (nf_nat_oif_changed(state->hook, ctinfo, nat, 750 state->out)) 751 goto oif_changed; 752 } 753 break; 754 default: 755 /* ESTABLISHED */ 756 WARN_ON(ctinfo != IP_CT_ESTABLISHED && 757 ctinfo != IP_CT_ESTABLISHED_REPLY); 758 if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out)) 759 goto oif_changed; 760 } 761 do_nat: 762 return nf_nat_packet(ct, ctinfo, state->hook, skb); 763 764 oif_changed: 765 nf_ct_kill_acct(ct, ctinfo, skb); 766 return NF_DROP; 767 } 768 EXPORT_SYMBOL_GPL(nf_nat_inet_fn); 769 770 struct nf_nat_proto_clean { 771 u8 l3proto; 772 u8 l4proto; 773 }; 774 775 /* kill conntracks with affected NAT section */ 776 static int nf_nat_proto_remove(struct nf_conn *i, void *data) 777 { 778 const struct nf_nat_proto_clean *clean = data; 779 780 if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) || 781 (clean->l4proto && nf_ct_protonum(i) != clean->l4proto)) 782 return 0; 783 784 return i->status & IPS_NAT_MASK ? 1 : 0; 785 } 786 787 static void __nf_nat_cleanup_conntrack(struct nf_conn *ct) 788 { 789 unsigned int h; 790 791 h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 792 spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); 793 hlist_del_rcu(&ct->nat_bysource); 794 spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); 795 } 796 797 static int nf_nat_proto_clean(struct nf_conn *ct, void *data) 798 { 799 if (nf_nat_proto_remove(ct, data)) 800 return 1; 801 802 /* This module is being removed and conntrack has nat null binding. 803 * Remove it from bysource hash, as the table will be freed soon. 804 * 805 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack() 806 * will delete entry from already-freed table. 807 */ 808 if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status)) 809 __nf_nat_cleanup_conntrack(ct); 810 811 /* don't delete conntrack. Although that would make things a lot 812 * simpler, we'd end up flushing all conntracks on nat rmmod. 813 */ 814 return 0; 815 } 816 817 /* No one using conntrack by the time this called. 

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE)
		__nf_nat_cleanup_conntrack(ct);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len	 = sizeof(struct nf_conn_nat),
	.align	 = __alignof__(struct nf_conn_nat),
	.destroy = nf_nat_cleanup_conntrack,
	.id	 = NF_CT_EXT_NAT,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};

static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
					  struct nf_nat_range2 *range)
{
	if (tb[CTA_PROTONAT_PORT_MIN]) {
		range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
		range->max_proto.all = range->min_proto.all;
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	if (tb[CTA_PROTONAT_PORT_MAX]) {
		range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	return 0;
}

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	int err;

	err = nla_parse_nested_deprecated(tb, CTA_PROTONAT_MAX, attr,
					  protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	return nf_nat_l4proto_nlattr_to_range(tb, range);
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP] = { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP] = { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP] = { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP] = { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]	   = { .type = NLA_NESTED },
};

static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V4_MINIP]) {
		range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V4_MAXIP])
		range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
	else
		range->max_addr.ip = range->min_addr.ip;

	return 0;
}

static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V6_MINIP]) {
		nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
			   sizeof(struct in6_addr));
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V6_MAXIP])
		nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
			   sizeof(struct in6_addr));
	else
		range->max_addr = range->min_addr;

	return 0;
}
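
/* Layout of the netlink attributes consumed below (values illustrative):
 *
 *	CTA_NAT_V4_MINIP = 192.0.2.1
 *	CTA_NAT_V4_MAXIP = 192.0.2.4	(defaults to MINIP when absent)
 *	CTA_NAT_PROTO (nested)
 *		CTA_PROTONAT_PORT_MIN = 1024
 *		CTA_PROTONAT_PORT_MAX = 2047	(defaults to PORT_MIN when absent)
 */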

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested_deprecated(tb, CTA_NAT_MAX, nat,
					  nat_nla_policy, NULL);
	if (err < 0)
		return err;

	switch (nf_ct_l3num(ct)) {
	case NFPROTO_IPV4:
		err = nf_nat_ipv4_nlattr_to_range(tb, range);
		break;
	case NFPROTO_IPV6:
		err = nf_nat_ipv6_nlattr_to_range(tb, range);
		break;
	default:
		err = -EPROTONOSUPPORT;
		break;
	}

	if (err)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range2 range;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name	  = "nat-follow-master",
	.expectfn = nf_nat_follow_master,
};

int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
		       const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	unsigned int hooknum = ops->hooknum;
	struct nf_hook_ops *nat_ops;
	int i, ret;

	if (WARN_ON_ONCE(pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
		return -EINVAL;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	for (i = 0; i < ops_count; i++) {
		if (orig_nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}

	if (WARN_ON_ONCE(i == ops_count))
		return -EINVAL;

	mutex_lock(&nf_nat_proto_mutex);
	if (!nat_proto_net->nat_hook_ops) {
		WARN_ON(nat_proto_net->users != 0);

		nat_ops = kmemdup(orig_nat_ops, sizeof(*orig_nat_ops) * ops_count, GFP_KERNEL);
		if (!nat_ops) {
			mutex_unlock(&nf_nat_proto_mutex);
			return -ENOMEM;
		}

		for (i = 0; i < ops_count; i++) {
			priv = kzalloc(sizeof(*priv), GFP_KERNEL);
			if (priv) {
				nat_ops[i].priv = priv;
				continue;
			}
			mutex_unlock(&nf_nat_proto_mutex);
			while (i)
				kfree(nat_ops[--i].priv);
			kfree(nat_ops);
			return -ENOMEM;
		}

		ret = nf_register_net_hooks(net, nat_ops, ops_count);
		if (ret < 0) {
			mutex_unlock(&nf_nat_proto_mutex);
			for (i = 0; i < ops_count; i++)
				kfree(nat_ops[i].priv);
			kfree(nat_ops);
			return ret;
		}

		nat_proto_net->nat_hook_ops = nat_ops;
	}

	nat_ops = nat_proto_net->nat_hook_ops;
	priv = nat_ops[hooknum].priv;
	if (WARN_ON_ONCE(!priv)) {
		mutex_unlock(&nf_nat_proto_mutex);
		return -EOPNOTSUPP;
	}

	ret = nf_hook_entries_insert_raw(&priv->entries, ops);
	if (ret == 0)
		nat_proto_net->users++;

	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
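
/* The first nf_nat_register_fn() call for a family instantiates the shared
 * kernel hooks (one nf_nat_lookup_hook_priv entry list per hook); later
 * callers only append to that list and bump the user count.
 * nf_nat_unregister_fn() below reverses this, tearing the shared hooks
 * down when the last user goes away.
 */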

void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
			  unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	struct nf_hook_ops *nat_ops;
	int hooknum = ops->hooknum;
	int i;

	if (pf >= ARRAY_SIZE(nat_net->nat_proto_net))
		return;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	mutex_lock(&nf_nat_proto_mutex);
	if (WARN_ON(nat_proto_net->users == 0))
		goto unlock;

	nat_proto_net->users--;

	nat_ops = nat_proto_net->nat_hook_ops;
	for (i = 0; i < ops_count; i++) {
		if (nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}
	if (WARN_ON_ONCE(i == ops_count))
		goto unlock;
	priv = nat_ops[hooknum].priv;
	nf_hook_entries_delete_raw(&priv->entries, ops);

	if (nat_proto_net->users == 0) {
		nf_unregister_net_hooks(net, nat_ops, ops_count);

		for (i = 0; i < ops_count; i++) {
			priv = nat_ops[i].priv;
			kfree_rcu(priv, rcu_head);
		}

		nat_proto_net->nat_hook_ops = NULL;
		kfree(nat_ops);
	}
unlock:
	mutex_unlock(&nf_nat_proto_mutex);
}

static struct pernet_operations nat_net_ops = {
	.id   = &nat_net_id,
	.size = sizeof(struct nat_net),
};

static struct nf_nat_hook nat_hook = {
	.parse_nat_setup = nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
	.decode_session	 = __nf_nat_decode_session,
#endif
	.manip_pkt	 = nf_nat_manip_pkt,
};

static int __init nf_nat_init(void)
{
	int ret, i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;
	if (nf_nat_htable_size < CONNTRACK_LOCKS)
		nf_nat_htable_size = CONNTRACK_LOCKS;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		kvfree(nf_nat_bysource);
		pr_err("Unable to register extension\n");
		return ret;
	}

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_nat_locks[i]);

	ret = register_pernet_subsys(&nat_net_ops);
	if (ret < 0) {
		nf_ct_extend_unregister(&nat_extend);
		kvfree(nf_nat_bysource);
		return ret;
	}

	nf_ct_helper_expectfn_register(&follow_master_nat);

	WARN_ON(nf_nat_hook != NULL);
	RCU_INIT_POINTER(nf_nat_hook, &nat_hook);

	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nf_nat_hook, NULL);

	synchronize_net();
	kvfree(nf_nat_bysource);
	unregister_pernet_subsys(&nat_net_ops);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);