/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
						__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
						__read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
	return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
	return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	l3proto = __nf_nat_l3proto_find(family);
	if (l3proto == NULL)
		return;

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	l3proto->decode_session(skb, ct, dir, statusbit, fl);
}

int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */
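
/* Usage sketch (illustrative only; the hook name is hypothetical, the
 * nf_hookfn signature is real): an output-path hook that has just
 * mangled addresses would re-do the xfrm lookup so IPsec policy
 * matching sees the NATed header:
 *
 *	static unsigned int my_nat_out(void *priv, struct sk_buff *skb,
 *				       const struct nf_hook_state *state)
 *	{
 *		...	// NAT mangling happened above
 *	#ifdef CONFIG_XFRM
 *		if (nf_xfrm_me_harder(state->net, skb, AF_INET) < 0)
 *			return NF_DROP;
 *	#endif
 *		return NF_ACCEPT;
 *	}
 */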
/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

	return reciprocal_scale(hash, nf_nat_htable_size);
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones. NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
		    const struct nf_nat_l4proto *l4proto,
		    const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !l3proto->in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
	    l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
			      &range->min_proto, &range->max_proto))
		return 1;

	return 0;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_nat_l3proto *l3proto,
		     const struct nf_nat_l4proto *l4proto,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(net, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(l3proto, l4proto, result, range))
				return 1;
		}
	}
	return 0;
}
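
/* Worked example (sketch) of the inversion above: say an earlier
 * connection from 10.0.0.1:1000 to 8.8.8.8:53/udp was SNATed to
 * 1.2.3.4:1000, so its reply tuple is 8.8.8.8:53 -> 1.2.3.4:1000.
 * Inverting that reply tuple yields 1.2.3.4:1000 -> 8.8.8.8:53;
 * keeping its source part and overwriting the destination with the
 * new tuple's reuses the existing 10.0.0.1 -> 1.2.3.4 source mapping
 * for the new connection.
 */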
/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping? Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway). The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
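
/* Worked example (sketch): for an IPv4 range 10.0.0.100-10.0.0.103
 * there is a single u32 word (max == 0), minip == 0x0a000064 and
 * maxip == 0x0a000067, so dist == 4. reciprocal_scale(j, 4) maps the
 * 32-bit hash j to an offset 0..3, i.e. ((u64)j * 4) >> 32, spreading
 * connections roughly evenly over the four addresses while staying
 * stable for a given source (and, with NF_NAT_RANGE_PERSISTENT, for
 * any destination).
 */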
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
	l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
					orig_tuple->dst.protonum);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(l3proto, l4proto, orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				goto out;
			}
		} else if (find_appropriate_src(net, zone, l3proto, l4proto,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				goto out;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (l4proto->in_range(tuple, maniptype,
					      &range->min_proto,
					      &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);
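
/* Usage sketch (illustrative, roughly mirroring how the per-family NAT
 * hooks and SNAT-style targets drive this API): the extension must be
 * added before nf_nat_setup_info(), and only on an unconfirmed
 * conntrack:
 *
 *	ct = nf_ct_get(skb, &ctinfo);
 *	if (!ct || !nf_ct_nat_ext_add(ct))
 *		return NF_DROP;
 *	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 */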
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	/* Can't setup nat info for confirmed ct. */
	if (nf_ct_is_confirmed(ct))
		return NF_ACCEPT;

	WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
		maniptype != NF_NAT_MANIP_DST);

	if (WARN_ON(nf_nat_initialized(ct, maniptype)))
		return NF_DROP;

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct) && !nfct_seqadj(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;
		spinlock_t *lock;

		srchash = hash_by_src(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
		spin_lock_bh(lock);
		hlist_add_head_rcu(&ct->nat_bysource,
				   &nf_nat_bysource[srchash]);
		spin_unlock_bh(lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		l3proto = __nf_nat_l3proto_find(target.src.l3num);
		l4proto = __nf_nat_l4proto_find(target.src.l3num,
						target.dst.protonum);
		if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
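
/* Worked example (sketch) of the statusbit flip above: for a connection
 * that was SNATed only (ct->status has IPS_SRC_NAT set), a
 * reply-direction packet seen at a SRC-manip hook computes
 *
 *	statusbit = IPS_SRC_NAT ^ IPS_NAT_MASK = IPS_DST_NAT,
 *
 * which is not set, so nothing is rewritten there. The DST-manip hook
 * computes IPS_DST_NAT ^ IPS_NAT_MASK = IPS_SRC_NAT, which matches, and
 * the reply's destination is rewritten back to the original source.
 */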
struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	unsigned int h;

	h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
	hlist_del_rcu(&ct->nat_bysource);
	spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	/* This module is being removed and the conntrack has a NAT null
	 * binding. Remove it from the bysource hash, as the table will
	 * be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * would delete the entry from the already-freed table.
	 */
	if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
		__nf_nat_cleanup_conntrack(ct);

	/* don't delete conntrack. Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
		.l4proto = l4proto,
	};

	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
	};

	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	const struct nf_nat_l4proto **l4protos;
	unsigned int i;
	int ret = 0;

	mutex_lock(&nf_nat_proto_mutex);
	if (nf_nat_l4protos[l3proto] == NULL) {
		l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
				   GFP_KERNEL);
		if (l4protos == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < IPPROTO_MAX; i++)
			RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

		/* Before making proto_array visible to lockless readers,
		 * we must make sure its content is committed to memory.
		 */
		smp_wmb();

		nf_nat_l4protos[l3proto] = l4protos;
	}

	if (rcu_dereference_protected(
			nf_nat_l4protos[l3proto][l4proto->l4proto],
			lockdep_is_held(&nf_nat_proto_mutex)
			) != &nf_nat_l4proto_unknown) {
		ret = -EBUSY;
		goto out;
	}
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
out:
	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
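
/* Registration sketch (illustrative; IPPROTO_FOO and the foo_* ops are
 * hypothetical): a protocol module fills in the ops this file invokes
 * (in_range, unique_tuple, optionally nlattr_to_range) and registers
 * once per L3 family from its init path, backing out on failure:
 *
 *	static const struct nf_nat_l4proto nf_nat_l4proto_foo = {
 *		.l4proto	= IPPROTO_FOO,
 *		.in_range	= foo_in_range,
 *		.unique_tuple	= foo_unique_tuple,
 *	};
 *
 *	err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_foo);
 *	if (err < 0)
 *		return err;
 */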
/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
			 &nf_nat_l4proto_unknown);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
	int err;

	err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
	if (err < 0)
		return err;

	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
			 &nf_nat_l4proto_tcp);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
			 &nf_nat_l4proto_udp);
#ifdef CONFIG_NF_NAT_PROTO_DCCP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_DCCP],
			 &nf_nat_l4proto_dccp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_SCTP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_SCTP],
			 &nf_nat_l4proto_sctp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDPLITE],
			 &nf_nat_l4proto_udplite);
#endif
	mutex_unlock(&nf_nat_proto_mutex);

	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l3proto_clean(l3proto->l3proto);
	nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
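
/* Pairing sketch (illustrative; the foo module name is hypothetical):
 * a per-family backend registers its nf_nat_l3proto on module init and
 * unregisters it on exit, so the synchronize_rcu() above can fence off
 * lockless readers before the ops go away:
 *
 *	static int __init nf_nat_l3proto_foo_init(void)
 *	{
 *		return nf_nat_l3proto_register(&nf_nat_l3proto_foo);
 *	}
 *
 *	static void __exit nf_nat_l3proto_foo_exit(void)
 *	{
 *		nf_nat_l3proto_unregister(&nf_nat_l3proto_foo);
 *	}
 */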
/* No one is using conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE)
		__nf_nat_cleanup_conntrack(ct);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_l4proto *l4proto;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
			       protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->nlattr_to_range)
		err = l4proto->nlattr_to_range(tb, range);

	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range,
		    const struct nf_nat_l3proto *l3proto)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL);
	if (err < 0)
		return err;

	err = l3proto->nlattr_to_range(tb, range);
	if (err < 0)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}
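
/* Attribute layout sketch parsed above (IPv4 attributes shown; the
 * CTA_NAT_PROTO nest is optional):
 *
 *	CTA_NAT (nested)
 *	  CTA_NAT_V4_MINIP		lower bound of the address range
 *	  CTA_NAT_V4_MAXIP		upper bound of the address range
 *	  CTA_NAT_PROTO (nested)
 *	    CTA_PROTONAT_PORT_MIN	lower port bound (u16)
 *	    CTA_PROTONAT_PORT_MAX	upper port bound (u16)
 */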
/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;
	const struct nf_nat_l3proto *l3proto;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* Make sure that L3 NAT is there by when we call nf_nat_setup_info to
	 * attach the null binding, otherwise this may oops.
	 */
	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
	if (l3proto == NULL)
		return -EAGAIN;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
	int ret, i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;
	if (nf_nat_htable_size < CONNTRACK_LOCKS)
		nf_nat_htable_size = CONNTRACK_LOCKS;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
		pr_err("Unable to register extension\n");
		return ret;
	}

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_nat_locks[i]);

	nf_ct_helper_expectfn_register(&follow_master_nat);

	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			 nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
	BUG_ON(nf_nat_decode_session_hook != NULL);
	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};
	unsigned int i;

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
	synchronize_rcu();

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		kfree(nf_nat_l4protos[i]);
	synchronize_net();
	nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);