1 /* 2 * (C) 1999-2001 Paul `Rusty' Russell 3 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> 4 * (C) 2011 Patrick McHardy <kaber@trash.net> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 #include <linux/types.h> 13 #include <linux/timer.h> 14 #include <linux/skbuff.h> 15 #include <linux/gfp.h> 16 #include <net/xfrm.h> 17 #include <linux/jhash.h> 18 #include <linux/rtnetlink.h> 19 20 #include <net/netfilter/nf_conntrack.h> 21 #include <net/netfilter/nf_conntrack_core.h> 22 #include <net/netfilter/nf_nat.h> 23 #include <net/netfilter/nf_nat_l3proto.h> 24 #include <net/netfilter/nf_nat_l4proto.h> 25 #include <net/netfilter/nf_nat_core.h> 26 #include <net/netfilter/nf_nat_helper.h> 27 #include <net/netfilter/nf_conntrack_helper.h> 28 #include <net/netfilter/nf_conntrack_seqadj.h> 29 #include <net/netfilter/nf_conntrack_l3proto.h> 30 #include <net/netfilter/nf_conntrack_zones.h> 31 #include <linux/netfilter/nf_nat.h> 32 33 static spinlock_t nf_nat_locks[CONNTRACK_LOCKS]; 34 35 static DEFINE_MUTEX(nf_nat_proto_mutex); 36 static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO] 37 __read_mostly; 38 static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO] 39 __read_mostly; 40 41 static struct hlist_head *nf_nat_bysource __read_mostly; 42 static unsigned int nf_nat_htable_size __read_mostly; 43 static unsigned int nf_nat_hash_rnd __read_mostly; 44 45 inline const struct nf_nat_l3proto * 46 __nf_nat_l3proto_find(u8 family) 47 { 48 return rcu_dereference(nf_nat_l3protos[family]); 49 } 50 51 inline const struct nf_nat_l4proto * 52 __nf_nat_l4proto_find(u8 family, u8 protonum) 53 { 54 return rcu_dereference(nf_nat_l4protos[family][protonum]); 55 } 56 EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find); 57 58 #ifdef CONFIG_XFRM 59 static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl) 60 { 61 const struct nf_nat_l3proto *l3proto; 62 const struct nf_conn *ct; 63 enum ip_conntrack_info ctinfo; 64 enum ip_conntrack_dir dir; 65 unsigned long statusbit; 66 u8 family; 67 68 ct = nf_ct_get(skb, &ctinfo); 69 if (ct == NULL) 70 return; 71 72 family = nf_ct_l3num(ct); 73 l3proto = __nf_nat_l3proto_find(family); 74 if (l3proto == NULL) 75 return; 76 77 dir = CTINFO2DIR(ctinfo); 78 if (dir == IP_CT_DIR_ORIGINAL) 79 statusbit = IPS_DST_NAT; 80 else 81 statusbit = IPS_SRC_NAT; 82 83 l3proto->decode_session(skb, ct, dir, statusbit, fl); 84 } 85 86 int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) 87 { 88 struct flowi fl; 89 unsigned int hh_len; 90 struct dst_entry *dst; 91 int err; 92 93 err = xfrm_decode_session(skb, &fl, family); 94 if (err < 0) 95 return err; 96 97 dst = skb_dst(skb); 98 if (dst->xfrm) 99 dst = ((struct xfrm_dst *)dst)->route; 100 dst_hold(dst); 101 102 dst = xfrm_lookup(net, dst, &fl, skb->sk, 0); 103 if (IS_ERR(dst)) 104 return PTR_ERR(dst); 105 106 skb_dst_drop(skb); 107 skb_dst_set(skb, dst); 108 109 /* Change in oif may mean change in hh_len. */ 110 hh_len = skb_dst(skb)->dev->hard_header_len; 111 if (skb_headroom(skb) < hh_len && 112 pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) 113 return -ENOMEM; 114 return 0; 115 } 116 EXPORT_SYMBOL(nf_xfrm_me_harder); 117 #endif /* CONFIG_XFRM */ 118 119 /* We keep an extra hash for each conntrack, for fast searching. */ 120 static unsigned int 121 hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple) 122 { 123 unsigned int hash; 124 125 get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd)); 126 127 /* Original src, to ensure we map it consistently if poss. */ 128 hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32), 129 tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n)); 130 131 return reciprocal_scale(hash, nf_nat_htable_size); 132 } 133 134 /* Is this tuple already taken? (not by us) */ 135 int 136 nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, 137 const struct nf_conn *ignored_conntrack) 138 { 139 /* Conntrack tracking doesn't keep track of outgoing tuples; only 140 * incoming ones. NAT means they don't have a fixed mapping, 141 * so we invert the tuple and look for the incoming reply. 142 * 143 * We could keep a separate hash if this proves too slow. 144 */ 145 struct nf_conntrack_tuple reply; 146 147 nf_ct_invert_tuplepr(&reply, tuple); 148 return nf_conntrack_tuple_taken(&reply, ignored_conntrack); 149 } 150 EXPORT_SYMBOL(nf_nat_used_tuple); 151 152 /* If we source map this tuple so reply looks like reply_tuple, will 153 * that meet the constraints of range. 154 */ 155 static int in_range(const struct nf_nat_l3proto *l3proto, 156 const struct nf_nat_l4proto *l4proto, 157 const struct nf_conntrack_tuple *tuple, 158 const struct nf_nat_range *range) 159 { 160 /* If we are supposed to map IPs, then we must be in the 161 * range specified, otherwise let this drag us onto a new src IP. 162 */ 163 if (range->flags & NF_NAT_RANGE_MAP_IPS && 164 !l3proto->in_range(tuple, range)) 165 return 0; 166 167 if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) || 168 l4proto->in_range(tuple, NF_NAT_MANIP_SRC, 169 &range->min_proto, &range->max_proto)) 170 return 1; 171 172 return 0; 173 } 174 175 static inline int 176 same_src(const struct nf_conn *ct, 177 const struct nf_conntrack_tuple *tuple) 178 { 179 const struct nf_conntrack_tuple *t; 180 181 t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 182 return (t->dst.protonum == tuple->dst.protonum && 183 nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) && 184 t->src.u.all == tuple->src.u.all); 185 } 186 187 /* Only called for SRC manip */ 188 static int 189 find_appropriate_src(struct net *net, 190 const struct nf_conntrack_zone *zone, 191 const struct nf_nat_l3proto *l3proto, 192 const struct nf_nat_l4proto *l4proto, 193 const struct nf_conntrack_tuple *tuple, 194 struct nf_conntrack_tuple *result, 195 const struct nf_nat_range *range) 196 { 197 unsigned int h = hash_by_src(net, tuple); 198 const struct nf_conn *ct; 199 200 hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) { 201 if (same_src(ct, tuple) && 202 net_eq(net, nf_ct_net(ct)) && 203 nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) { 204 /* Copy source part from reply tuple. */ 205 nf_ct_invert_tuplepr(result, 206 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 207 result->dst = tuple->dst; 208 209 if (in_range(l3proto, l4proto, result, range)) 210 return 1; 211 } 212 } 213 return 0; 214 } 215 216 /* For [FUTURE] fragmentation handling, we want the least-used 217 * src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus 218 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports 219 * 1-65535, we don't do pro-rata allocation based on ports; we choose 220 * the ip with the lowest src-ip/dst-ip/proto usage. 221 */ 222 static void 223 find_best_ips_proto(const struct nf_conntrack_zone *zone, 224 struct nf_conntrack_tuple *tuple, 225 const struct nf_nat_range *range, 226 const struct nf_conn *ct, 227 enum nf_nat_manip_type maniptype) 228 { 229 union nf_inet_addr *var_ipp; 230 unsigned int i, max; 231 /* Host order */ 232 u32 minip, maxip, j, dist; 233 bool full_range; 234 235 /* No IP mapping? Do nothing. */ 236 if (!(range->flags & NF_NAT_RANGE_MAP_IPS)) 237 return; 238 239 if (maniptype == NF_NAT_MANIP_SRC) 240 var_ipp = &tuple->src.u3; 241 else 242 var_ipp = &tuple->dst.u3; 243 244 /* Fast path: only one choice. */ 245 if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) { 246 *var_ipp = range->min_addr; 247 return; 248 } 249 250 if (nf_ct_l3num(ct) == NFPROTO_IPV4) 251 max = sizeof(var_ipp->ip) / sizeof(u32) - 1; 252 else 253 max = sizeof(var_ipp->ip6) / sizeof(u32) - 1; 254 255 /* Hashing source and destination IPs gives a fairly even 256 * spread in practice (if there are a small number of IPs 257 * involved, there usually aren't that many connections 258 * anyway). The consistency means that servers see the same 259 * client coming from the same IP (some Internet Banking sites 260 * like this), even across reboots. 261 */ 262 j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32), 263 range->flags & NF_NAT_RANGE_PERSISTENT ? 264 0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id); 265 266 full_range = false; 267 for (i = 0; i <= max; i++) { 268 /* If first bytes of the address are at the maximum, use the 269 * distance. Otherwise use the full range. 270 */ 271 if (!full_range) { 272 minip = ntohl((__force __be32)range->min_addr.all[i]); 273 maxip = ntohl((__force __be32)range->max_addr.all[i]); 274 dist = maxip - minip + 1; 275 } else { 276 minip = 0; 277 dist = ~0; 278 } 279 280 var_ipp->all[i] = (__force __u32) 281 htonl(minip + reciprocal_scale(j, dist)); 282 if (var_ipp->all[i] != range->max_addr.all[i]) 283 full_range = true; 284 285 if (!(range->flags & NF_NAT_RANGE_PERSISTENT)) 286 j ^= (__force u32)tuple->dst.u3.all[i]; 287 } 288 } 289 290 /* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING, 291 * we change the source to map into the range. For NF_INET_PRE_ROUTING 292 * and NF_INET_LOCAL_OUT, we change the destination to map into the 293 * range. It might not be possible to get a unique tuple, but we try. 294 * At worst (or if we race), we will end up with a final duplicate in 295 * __ip_conntrack_confirm and drop the packet. */ 296 static void 297 get_unique_tuple(struct nf_conntrack_tuple *tuple, 298 const struct nf_conntrack_tuple *orig_tuple, 299 const struct nf_nat_range *range, 300 struct nf_conn *ct, 301 enum nf_nat_manip_type maniptype) 302 { 303 const struct nf_conntrack_zone *zone; 304 const struct nf_nat_l3proto *l3proto; 305 const struct nf_nat_l4proto *l4proto; 306 struct net *net = nf_ct_net(ct); 307 308 zone = nf_ct_zone(ct); 309 310 rcu_read_lock(); 311 l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num); 312 l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num, 313 orig_tuple->dst.protonum); 314 315 /* 1) If this srcip/proto/src-proto-part is currently mapped, 316 * and that same mapping gives a unique tuple within the given 317 * range, use that. 318 * 319 * This is only required for source (ie. NAT/masq) mappings. 320 * So far, we don't do local source mappings, so multiple 321 * manips not an issue. 322 */ 323 if (maniptype == NF_NAT_MANIP_SRC && 324 !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) { 325 /* try the original tuple first */ 326 if (in_range(l3proto, l4proto, orig_tuple, range)) { 327 if (!nf_nat_used_tuple(orig_tuple, ct)) { 328 *tuple = *orig_tuple; 329 goto out; 330 } 331 } else if (find_appropriate_src(net, zone, l3proto, l4proto, 332 orig_tuple, tuple, range)) { 333 pr_debug("get_unique_tuple: Found current src map\n"); 334 if (!nf_nat_used_tuple(tuple, ct)) 335 goto out; 336 } 337 } 338 339 /* 2) Select the least-used IP/proto combination in the given range */ 340 *tuple = *orig_tuple; 341 find_best_ips_proto(zone, tuple, range, ct, maniptype); 342 343 /* 3) The per-protocol part of the manip is made to map into 344 * the range to make a unique tuple. 345 */ 346 347 /* Only bother mapping if it's not already in range and unique */ 348 if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) { 349 if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) { 350 if (l4proto->in_range(tuple, maniptype, 351 &range->min_proto, 352 &range->max_proto) && 353 (range->min_proto.all == range->max_proto.all || 354 !nf_nat_used_tuple(tuple, ct))) 355 goto out; 356 } else if (!nf_nat_used_tuple(tuple, ct)) { 357 goto out; 358 } 359 } 360 361 /* Last change: get protocol to try to obtain unique tuple. */ 362 l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct); 363 out: 364 rcu_read_unlock(); 365 } 366 367 struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct) 368 { 369 struct nf_conn_nat *nat = nfct_nat(ct); 370 if (nat) 371 return nat; 372 373 if (!nf_ct_is_confirmed(ct)) 374 nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC); 375 376 return nat; 377 } 378 EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add); 379 380 unsigned int 381 nf_nat_setup_info(struct nf_conn *ct, 382 const struct nf_nat_range *range, 383 enum nf_nat_manip_type maniptype) 384 { 385 struct net *net = nf_ct_net(ct); 386 struct nf_conntrack_tuple curr_tuple, new_tuple; 387 388 /* Can't setup nat info for confirmed ct. */ 389 if (nf_ct_is_confirmed(ct)) 390 return NF_ACCEPT; 391 392 WARN_ON(maniptype != NF_NAT_MANIP_SRC && 393 maniptype != NF_NAT_MANIP_DST); 394 395 if (WARN_ON(nf_nat_initialized(ct, maniptype))) 396 return NF_DROP; 397 398 /* What we've got will look like inverse of reply. Normally 399 * this is what is in the conntrack, except for prior 400 * manipulations (future optimization: if num_manips == 0, 401 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) 402 */ 403 nf_ct_invert_tuplepr(&curr_tuple, 404 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 405 406 get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype); 407 408 if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) { 409 struct nf_conntrack_tuple reply; 410 411 /* Alter conntrack table so will recognize replies. */ 412 nf_ct_invert_tuplepr(&reply, &new_tuple); 413 nf_conntrack_alter_reply(ct, &reply); 414 415 /* Non-atomic: we own this at the moment. */ 416 if (maniptype == NF_NAT_MANIP_SRC) 417 ct->status |= IPS_SRC_NAT; 418 else 419 ct->status |= IPS_DST_NAT; 420 421 if (nfct_help(ct) && !nfct_seqadj(ct)) 422 if (!nfct_seqadj_ext_add(ct)) 423 return NF_DROP; 424 } 425 426 if (maniptype == NF_NAT_MANIP_SRC) { 427 unsigned int srchash; 428 spinlock_t *lock; 429 430 srchash = hash_by_src(net, 431 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 432 lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS]; 433 spin_lock_bh(lock); 434 hlist_add_head_rcu(&ct->nat_bysource, 435 &nf_nat_bysource[srchash]); 436 spin_unlock_bh(lock); 437 } 438 439 /* It's done. */ 440 if (maniptype == NF_NAT_MANIP_DST) 441 ct->status |= IPS_DST_NAT_DONE; 442 else 443 ct->status |= IPS_SRC_NAT_DONE; 444 445 return NF_ACCEPT; 446 } 447 EXPORT_SYMBOL(nf_nat_setup_info); 448 449 static unsigned int 450 __nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip) 451 { 452 /* Force range to this IP; let proto decide mapping for 453 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). 454 * Use reply in case it's already been mangled (eg local packet). 455 */ 456 union nf_inet_addr ip = 457 (manip == NF_NAT_MANIP_SRC ? 458 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 : 459 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3); 460 struct nf_nat_range range = { 461 .flags = NF_NAT_RANGE_MAP_IPS, 462 .min_addr = ip, 463 .max_addr = ip, 464 }; 465 return nf_nat_setup_info(ct, &range, manip); 466 } 467 468 unsigned int 469 nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) 470 { 471 return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum)); 472 } 473 EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding); 474 475 /* Do packet manipulations according to nf_nat_setup_info. */ 476 unsigned int nf_nat_packet(struct nf_conn *ct, 477 enum ip_conntrack_info ctinfo, 478 unsigned int hooknum, 479 struct sk_buff *skb) 480 { 481 const struct nf_nat_l3proto *l3proto; 482 const struct nf_nat_l4proto *l4proto; 483 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 484 unsigned long statusbit; 485 enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum); 486 487 if (mtype == NF_NAT_MANIP_SRC) 488 statusbit = IPS_SRC_NAT; 489 else 490 statusbit = IPS_DST_NAT; 491 492 /* Invert if this is reply dir. */ 493 if (dir == IP_CT_DIR_REPLY) 494 statusbit ^= IPS_NAT_MASK; 495 496 /* Non-atomic: these bits don't change. */ 497 if (ct->status & statusbit) { 498 struct nf_conntrack_tuple target; 499 500 /* We are aiming to look like inverse of other direction. */ 501 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); 502 503 l3proto = __nf_nat_l3proto_find(target.src.l3num); 504 l4proto = __nf_nat_l4proto_find(target.src.l3num, 505 target.dst.protonum); 506 if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype)) 507 return NF_DROP; 508 } 509 return NF_ACCEPT; 510 } 511 EXPORT_SYMBOL_GPL(nf_nat_packet); 512 513 struct nf_nat_proto_clean { 514 u8 l3proto; 515 u8 l4proto; 516 }; 517 518 /* kill conntracks with affected NAT section */ 519 static int nf_nat_proto_remove(struct nf_conn *i, void *data) 520 { 521 const struct nf_nat_proto_clean *clean = data; 522 523 if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) || 524 (clean->l4proto && nf_ct_protonum(i) != clean->l4proto)) 525 return 0; 526 527 return i->status & IPS_NAT_MASK ? 1 : 0; 528 } 529 530 static void __nf_nat_cleanup_conntrack(struct nf_conn *ct) 531 { 532 unsigned int h; 533 534 h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 535 spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); 536 hlist_del_rcu(&ct->nat_bysource); 537 spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); 538 } 539 540 static int nf_nat_proto_clean(struct nf_conn *ct, void *data) 541 { 542 if (nf_nat_proto_remove(ct, data)) 543 return 1; 544 545 if ((ct->status & IPS_SRC_NAT_DONE) == 0) 546 return 0; 547 548 /* This netns is being destroyed, and conntrack has nat null binding. 549 * Remove it from bysource hash, as the table will be freed soon. 550 * 551 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack() 552 * will delete entry from already-freed table. 553 */ 554 clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status); 555 __nf_nat_cleanup_conntrack(ct); 556 557 /* don't delete conntrack. Although that would make things a lot 558 * simpler, we'd end up flushing all conntracks on nat rmmod. 559 */ 560 return 0; 561 } 562 563 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) 564 { 565 struct nf_nat_proto_clean clean = { 566 .l3proto = l3proto, 567 .l4proto = l4proto, 568 }; 569 570 nf_ct_iterate_destroy(nf_nat_proto_remove, &clean); 571 } 572 573 static void nf_nat_l3proto_clean(u8 l3proto) 574 { 575 struct nf_nat_proto_clean clean = { 576 .l3proto = l3proto, 577 }; 578 579 nf_ct_iterate_destroy(nf_nat_proto_remove, &clean); 580 } 581 582 /* Protocol registration. */ 583 int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto) 584 { 585 const struct nf_nat_l4proto **l4protos; 586 unsigned int i; 587 int ret = 0; 588 589 mutex_lock(&nf_nat_proto_mutex); 590 if (nf_nat_l4protos[l3proto] == NULL) { 591 l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *), 592 GFP_KERNEL); 593 if (l4protos == NULL) { 594 ret = -ENOMEM; 595 goto out; 596 } 597 598 for (i = 0; i < IPPROTO_MAX; i++) 599 RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown); 600 601 /* Before making proto_array visible to lockless readers, 602 * we must make sure its content is committed to memory. 603 */ 604 smp_wmb(); 605 606 nf_nat_l4protos[l3proto] = l4protos; 607 } 608 609 if (rcu_dereference_protected( 610 nf_nat_l4protos[l3proto][l4proto->l4proto], 611 lockdep_is_held(&nf_nat_proto_mutex) 612 ) != &nf_nat_l4proto_unknown) { 613 ret = -EBUSY; 614 goto out; 615 } 616 RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto); 617 out: 618 mutex_unlock(&nf_nat_proto_mutex); 619 return ret; 620 } 621 EXPORT_SYMBOL_GPL(nf_nat_l4proto_register); 622 623 /* No one stores the protocol anywhere; simply delete it. */ 624 void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto) 625 { 626 mutex_lock(&nf_nat_proto_mutex); 627 RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], 628 &nf_nat_l4proto_unknown); 629 mutex_unlock(&nf_nat_proto_mutex); 630 synchronize_rcu(); 631 632 nf_nat_l4proto_clean(l3proto, l4proto->l4proto); 633 } 634 EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister); 635 636 int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto) 637 { 638 int err; 639 640 err = nf_ct_l3proto_try_module_get(l3proto->l3proto); 641 if (err < 0) 642 return err; 643 644 mutex_lock(&nf_nat_proto_mutex); 645 RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP], 646 &nf_nat_l4proto_tcp); 647 RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP], 648 &nf_nat_l4proto_udp); 649 #ifdef CONFIG_NF_NAT_PROTO_DCCP 650 RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_DCCP], 651 &nf_nat_l4proto_dccp); 652 #endif 653 #ifdef CONFIG_NF_NAT_PROTO_SCTP 654 RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_SCTP], 655 &nf_nat_l4proto_sctp); 656 #endif 657 #ifdef CONFIG_NF_NAT_PROTO_UDPLITE 658 RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDPLITE], 659 &nf_nat_l4proto_udplite); 660 #endif 661 mutex_unlock(&nf_nat_proto_mutex); 662 663 RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto); 664 return 0; 665 } 666 EXPORT_SYMBOL_GPL(nf_nat_l3proto_register); 667 668 void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto) 669 { 670 mutex_lock(&nf_nat_proto_mutex); 671 RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL); 672 mutex_unlock(&nf_nat_proto_mutex); 673 synchronize_rcu(); 674 675 nf_nat_l3proto_clean(l3proto->l3proto); 676 nf_ct_l3proto_module_put(l3proto->l3proto); 677 } 678 EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister); 679 680 /* No one using conntrack by the time this called. */ 681 static void nf_nat_cleanup_conntrack(struct nf_conn *ct) 682 { 683 if (ct->status & IPS_SRC_NAT_DONE) 684 __nf_nat_cleanup_conntrack(ct); 685 } 686 687 static struct nf_ct_ext_type nat_extend __read_mostly = { 688 .len = sizeof(struct nf_conn_nat), 689 .align = __alignof__(struct nf_conn_nat), 690 .destroy = nf_nat_cleanup_conntrack, 691 .id = NF_CT_EXT_NAT, 692 }; 693 694 #if IS_ENABLED(CONFIG_NF_CT_NETLINK) 695 696 #include <linux/netfilter/nfnetlink.h> 697 #include <linux/netfilter/nfnetlink_conntrack.h> 698 699 static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { 700 [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, 701 [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, 702 }; 703 704 static int nfnetlink_parse_nat_proto(struct nlattr *attr, 705 const struct nf_conn *ct, 706 struct nf_nat_range *range) 707 { 708 struct nlattr *tb[CTA_PROTONAT_MAX+1]; 709 const struct nf_nat_l4proto *l4proto; 710 int err; 711 712 err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, 713 protonat_nla_policy, NULL); 714 if (err < 0) 715 return err; 716 717 l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 718 if (l4proto->nlattr_to_range) 719 err = l4proto->nlattr_to_range(tb, range); 720 721 return err; 722 } 723 724 static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = { 725 [CTA_NAT_V4_MINIP] = { .type = NLA_U32 }, 726 [CTA_NAT_V4_MAXIP] = { .type = NLA_U32 }, 727 [CTA_NAT_V6_MINIP] = { .len = sizeof(struct in6_addr) }, 728 [CTA_NAT_V6_MAXIP] = { .len = sizeof(struct in6_addr) }, 729 [CTA_NAT_PROTO] = { .type = NLA_NESTED }, 730 }; 731 732 static int 733 nfnetlink_parse_nat(const struct nlattr *nat, 734 const struct nf_conn *ct, struct nf_nat_range *range, 735 const struct nf_nat_l3proto *l3proto) 736 { 737 struct nlattr *tb[CTA_NAT_MAX+1]; 738 int err; 739 740 memset(range, 0, sizeof(*range)); 741 742 err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL); 743 if (err < 0) 744 return err; 745 746 err = l3proto->nlattr_to_range(tb, range); 747 if (err < 0) 748 return err; 749 750 if (!tb[CTA_NAT_PROTO]) 751 return 0; 752 753 return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); 754 } 755 756 /* This function is called under rcu_read_lock() */ 757 static int 758 nfnetlink_parse_nat_setup(struct nf_conn *ct, 759 enum nf_nat_manip_type manip, 760 const struct nlattr *attr) 761 { 762 struct nf_nat_range range; 763 const struct nf_nat_l3proto *l3proto; 764 int err; 765 766 /* Should not happen, restricted to creating new conntracks 767 * via ctnetlink. 768 */ 769 if (WARN_ON_ONCE(nf_nat_initialized(ct, manip))) 770 return -EEXIST; 771 772 /* Make sure that L3 NAT is there by when we call nf_nat_setup_info to 773 * attach the null binding, otherwise this may oops. 774 */ 775 l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct)); 776 if (l3proto == NULL) 777 return -EAGAIN; 778 779 /* No NAT information has been passed, allocate the null-binding */ 780 if (attr == NULL) 781 return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0; 782 783 err = nfnetlink_parse_nat(attr, ct, &range, l3proto); 784 if (err < 0) 785 return err; 786 787 return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0; 788 } 789 #else 790 static int 791 nfnetlink_parse_nat_setup(struct nf_conn *ct, 792 enum nf_nat_manip_type manip, 793 const struct nlattr *attr) 794 { 795 return -EOPNOTSUPP; 796 } 797 #endif 798 799 static struct nf_ct_helper_expectfn follow_master_nat = { 800 .name = "nat-follow-master", 801 .expectfn = nf_nat_follow_master, 802 }; 803 804 static int __init nf_nat_init(void) 805 { 806 int ret, i; 807 808 /* Leave them the same for the moment. */ 809 nf_nat_htable_size = nf_conntrack_htable_size; 810 if (nf_nat_htable_size < CONNTRACK_LOCKS) 811 nf_nat_htable_size = CONNTRACK_LOCKS; 812 813 nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0); 814 if (!nf_nat_bysource) 815 return -ENOMEM; 816 817 ret = nf_ct_extend_register(&nat_extend); 818 if (ret < 0) { 819 nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size); 820 printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); 821 return ret; 822 } 823 824 for (i = 0; i < CONNTRACK_LOCKS; i++) 825 spin_lock_init(&nf_nat_locks[i]); 826 827 nf_ct_helper_expectfn_register(&follow_master_nat); 828 829 BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); 830 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, 831 nfnetlink_parse_nat_setup); 832 #ifdef CONFIG_XFRM 833 BUG_ON(nf_nat_decode_session_hook != NULL); 834 RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session); 835 #endif 836 return 0; 837 } 838 839 static void __exit nf_nat_cleanup(void) 840 { 841 struct nf_nat_proto_clean clean = {}; 842 unsigned int i; 843 844 nf_ct_iterate_destroy(nf_nat_proto_clean, &clean); 845 846 nf_ct_extend_unregister(&nat_extend); 847 nf_ct_helper_expectfn_unregister(&follow_master_nat); 848 RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL); 849 #ifdef CONFIG_XFRM 850 RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL); 851 #endif 852 synchronize_rcu(); 853 854 for (i = 0; i < NFPROTO_NUMPROTO; i++) 855 kfree(nf_nat_l4protos[i]); 856 synchronize_net(); 857 nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size); 858 } 859 860 MODULE_LICENSE("GPL"); 861 862 module_init(nf_nat_init); 863 module_exit(nf_nat_cleanup); 864