/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
						__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
						__read_mostly;

struct nf_nat_conn_key {
	const struct net *net;
	const struct nf_conntrack_tuple *tuple;
	const struct nf_conntrack_zone *zone;
};

static struct rhltable nf_nat_bysource_table;

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
	return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
	return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	l3proto = __nf_nat_l3proto_find(family);
	if (l3proto == NULL)
		return;

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	l3proto->decode_session(skb, ct, dir, statusbit, fl);
}

int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
{
	const struct nf_conntrack_tuple *t;
	const struct nf_conn *ct = data;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	/* Original src, to ensure we map it consistently if poss. */

	seed ^= net_hash_mix(nf_ct_net(ct));
	return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
		      t->dst.protonum ^ seed);
}
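/* Illustration (comment-only sketch, not used by the build): the hash
 * above covers only the ORIGINAL-direction source and the l4 protocol,
 * so connections from one internal endpoint land in the same chain no
 * matter where they are headed, e.g.:
 *
 *	10.0.0.1:5000 -> 8.8.8.8:53 (UDP)
 *	10.0.0.1:5000 -> 9.9.9.9:53 (UDP)
 *
 * hash identically (same src, same protocol, same netns seed), which is
 * what lets find_appropriate_src() below reuse an existing mapping for
 * a new destination.
 */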
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);
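/* Worked example of the inversion (illustrative): to test whether the
 * candidate SNAT tuple
 *
 *	1.2.3.4:1024 -> 5.6.7.8:80
 *
 * is free, we invert it and ask conntrack whether any connection
 * already expects the reply
 *
 *	5.6.7.8:80 -> 1.2.3.4:1024
 *
 * since, per the comment above, only that incoming form is stored.
 */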
/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
		    const struct nf_nat_l4proto *l4proto,
		    const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !l3proto->in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
	    l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
			      &range->min_proto, &range->max_proto))
		return 1;

	return 0;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
			       const void *obj)
{
	const struct nf_nat_conn_key *key = arg->key;
	const struct nf_conn *ct = obj;

	if (!same_src(ct, key->tuple) ||
	    !net_eq(nf_ct_net(ct), key->net) ||
	    !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
		return 1;

	return 0;
}

static struct rhashtable_params nf_nat_bysource_params = {
	.head_offset = offsetof(struct nf_conn, nat_bysource),
	.obj_hashfn = nf_nat_bysource_hash,
	.obj_cmpfn = nf_nat_bysource_cmp,
	.nelem_hint = 256,
	.min_size = 1024,
};

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_nat_l3proto *l3proto,
		     const struct nf_nat_l4proto *l4proto,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	const struct nf_conn *ct;
	struct nf_nat_conn_key key = {
		.net = net,
		.tuple = tuple,
		.zone = zone
	};
	struct rhlist_head *hl;

	hl = rhltable_lookup(&nf_nat_bysource_table, &key,
			     nf_nat_bysource_params);
	if (!hl)
		return 0;

	ct = container_of(hl, typeof(*ct), nat_bysource);

	nf_ct_invert_tuplepr(result,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	result->dst = tuple->dst;

	return in_range(l3proto, l4proto, result, range);
}
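/* Layout note (sketch): the rhltable allocates no nodes of its own; it
 * chains conntrack entries through their embedded ->nat_bysource member.
 * That is why .head_offset above is offsetof(struct nf_conn, nat_bysource)
 * and why a lookup hit is converted back to its conntrack with
 *
 *	ct = container_of(hl, struct nf_conn, nat_bysource);
 */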
/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
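/* Worked example (illustrative): for an IPv4 range 1.2.3.4 - 1.2.3.7,
 * max == 0, so the loop runs once with minip == 0x01020304,
 * maxip == 0x01020307 and dist == 4; reciprocal_scale(j, dist) then
 * maps the 32-bit hash j uniformly onto offsets 0..3, selecting one of
 * the four addresses.  With NF_NAT_RANGE_PERSISTENT the hash ignores
 * the destination, so a given client keeps its address across flows.
 */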
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
	l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
					orig_tuple->dst.protonum);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(l3proto, l4proto, orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				goto out;
			}
		} else if (find_appropriate_src(net, zone, l3proto, l4proto,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				goto out;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (l4proto->in_range(tuple, maniptype,
					      &range->min_proto,
					      &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
		     maniptype == NF_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		struct nf_nat_conn_key key = {
			.net = nf_ct_net(ct),
			.tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
			.zone = nf_ct_zone(ct),
		};
		int err;

		err = rhltable_insert_key(&nf_nat_bysource_table,
					  &key,
					  &ct->nat_bysource,
					  nf_nat_bysource_params);
		if (err)
			return NF_DROP;
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
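/* Caller sketch (hedged; modeled on the xt_nat SNAT target rather than
 * copied from it): a POSTROUTING target typically ensures the NAT
 * extension exists and installs the mapping once per connection:
 *
 *	if (!nf_ct_nat_ext_add(ct))
 *		return NF_DROP;
 *	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 *
 * Later packets of the same connection bypass this and are rewritten
 * by nf_nat_packet() below.
 */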
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
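/* Example (illustrative): a connection 10.0.0.1:1024 -> 5.6.7.8:80 that
 * matched no NAT rule gets a SRC null binding with the one-element range
 * [10.0.0.1, 10.0.0.1]: the address is pinned to its current value, and
 * since NF_NAT_RANGE_PROTO_SPECIFIED is not set, get_unique_tuple() may
 * still move the source port if a NATed connection already occupies the
 * tuple.
 */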
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		l3proto = __nf_nat_l3proto_find(target.src.l3num);
		l4proto = __nf_nat_l4proto_find(target.src.l3num,
						target.dst.protonum);
		if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
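/* Worked example of the statusbit flip above (illustrative): an SNATed
 * connection has only IPS_SRC_NAT set.  Its reply packet entering
 * PREROUTING gives mtype == NF_NAT_MANIP_DST, so statusbit starts as
 * IPS_DST_NAT; dir == IP_CT_DIR_REPLY flips it to IPS_SRC_NAT, which is
 * set, so the reply's destination is rewritten back to the internal
 * host.  The same packet in the SRC-manip hook tests IPS_DST_NAT,
 * which is clear, and passes through untouched.
 */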
struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	if ((ct->status & IPS_SRC_NAT_DONE) == 0)
		return 0;

	/* This netns is being destroyed, and conntrack has nat null binding.
	 * Remove it from the bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete the entry from the already-freed table.
	 */
	ct->status &= ~IPS_NAT_DONE_MASK;
	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
			nf_nat_bysource_params);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
		.l4proto = l4proto,
	};
	struct net *net;

	rtnl_lock();
	for_each_net(net)
		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
	rtnl_unlock();
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
	};
	struct net *net;

	rtnl_lock();
	for_each_net(net)
		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
	rtnl_unlock();
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	const struct nf_nat_l4proto **l4protos;
	unsigned int i;
	int ret = 0;

	mutex_lock(&nf_nat_proto_mutex);
	if (nf_nat_l4protos[l3proto] == NULL) {
		l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
				   GFP_KERNEL);
		if (l4protos == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < IPPROTO_MAX; i++)
			RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

		/* Before making proto_array visible to lockless readers,
		 * we must make sure its content is committed to memory.
		 */
		smp_wmb();

		nf_nat_l4protos[l3proto] = l4protos;
	}

	if (rcu_dereference_protected(
			nf_nat_l4protos[l3proto][l4proto->l4proto],
			lockdep_is_held(&nf_nat_proto_mutex)
			) != &nf_nat_l4proto_unknown) {
		ret = -EBUSY;
		goto out;
	}
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
 out:
	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
			 &nf_nat_l4proto_unknown);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);
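/* Usage sketch (hedged; mirrors what the existing protocol modules do,
 * e.g. the GRE helper): register one l4 handler per l3 family at module
 * init and unregister it symmetrically on exit:
 *
 *	err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_gre);
 *	...
 *	nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_gre);
 *
 * Slots that were never claimed keep pointing at nf_nat_l4proto_unknown,
 * so lookups always return a usable handler.
 */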
int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
	int err;

	err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
	if (err < 0)
		return err;

	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
			 &nf_nat_l4proto_tcp);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
			 &nf_nat_l4proto_udp);
#ifdef CONFIG_NF_NAT_PROTO_DCCP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_DCCP],
			 &nf_nat_l4proto_dccp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_SCTP
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_SCTP],
			 &nf_nat_l4proto_sctp);
#endif
#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDPLITE],
			 &nf_nat_l4proto_udplite);
#endif
	mutex_unlock(&nf_nat_proto_mutex);

	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l3proto_clean(l3proto->l3proto);
	nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE)
		rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
				nf_nat_bysource_params);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_l4proto *l4proto;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
			       protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->nlattr_to_range)
		err = l4proto->nlattr_to_range(tb, range);

	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range,
		    const struct nf_nat_l3proto *l3proto)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL);
	if (err < 0)
		return err;

	err = l3proto->nlattr_to_range(tb, range);
	if (err < 0)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;
	const struct nf_nat_l3proto *l3proto;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* Make sure that L3 NAT is there by the time we call
	 * nf_nat_setup_info to attach the null binding, otherwise
	 * this may oops.
	 */
	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
	if (l3proto == NULL)
		return -EAGAIN;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif
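/* Attribute layout parsed above (sketch; the enclosing CTA_NAT_SRC /
 * CTA_NAT_DST attribute names come from ctnetlink, not this file):
 *
 *	CTA_NAT_V4_MINIP / CTA_NAT_V4_MAXIP    (or the V6 pair)
 *	CTA_NAT_PROTO
 *		CTA_PROTONAT_PORT_MIN / CTA_PROTONAT_PORT_MAX
 *
 * nfnetlink_parse_nat() turns this into a struct nf_nat_range, with the
 * l4proto's nlattr_to_range() consuming the port bounds.
 */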
static void __net_exit nf_nat_net_exit(struct net *net)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
}

static struct pernet_operations nf_nat_net_ops = {
	.exit = nf_nat_net_exit,
};

static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
	int ret;

	ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
	if (ret)
		return ret;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		rhltable_destroy(&nf_nat_bysource_table);
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	nf_ct_helper_expectfn_register(&follow_master_nat);

	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			 nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
	BUG_ON(nf_nat_decode_session_hook != NULL);
	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
	return 0;

 cleanup_extend:
	rhltable_destroy(&nf_nat_bysource_table);
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

static void __exit nf_nat_cleanup(void)
{
	unsigned int i;

	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
	synchronize_rcu();

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		kfree(nf_nat_l4protos[i]);

	rhltable_destroy(&nf_nat_bysource_table);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);