/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
				      enum nf_nat_manip_type manip,
				      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

unsigned int nf_conntrack_hash_rnd __read_mostly;

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
{
	unsigned int n;

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
		      (((__force __u16)tuple->dst.u.all << 16) |
		      tuple->dst.protonum));
}
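/* Informational note on bucket mapping: hash_conntrack_raw() yields a full
 * 32-bit hash; __hash_bucket() folds it into [0, size) with the
 * multiply-and-shift ((u64)hash * size) >> 32 instead of a modulo, which
 * avoids a division and works for non-power-of-two table sizes.
 */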
static u32 __hash_bucket(u32 hash, unsigned int size)
{
	return ((u64)hash * size) >> 32;
}

static u32 hash_bucket(u32 hash, const struct net *net)
{
	return __hash_bucket(hash, net->ct.htable_size);
}

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  u16 zone, unsigned int size)
{
	return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
}

static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
				       const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, zone, net->ct.htable_size);
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return false;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return false;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	rcu_read_unlock();

	spin_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	}

	NF_CT_STAT_INC(net, delete);
	spin_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}
void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_helper_destroy(ct);
	spin_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(net, delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);

static void death_by_event(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct net *net = nf_ct_net(ct);

	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
		/* bad luck, let's retry again */
		ct->timeout.expires = jiffies +
			(random32() % net->ct.sysctl_events_retry_timeout);
		add_timer(&ct->timeout);
		return;
	}
	/* we've got the event delivered, now it's dying */
	set_bit(IPS_DYING_BIT, &ct->status);
	spin_lock(&nf_conntrack_lock);
	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&nf_conntrack_lock);
	nf_ct_put(ct);
}

void nf_ct_insert_dying_list(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* add this conntrack to the dying list */
	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &net->ct.dying);
	spin_unlock_bh(&nf_conntrack_lock);
	/* set a new timer to retry event delivery */
	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
	ct->timeout.expires = jiffies +
		(random32() % net->ct.sysctl_events_retry_timeout);
	add_timer(&ct->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp && tstamp->stop == 0)
		tstamp->stop = ktime_to_ns(ktime_get_real());

	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
		/* destroy event was not delivered */
		nf_ct_delete_from_lists(ct);
		nf_ct_insert_dying_list(ct);
		return;
	}
	set_bit(IPS_DYING_BIT, &ct->status);
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
}

/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 * OR
 * - Caller must lock nf_conntrack_lock before calling this function
 */
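/* Background for the warning above (informational note): the conntrack slab
 * cache is created with SLAB_DESTROY_BY_RCU, so an entry may be freed and
 * reused for a different connection while an RCU reader is still walking
 * the hash chain.  Lockless callers therefore take a reference with
 * atomic_inc_not_zero() and then re-check the tuple (and zone), as
 * __nf_conntrack_find_get() below does.
 */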
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int bucket = hash_bucket(hash, net);

	/* Disable BHs the entire time since we normally need to disable them
	 * at least once for the stats anyway.
	 */
	local_bh_disable();
begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
			NF_CT_STAT_INC(net, found);
			local_bh_enable();
			return h;
		}
		NF_CT_STAT_INC(net, searched);
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != bucket) {
		NF_CT_STAT_INC(net, search_restart);
		goto begin;
	}
	local_bh_enable();

	return NULL;
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	return ____nf_conntrack_find(net, zone, tuple,
				     hash_conntrack_raw(tuple, zone));
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, u16 zone,
			const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(nf_ct_is_dying(ct) ||
			     !atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
		else {
			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
				     nf_ct_zone(ct) != zone)) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();

	return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	return __nf_conntrack_find_get(net, zone, tuple,
				       hash_conntrack_raw(tuple, zone));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	struct net *net = nf_ct_net(ct);

	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				 &net->ct.hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned int hash, repl_hash;
	u16 zone;

	zone = nf_ct_zone(ct);
	hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
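/* Informational note on the hash reuse below: while a conntrack is still
 * unconfirmed, only its ORIGINAL-direction tuplehash node is linked (into
 * the unconfirmed list), so the REPLY node's pprev field is unused.
 * __nf_conntrack_alloc() stashes the raw 32-bit hash of the original tuple
 * there, and __nf_conntrack_confirm() picks it up again so the bucket can
 * be derived without rehashing the tuple.
 */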
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conn_tstamp *tstamp;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	u16 zone;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);
	/* reuse the hash saved before */
	hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
	hash = hash_bucket(hash, net);
	repl_hash = hash_conntrack(net, zone,
				   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	/* We have to check the DYING flag inside the lock to prevent
	   a race against nf_ct_get_next_corpse() possibly called from
	   user context, else we insert an already 'dead' hash, blocking
	   further use of that particular connection -JM */

	if (unlikely(nf_ct_is_dying(ct))) {
		spin_unlock_bh(&nf_conntrack_lock);
		return NF_ACCEPT;
	}

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;

	/* Remove from unconfirmed list */
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	ct->status |= IPS_CONFIRMED;

	/* set conntrack timestamp, if enabled. */
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp) {
		if (skb->tstamp.tv64 == 0)
			__net_timestamp((struct sk_buff *)skb);

		tstamp->start = ktime_to_ns(skb->tstamp);
	}
	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;
	u16 zone = nf_ct_zone(ignored_conntrack);
	unsigned int hash = hash_conntrack(net, zone, tuple);

	/* Disable BHs the entire time since we need to disable them at
	 * least once for the stats anyway.
	 */
	rcu_read_lock_bh();
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (ct != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(ct) == zone) {
			NF_CT_STAT_INC(net, found);
			rcu_read_unlock_bh();
			return 1;
		}
		NF_CT_STAT_INC(net, searched);
	}
	rcu_read_unlock_bh();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_nulls_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < net->ct.htable_size; i++) {
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
					       hnnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}

		if (ct != NULL) {
			if (likely(!nf_ct_is_dying(ct) &&
				   atomic_inc_not_zero(&ct->ct_general.use)))
				break;
			else
				ct = NULL;
		}

		if (cnt >= NF_CT_EVICTION_RANGE)
			break;

		hash = (hash + 1) % net->ct.htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(net, early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}
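/* Informational note: several CPUs can race into the initialisation below
 * when the first packets are tracked.  The cmpxchg() only installs the
 * random value if nf_conntrack_hash_rnd is still zero, so exactly one
 * winner determines the secret and later racers keep the established value.
 */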
void init_nf_conntrack_hash_rnd(void)
{
	unsigned int rand;

	/*
	 * Why not initialize nf_conntrack_hash_rnd in an init() function?
	 * Because there isn't enough entropy while the system is booting,
	 * so we initialize it as late as possible.
	 */
	do {
		get_random_bytes(&rand, sizeof(rand));
	} while (!rand);
	cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
}

static struct nf_conn *
__nf_conntrack_alloc(struct net *net, u16 zone,
		     const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     gfp_t gfp, u32 hash)
{
	struct nf_conn *ct;

	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
		hash = hash_conntrack_raw(orig, zone);
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		if (!early_drop(net, hash_bucket(hash, net))) {
			atomic_dec(&net->ct.count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Do not use kmem_cache_zalloc(), as this cache uses
	 * SLAB_DESTROY_BY_RCU.
	 */
	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
	if (ct == NULL) {
		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
		atomic_dec(&net->ct.count);
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
	 */
	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
	       offsetof(struct nf_conn, proto) -
	       offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* save hash for reusing when confirming */
	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
	write_pnet(&ct->ct_net, net);
#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (zone) {
		struct nf_conntrack_zone *nf_ct_zone;

		nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
		if (!nf_ct_zone)
			goto out_free;
		nf_ct_zone->id = zone;
	}
#endif
	/*
	 * changes to lookup keys must be done before setting refcnt to 1
	 */
	smp_wmb();
	atomic_set(&ct->ct_general.use, 1);
	return ct;

#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
	return ERR_PTR(-ENOMEM);
#endif
}

struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_ext_destroy(ct);
	atomic_dec(&net->ct.count);
	nf_ct_ext_free(ct);
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff, u32 hash)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_ecache *ecache;
	struct nf_conntrack_expect *exp;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
				  hash);
	if (IS_ERR(ct)) {
		pr_debug("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)ct;
	}

	if (!l4proto->new(ct, skb, dataoff)) {
		nf_conntrack_free(ct);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);

	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
			     ecache ? ecache->expmask : 0,
			     GFP_ATOMIC);

	spin_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(net, zone, tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 ct, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		ct->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&ct->master->ct_general);
		NF_CT_STAT_INC(net, expect_new);
	} else {
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
		NF_CT_STAT_INC(net, new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.unconfirmed);

	spin_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}
/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
	u32 hash;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	hash = hash_conntrack_raw(&tuple, zone);
	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
	if (!h) {
		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
				   skb, dataoff, hash);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}
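/* Informational overview (added note): nf_conntrack_in() is the entry point
 * called from the netfilter hooks.  It skips packets that already carry a
 * non-template nfct, asks the l3/l4 protocol trackers to locate and
 * sanity-check the transport header, resolves (or creates) the conntrack
 * entry via resolve_normal_ct(), and finally lets the l4 tracker update
 * per-connection state; negative return codes from the trackers are turned
 * into netfilter verdicts.
 */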
unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
		struct sk_buff *skb)
{
	struct nf_conn *ct, *tmpl = NULL;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	if (skb->nfct) {
		/* Previously seen (loopback or untracked)?  Ignore. */
		tmpl = (struct nf_conn *)skb->nfct;
		if (!nf_ct_is_template(tmpl)) {
			NF_CT_STAT_INC_ATOMIC(net, ignore);
			return NF_ACCEPT;
		}
		skb->nfct = NULL;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find(pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(net, error);
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = -ret;
		goto out;
	}

	l4proto = __nf_ct_l4proto_find(pf, protonum);

	/* It may be a special packet, error, unclean...
	 * inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL) {
		ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
				     pf, hooknum);
		if (ret <= 0) {
			NF_CT_STAT_INC_ATOMIC(net, error);
			NF_CT_STAT_INC_ATOMIC(net, invalid);
			ret = -ret;
			goto out;
		}
	}

	ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
			       l3proto, l4proto, &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = NF_ACCEPT;
		goto out;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = NF_DROP;
		goto out;
	}

	NF_CT_ASSERT(skb->nfct);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
	if (ret <= 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		if (ret == -NF_DROP)
			NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = -ret;
		goto out;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
	if (tmpl) {
		/* Special case: we have to repeat this hook, assign the
		 * template again to this packet.  We assume that this packet
		 * has no conntrack assigned.  This is used by nf_ct_tcp. */
		if (ret == NF_REPEAT)
			skb->nfct = (struct nf_conntrack *)tmpl;
		else
			nf_ct_put(tmpl);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			  const struct nf_conntrack_tuple *orig)
{
	bool ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout. Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ)
			mod_timer_pending(&ct->timeout, newtime);
	}

acct:
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes += skb->len;
			spin_unlock_bh(&ct->lock);
		}
	}
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       const struct sk_buff *skb,
		       int do_acct)
{
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
			spin_unlock_bh(&ct->lock);
		}
	}

	if (del_timer(&ct->timeout)) {
		ct->timeout.function((unsigned long)ct);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_zone),
	.align	= __alignof__(struct nf_conntrack_zone),
	.id	= NF_CT_EXT_ZONE,
};
#endif

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < net->ct.htable_size; (*bucket)++) {
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
			   int (*iter)(struct nf_conn *i, void *data),
			   void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

struct __nf_ct_flush_report {
	u32 pid;
	int report;
};

static int kill_report(struct nf_conn *i, void *data)
{
	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
	struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(i);
	if (tstamp && tstamp->stop == 0)
		tstamp->stop = ktime_to_ns(ktime_get_real());

	/* If we fail to deliver the event, death_by_timeout() will retry */
	if (nf_conntrack_event_report(IPCT_DESTROY, i,
				      fr->pid, fr->report) < 0)
		return 1;

	/* Avoid the delivery of the destroy event in death_by_timeout(). */
	set_bit(IPS_DYING_BIT, &i->status);
	return 1;
}
static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(void *hash, unsigned int size)
{
	if (is_vmalloc_addr(hash))
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
{
	struct __nf_ct_flush_report fr = {
		.pid	= pid,
		.report = report,
	};
	nf_ct_iterate_cleanup(net, kill_report, &fr);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);

static void nf_ct_release_dying_list(struct net *net)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* never fails to remove them, no listeners at this point */
		nf_ct_kill(ct);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}

static int untrack_refs(void)
{
	int cnt = 0, cpu;

	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);

		cnt += atomic_read(&ct->ct_general.use) - 1;
	}
	return cnt;
}
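/* Informational note: the per-cpu "untracked" conntracks are created with a
 * base reference count of one, so untrack_refs() reports how many extra
 * references (from skbs still in flight) remain.  The cleanup below spins
 * until that count drops to zero before unregistering the protocol trackers
 * and helpers.
 */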
static void nf_conntrack_cleanup_init_net(void)
{
	while (untrack_refs() > 0)
		schedule();

	nf_conntrack_helper_fini();
	nf_conntrack_proto_fini();
#ifdef CONFIG_NF_CONNTRACK_ZONES
	nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
}

static void nf_conntrack_cleanup_net(struct net *net)
{
i_see_dead_people:
	nf_ct_iterate_cleanup(net, kill_all, NULL);
	nf_ct_release_dying_list(net);
	if (atomic_read(&net->ct.count) != 0) {
		schedule();
		goto i_see_dead_people;
	}

	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
	nf_conntrack_ecache_fini(net);
	nf_conntrack_tstamp_fini(net);
	nf_conntrack_acct_fini(net);
	nf_conntrack_expect_fini(net);
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
	kfree(net->ct.slabname);
	free_percpu(net->ct.stat);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
	if (net_eq(net, &init_net))
		rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_conntrack_cleanup_net(net);

	if (net_eq(net, &init_net)) {
		rcu_assign_pointer(nf_ct_destroy, NULL);
		nf_conntrack_cleanup_init_net();
	}
}

void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
	struct hlist_nulls_head *hash;
	unsigned int nr_slots, i;
	size_t sz;

	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
	sz = nr_slots * sizeof(struct hlist_nulls_head);
	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					get_order(sz));
	if (!hash) {
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
				 PAGE_KERNEL);
	}

	if (hash && nulls)
		for (i = 0; i < nr_slots; i++)
			INIT_HLIST_NULLS_HEAD(&hash[i], i);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket;
	unsigned int hashsize, old_size;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (current->nsproxy->net_ns != &init_net)
		return -EOPNOTSUPP;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtoul(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, 1);
	if (!hash)
		return -ENOMEM;

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the lock.
	 */
	spin_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < init_net.ct.htable_size; i++) {
		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
			h = hlist_nulls_entry(init_net.ct.hash[i].first,
					      struct nf_conntrack_tuple_hash, hnnode);
			ct = nf_ct_tuplehash_to_ctrack(h);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
						  hashsize);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = init_net.ct.htable_size;
	old_hash = init_net.ct.hash;

	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
	init_net.ct.hash = hash;
	spin_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

void nf_ct_untracked_status_or(unsigned long bits)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
}
EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);

static int nf_conntrack_init_init_net(void)
{
	int max_factor = 8;
	int ret, cpu;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
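	/* Worked example (informational): on a 64-bit box with 1GB of RAM,
	 * (1GB / 16384) / sizeof(struct hlist_head) = 65536 / 8 = 8192
	 * buckets; anything above 1GB is clamped to 16384 below.  For this
	 * auto-sized case max_factor is 4, so nf_conntrack_max = 4 * buckets.
	 */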
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

#ifdef CONFIG_NF_CONNTRACK_ZONES
	ret = nf_ct_extend_register(&nf_ct_zone_extend);
	if (ret < 0)
		goto err_extend;
#endif
	/* Set up fake conntrack: to never be deleted, not in any hashes */
	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
		write_pnet(&ct->ct_net, &init_net);
		atomic_set(&ct->ct_general.use, 1);
	}
	/* - and make it look like a confirmed connection */
	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
	return 0;

#ifdef CONFIG_NF_CONNTRACK_ZONES
err_extend:
	nf_conntrack_helper_fini();
#endif
err_helper:
	nf_conntrack_proto_fini();
err_proto:
	return ret;
}

/*
 * We need to use special "null" values, not used in hash table
 */
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)

static int nf_conntrack_init_net(struct net *net)
{
	int ret;

	atomic_set(&net->ct.count, 0);
	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat) {
		ret = -ENOMEM;
		goto err_stat;
	}

	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
	if (!net->ct.slabname) {
		ret = -ENOMEM;
		goto err_slabname;
	}

	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
							sizeof(struct nf_conn), 0,
							SLAB_DESTROY_BY_RCU, NULL);
	if (!net->ct.nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		ret = -ENOMEM;
		goto err_cache;
	}

	net->ct.htable_size = nf_conntrack_htable_size;
	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
	if (!net->ct.hash) {
		ret = -ENOMEM;
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_hash;
	}
	ret = nf_conntrack_expect_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_init(net);
	if (ret < 0)
		goto err_acct;
	ret = nf_conntrack_tstamp_init(net);
	if (ret < 0)
		goto err_tstamp;
	ret = nf_conntrack_ecache_init(net);
	if (ret < 0)
		goto err_ecache;

	return 0;

err_ecache:
	nf_conntrack_tstamp_fini(net);
err_tstamp:
	nf_conntrack_acct_fini(net);
err_acct:
	nf_conntrack_expect_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
err_hash:
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
	kfree(net->ct.slabname);
err_slabname:
	free_percpu(net->ct.stat);
err_stat:
	return ret;
}

s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
			enum ip_conntrack_dir dir,
			u32 seq);
EXPORT_SYMBOL_GPL(nf_ct_nat_offset);

int nf_conntrack_init(struct net *net)
{
	int ret;

	if (net_eq(net, &init_net)) {
		ret = nf_conntrack_init_init_net();
		if (ret < 0)
			goto out_init_net;
	}
	ret = nf_conntrack_init_net(net);
	if (ret < 0)
		goto out_net;

	if (net_eq(net, &init_net)) {
		/* For use by REJECT target */
		rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
		rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

		/* How to get NAT offsets */
		rcu_assign_pointer(nf_ct_nat_offset, NULL);
	}
	return 0;

out_net:
	if (net_eq(net, &init_net))
		nf_conntrack_cleanup_init_net();
out_init_net:
	return ret;
}