/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	struct inet_bind_bucket *tb;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	if (hlist_nulls_unhashed(&tw->tw_node)) {
		spin_unlock(lock);
		return;
	}
	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	inet_twsk_put(tw);
}

static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	release_net(twsk_net(tw));
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state.  This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into TIMEWAIT chain.
	 * Should be done before removing sk from the established chain
	 * because readers are lockless and search established first.
	 */
	atomic_inc(&tw->tw_refcnt);
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	spin_unlock(lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
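/*
 * A note on reference counting around the hashdance (a summary of what
 * the code in this file does, plus the usual caller contract):
 * inet_twsk_alloc() below starts the timewait socket with
 * tw_refcnt == 1 (the caller's reference), Step 2 above takes a second
 * reference on behalf of the ehash table (dropped again by
 * __inet_twsk_kill()), and inet_twsk_schedule() takes one more for the
 * death row.  The caller, e.g. tcp_time_wait(), is then expected to
 * drop its initial reference with inet_twsk_put() once the socket is
 * hashed and scheduled.
 */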
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
					   const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr = inet->daddr;
		tw->tw_rcv_saddr = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num = inet->num;
		tw->tw_state = TCP_TIME_WAIT;
		tw->tw_substate = state;
		tw->tw_sport = inet->sport;
		tw->tw_dport = inet->dport;
		tw->tw_family = sk->sk_family;
		tw->tw_reuse = sk->sk_reuse;
		tw->tw_hash = sk->sk_hash;
		tw->tw_ipv6only = 0;
		tw->tw_transparent = inet->transparent;
		tw->tw_prot = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		atomic_set(&tw->tw_refcnt, 1);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version, where the lock
	 * was released after detaching the chain.  That was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet this is common,
	 * because soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}
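/*
 * The quota above bounds how much work a single run does while BHs are
 * disabled.  With INET_TWDR_TWKILL_QUOTA of, say, 100 (the value in
 * inet_timewait_sock.h at the time of writing), a slot holding
 * thousands of sockets is drained 100 at a time: inet_twdr_hangman()
 * gives up after one quota and defers the remainder to process context
 * via schedule_work(), where inet_twdr_twkill_work() below can yield
 * the CPU between batches.
 */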
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left? */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);

void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
			struct inet_timewait_death_row *twdr,
			const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost.  N subsequent
	 * retransmitted FINs (or previous segments) may be lost as well
	 * (the probability of such an event is p^(N+1), where p is the
	 * probability of losing a single packet, and the time to detect
	 * the loss is about RTO*(2^N - 1) with exponential backoff).
	 * The normal timewait length is calculated so that we wait at
	 * least for one retransmitted FIN (the maximal RTO is 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement, waiting
	 *   only for 60 sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced, in order to catch old duplicates
	 * and responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect old duplicates, we can
	 * reduce the interval to bounds required by RTO, rather than
	 * MSL.  So, if the peer understands PAWS, we kill the tw bucket
	 * after 3.5*RTO (it is important that this number is greater
	 * than the TS tick!) and detect old duplicates with the help
	 * of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
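	/*
	 * Worked example (a sketch; the exact constants depend on HZ):
	 * with HZ == 1000, INET_TWDR_RECYCLE_TICK works out to 7, so the
	 * recycle wheel ticks in 2^7 = 128-jiffy (~128 ms) steps across
	 * INET_TWDR_RECYCLE_SLOTS == 32 slots.  A recycled timeout of
	 * 3.5*RTO with RTO == 200 ms gives timeo == 700 jiffies, and
	 * slot = (700 + 127) >> 7 == 6, well under 32, so the socket
	 * lands on the fine-grained twcal wheel below.  Longer timeouts
	 * (e.g. the full 60 sec TCP_TIMEWAIT_LEN) exceed the recycle
	 * range and fall through to the coarse cells[] wheel driven by
	 * twdr->period.
	 */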
	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					(slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);

void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
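/*
 * Walk every established-hash bucket's timewait chain and kill the
 * timewait sockets belonging to the given net and family; typically
 * used when a network namespace is dismantled.  Note the
 * restart-from-head pattern below: inet_twsk_deschedule() must run
 * without the ehash chain lock held (it takes the bind-bucket and
 * death-row locks itself), so we grab a reference, drop the lock,
 * kill the socket, and rescan the chain from the beginning.
 */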
void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	int h;

	local_bh_disable();
	for (h = 0; h < hashinfo->ehash_size; h++) {
		struct inet_ehash_bucket *head =
			inet_ehash_bucket(hashinfo, h);
		spinlock_t *lock = inet_ehash_lockp(hashinfo, h);
restart:
		spin_lock(lock);
		sk_nulls_for_each(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if (!net_eq(twsk_net(tw), net) ||
			    tw->tw_family != family)
				continue;

			atomic_inc(&tw->tw_refcnt);
			spin_unlock(lock);
			inet_twsk_deschedule(tw, twdr);
			inet_twsk_put(tw);

			goto restart;
		}
		spin_unlock(lock);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);