/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"

static void purge_orig(struct work_struct *work);

static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}

/* returns 1 if they are the same originator */
static int compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct orig_node, hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

int originator_init(struct bat_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 1;

	bat_priv->orig_hash = hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	start_purge_timer(bat_priv);
	return 1;

err:
	return 0;
}

void neigh_node_free_ref(struct neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
{
	struct neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

struct neigh_node *create_neighbor(struct orig_node *orig_node,
				   struct orig_node *orig_neigh_node,
				   const uint8_t *neigh,
				   struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new last-hop neighbor of originator\n");

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		return NULL;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	spin_lock_init(&neigh_node->tq_lock);

	memcpy(neigh_node->addr, neigh, ETH_ALEN);
	neigh_node->orig_node = orig_neigh_node;
	neigh_node->if_incoming = if_incoming;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_node;
}

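/*
 * orig_node_free_rcu() runs as an RCU callback once the last reference to
 * an orig_node has been dropped: at that point no RCU reader can still
 * hold a pointer to it, so the bonding and neighbor lists can be torn
 * down and all attached buffers freed.
 */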
static void orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node, *tmp_neigh_node;
	struct orig_node *orig_node;

	orig_node = container_of(rcu, struct orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	frag_list_free(&orig_node->frag_list);
	tt_global_del_orig(orig_node->bat_priv, orig_node,
			   "originator timed out");

	kfree(orig_node->tt_buff);
	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

void orig_node_free_ref(struct orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, orig_node_free_rcu);
}

void originator_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {

			hlist_del_rcu(node);
			orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist */
struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
{
	struct orig_node *orig_node;
	int size;
	int hash_added;

	orig_node = orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_poss_change = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->tt_crc = 0;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	atomic_set(&orig_node->tt_size, 0);
	orig_node->bcast_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);
	orig_node->batman_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = hash_add(bat_priv->orig_hash, compare_orig,
			      choose_orig, orig_node, &orig_node->hash_entry);
	if (hash_added < 0)
		goto free_bcast_own_sum;

	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

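/*
 * Drops all neighbors of an originator that have timed out or whose
 * incoming interface is no longer usable. Returns true if at least one
 * neighbor was purged; *best_neigh_node is set to the surviving neighbor
 * with the highest tq_avg (or NULL if none is left).
 */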
static bool purge_orig_neighbors(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 struct neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	bool neigh_purged = false;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		if ((time_after(jiffies,
				neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

			if ((neigh_node->if_incoming->if_status ==
							IF_INACTIVE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_NOT_IN_USE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_TO_BE_REMOVED))
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor purge: originator %pM, "
					"neighbor: %pM, iface: %s\n",
					orig_node->orig, neigh_node->addr,
					neigh_node->if_incoming->net_dev->name);
			else
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor timeout: originator %pM, "
					"neighbor: %pM, last_valid: %lu\n",
					orig_node->orig, neigh_node->addr,
					(neigh_node->last_valid / HZ));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			bonding_candidate_del(orig_node, neigh_node);
			neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

static bool purge_orig_node(struct bat_priv *bat_priv,
			    struct orig_node *orig_node)
{
	struct neigh_node *best_neigh_node;

	if (time_after(jiffies,
		       orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {

		bat_dbg(DBG_BATMAN, bat_priv,
			"Originator timeout: originator %pM, last_valid %lu\n",
			orig_node->orig, (orig_node->last_valid / HZ));
		return true;
	} else {
		if (purge_orig_neighbors(bat_priv, orig_node,
					 &best_neigh_node)) {
			update_routes(bat_priv, orig_node,
				      best_neigh_node);
		}
	}

	return false;
}

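/*
 * One sweep over the complete originator hash: originators silent for
 * more than 2 * PURGE_TIMEOUT are removed entirely (including their
 * gateway entry, if any); for the survivors only stale fragment lists
 * are freed.
 */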
static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
			if (purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(node);
				orig_node_free_ref(orig_node);
				continue;
			}

			if (time_after(jiffies, orig_node->last_frag_packet +
					msecs_to_jiffies(FRAG_TIMEOUT)))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	gw_node_purge(bat_priv);
	gw_election(bat_priv);

	softif_neigh_purge(bat_priv);
}

static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}

void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}

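/*
 * Prints the originator table (one line per originator: last-seen time,
 * the currently selected router and all potential next hops) through the
 * seq_file interface; presumably hooked up to the "originators" debugfs
 * file elsewhere in the module.
 */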
int orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct hard_iface *primary_if;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node, *neigh_node_tmp;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);

	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s "
				 "disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			neigh_node = orig_node_get_router(orig_node);
			if (!neigh_node)
				continue;

			if (neigh_node->tq_avg == 0)
				goto next;

			last_seen_secs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) / 1000;
			last_seen_msecs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) % 1000;

			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)",
					   neigh_node_tmp->addr,
					   neigh_node_tmp->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;

next:
			neigh_node_free_ref(neigh_node);
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
{
	void *data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
			   GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own,
	       (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

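/*
 * Shrinks the per-interface stats arrays of one originator after the
 * interface in slot del_if_num has gone away: entries before the removed
 * slot are copied verbatim, entries behind it move down by one slot.
 * max_if_num is the number of interfaces remaining *after* the removal.
 */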
static int orig_node_del_if(struct orig_node *orig_node,
			    int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part (cast to char * so the offset counts bytes -
	 * plain pointer arithmetic on an unsigned long * would skip
	 * sizeof(unsigned long) times too far) */
	memcpy((char *)data_ptr + del_if_num * chunk_size,
	       (char *)orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hard_iface *hard_iface_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_del_if(orig_node, max_if_num,
					       hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
		if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

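/*
 * Usage note (assumption - the callers live outside this file):
 * orig_hash_add_if()/orig_hash_del_if() are expected to be invoked from
 * the hard-interface code whenever an interface is added to or removed
 * from the mesh, with max_if_num set to the interface count after the
 * change, e.g.:
 *
 *	if (orig_hash_add_if(hard_iface, bat_priv->num_ifaces) < 0)
 *		goto err;
 */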