/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"

static void batadv_purge_orig(struct work_struct *work);

static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work, msecs_to_jiffies(1000));
}

/* returns 1 if they are the same originator */
static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_start_purge_timer(bat_priv);
	return 0;

err:
	return -ENOMEM;
}

void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr, uint32_t seqno)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);

	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
	spin_lock_init(&neigh_node->lq_update_lock);

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new neighbor %pM, initial seqno %d\n",
		   neigh_addr, seqno);

out:
	return neigh_node;
}

static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node, *node_tmp;
	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_frag_list_free(&orig_node->frag_list);
	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
				  "originator timed out");

	kfree(orig_node->tt_buff);
	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {

			hlist_del_rcu(node);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist
 */
struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	int size;
	int hash_added;
	unsigned long reset_time;

	orig_node = batadv_orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->tt_poss_change = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->tt_crc = 0;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	atomic_set(&orig_node->tt_size, 0);
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
				     batadv_choose_orig, orig_node,
				     &orig_node->hash_entry);
	if (hash_added != 0)
		goto free_bcast_own_sum;

	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {

			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_bonding_candidate_del(orig_node, neigh_node);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	} else {
		if (batadv_purge_orig_neighbors(bat_priv, orig_node,
						&best_neigh_node))
			batadv_update_route(bat_priv, orig_node,
					    best_neigh_node);
	}

	return false;
}

static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					batadv_gw_node_delete(bat_priv,
							      orig_node);
				hlist_del_rcu(node);
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			if (batadv_has_timed_out(orig_node->last_frag_packet,
						 BATADV_FRAG_TIMEOUT))
				batadv_frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	batadv_start_purge_timer(bat_priv);
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct batadv_hard_iface *primary_if;
	struct batadv_orig_node *orig_node;
	struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	unsigned long last_seen_jiffies;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
		   "Nexthop", "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			neigh_node = batadv_orig_node_get_router(orig_node);
			if (!neigh_node)
				continue;

			if (neigh_node->tq_avg == 0)
				goto next;

			last_seen_jiffies = jiffies - orig_node->last_seen;
			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
			last_seen_secs = last_seen_msecs / 1000;
			last_seen_msecs = last_seen_msecs % 1000;

			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)",
					   neigh_node_tmp->addr,
					   neigh_node_tmp->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;

next:
			batadv_neigh_node_free_ref(neigh_node);
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
				   int max_if_num)
{
	void *data_ptr;
	size_t data_size, old_size;

	data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
	old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(data_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own, old_size);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
				   int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy((char *)data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_del_if(orig_node, max_if_num,
						      hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}