/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
#include "network-coding.h"
#include "fragmentation.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or
 * NULL if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}
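/* Typical calling pattern (a sketch, not part of the original file): the
 * returned vlan object carries a reference that the caller has to release
 * via batadv_orig_node_vlan_free_ref() once it is done with it:
 *
 *	struct batadv_orig_node_vlan *vlan;
 *
 *	vlan = batadv_orig_node_vlan_get(orig_node, vid);
 *	if (vlan) {
 *		... use vlan ...
 *		batadv_orig_node_vlan_free_ref(vlan);
 *	}
 */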
/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
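/* Note on the lookup pattern above: the router pointer is dereferenced
 * under rcu_read_lock() and may only be kept past rcu_read_unlock() if
 * atomic_inc_not_zero() succeeded, i.e. the object was not already on its
 * way to being freed. The same get/inc_not_zero idiom is used by
 * batadv_orig_node_vlan_get() above.
 */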
/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 * @orig_node: originator object representing the neighbour
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr,
		      struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);

	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	INIT_LIST_HEAD(&neigh_node->bonding_list);

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

out:
	return neigh_node;
}

static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_purge_orig(orig_node, NULL);

	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
				  "originator timed out");

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
}
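/* The two release paths above differ only in how the last reference is
 * handled: batadv_orig_node_free_ref() defers the destruction via
 * call_rcu() so that concurrent RCU readers can finish, while the _now
 * variant frees immediately and is therefore only safe in contexts where
 * no RCU reader can still hold the object (e.g. from within another RCU
 * callback).
 */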
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	INIT_LIST_HEAD(&orig_node->vlan_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	orig_node->router = NULL;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	atomic_set(&orig_node->bond_candidates, 0);

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}
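/* Sketch of the expected usage (assumed, not part of the original file):
 * the caller is responsible for linking the new node into the originator
 * hash and for dropping the extra reference taken above:
 *
 *	orig_node = batadv_orig_node_new(bat_priv, addr);
 *	if (!orig_node)
 *		return NULL;
 *	... insert into bat_priv->orig_hash, then release with
 *	batadv_orig_node_free_ref() once the node is no longer used ...
 */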
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node **best_neigh)
{
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	*best_neigh = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_bonding_candidate_del(orig_node, neigh_node);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* store the best neighbour if this is the first
			 * iteration or if a better neighbour has been found
			 */
			if (!*best_neigh ||
			    bao->bat_neigh_cmp(neigh_node, *best_neigh) > 0)
				*best_neigh = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}

	if (batadv_purge_orig_neighbors(bat_priv, orig_node,
					&best_neigh_node))
		batadv_update_route(bat_priv, orig_node, best_neigh_node);

	return false;
}
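/* batadv_purge_orig_node() returns true only when the originator itself
 * has not been seen for 2 * BATADV_PURGE_TIMEOUT, in which case the
 * caller is expected to remove it from the hash; otherwise only stale
 * neighbours are purged and the route is switched over to the best
 * remaining neighbour.
 */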
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);

	return 0;
}
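/* The two helpers below keep the per-interface data that each orig_node
 * carries (e.g. orig_node->bcast_own(_sum)) in sync when a hard interface
 * is added to or removed from the mesh; the actual resizing is delegated
 * to the routing algorithm via the bat_orig_add_if() / bat_orig_del_if()
 * hooks.
 */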
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}