/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/**
 * batadv_compare_orig - comparing function used in the originator hash table
 * @node: node in the local table
 * @data2: second object to compare the node to
 *
 * Return: true if they are the same originator
 */
bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Return: the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!kref_get_unless_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}
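
/* Typical caller pattern for the lookup helpers in this file (sketch only):
 *
 *	vlan = batadv_orig_node_vlan_get(orig_node, vid);
 *	if (vlan) {
 *		... use the object ...
 *		batadv_orig_node_vlan_put(vlan);
 *	}
 *
 * The reference taken with kref_get_unless_zero() inside the RCU read-side
 * section is what keeps the object alive after rcu_read_unlock(); entries
 * whose refcount already dropped to zero are skipped instead of being
 * resurrected while they wait to be freed.
 */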

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 * object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Return: NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	kref_init(&vlan->refcount);
	kref_get(&vlan->refcount);
	vlan->vid = vid;

	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}
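
/* Note on the kref_init()/kref_get() pair used by the *_new() constructors
 * in this file: kref_init() accounts for the reference held by the list the
 * object is added to, while the additional kref_get() is the reference
 * handed back to the caller, to be dropped with the matching *_put() once
 * the caller is done with the object.
 */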

/**
 * batadv_orig_node_vlan_release - release originator-vlan object from lists
 * and queue for free after rcu grace period
 * @ref: kref pointer of the originator-vlan object
 */
static void batadv_orig_node_vlan_release(struct kref *ref)
{
	struct batadv_orig_node_vlan *orig_vlan;

	orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount);

	kfree_rcu(orig_vlan, rcu);
}

/**
 * batadv_orig_node_vlan_put - decrement the refcounter and possibly release
 * the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan)
{
	kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
}

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
 * free after rcu grace period
 * @ref: kref pointer of the neigh_ifinfo
 */
static void batadv_neigh_ifinfo_release(struct kref *ref)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_put(neigh_ifinfo->if_outgoing);

	kfree_rcu(neigh_ifinfo, rcu);
}

/**
 * batadv_neigh_ifinfo_put - decrement the refcounter and possibly release
 * the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release);
}

/**
 * batadv_hardif_neigh_release - release hardif neigh node from lists and
 * queue for free after rcu grace period
 * @ref: kref pointer of the neigh_node
 */
static void batadv_hardif_neigh_release(struct kref *ref)
{
	struct batadv_hardif_neigh_node *hardif_neigh;

	hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
				    refcount);

	spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
	hlist_del_init_rcu(&hardif_neigh->list);
	spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);

	batadv_hardif_put(hardif_neigh->if_incoming);
	kfree_rcu(hardif_neigh, rcu);
}

/**
 * batadv_hardif_neigh_put - decrement the hardif neighbor refcounter and
 * possibly release it
 * @hardif_neigh: hardif neighbor to free
 */
void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh)
{
	kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release);
}

/**
 * batadv_neigh_node_release - release neigh_node from lists and queue for
 * free after rcu grace period
 * @ref: kref pointer of the neigh_node
 */
static void batadv_neigh_node_release(struct kref *ref)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_algo_ops *bao;

	neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_put(neigh_ifinfo);
	}

	batadv_hardif_neigh_put(neigh_node->hardif_neigh);

	if (bao->bat_neigh_free)
		bao->bat_neigh_free(neigh_node);

	batadv_hardif_put(neigh_node->if_incoming);

	kfree_rcu(neigh_node, rcu);
}

/**
 * batadv_neigh_node_put - decrement the neighbor refcounter and possibly
 * release it
 * @neigh_node: neighbor to free
 */
void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
{
	kref_put(&neigh_node->refcount, batadv_neigh_node_release);
}

/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 * the OGM should be sent to
 *
 * Return: the neighbor which should be the router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !kref_get_unless_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!kref_get_unless_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT)
		kref_get(&if_outgoing->refcount);

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	kref_init(&orig_ifinfo->refcount);
	kref_get(&orig_ifinfo->refcount);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}
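
/* The batman_seqno_reset timestamp above starts out more than
 * BATADV_RESET_PROTECTION_MS in the past, so the protection window against
 * repeated sequence number resets is already expired when the first OGM for
 * this interface is processed.
 */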

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Return: the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	kref_init(&neigh_ifinfo->refcount);
	kref_get(&neigh_ifinfo->refcount);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 *
 * Return: neighbor when found. Otherwise NULL
 */
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const u8 *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

/**
 * batadv_hardif_neigh_create - create a hardif neighbour node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 *
 * Return: the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
			   const u8 *neigh_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	spin_lock_bh(&hard_iface->neigh_list_lock);

	/* check if neighbor hasn't been added in the meantime */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		goto out;

	hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
	if (!hardif_neigh)
		goto out;

	kref_get(&hard_iface->refcount);
	INIT_HLIST_NODE(&hardif_neigh->list);
	ether_addr_copy(hardif_neigh->addr, neigh_addr);
	hardif_neigh->if_incoming = hard_iface;
	hardif_neigh->last_seen = jiffies;

	kref_init(&hardif_neigh->refcount);

	if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
		bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);

	hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);

out:
	spin_unlock_bh(&hard_iface->neigh_list_lock);
	return hardif_neigh;
}

/**
 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
 * node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 *
 * Return: the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
				  const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	/* first check without locking to avoid the overhead */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		return hardif_neigh;

	return batadv_hardif_neigh_create(hard_iface, neigh_addr);
}
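
/* batadv_hardif_neigh_get_or_create() follows the usual optimistic scheme:
 * a lockless RCU lookup covers the common case of an already known
 * neighbour, and only on a miss does batadv_hardif_neigh_create() take
 * neigh_list_lock and repeat the lookup before allocating, so that two
 * concurrent callers cannot add the same neighbour twice.
 */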

/**
 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
 * @hard_iface: the interface where this neighbour is connected to
 * @neigh_addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this hard interface.
 *
 * Return: neighbor when found. Otherwise NULL
 */
struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
			const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_hardif_neigh,
				 &hard_iface->neigh_list, list) {
		if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
			continue;

		if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
			continue;

		hardif_neigh = tmp_hardif_neigh;
		break;
	}
	rcu_read_unlock();

	return hardif_neigh;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 *
 * Return: the neighbour node if found or created, NULL otherwise
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
		      struct batadv_hard_iface *hard_iface,
		      const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
							 neigh_addr);
	if (!hardif_neigh)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	kref_get(&hard_iface->refcount);
	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;
	neigh_node->last_seen = jiffies;

	/* increment unique neighbor refcount */
	kref_get(&hardif_neigh->refcount);
	neigh_node->hardif_neigh = hardif_neigh;

	/* extra reference for return */
	kref_init(&neigh_node->refcount);
	kref_get(&neigh_node->refcount);

	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
	return neigh_node;
}

/**
 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
 * @seq: neighbour table seq_file struct
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_put(primary_if);

	if (!bat_priv->bat_algo_ops->bat_neigh_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
	return 0;
}

/**
 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
 * free after rcu grace period
 * @ref: kref pointer of the orig_ifinfo
 */
static void batadv_orig_ifinfo_release(struct kref *ref)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router;

	orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_put(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_put(router);

	kfree_rcu(orig_ifinfo, rcu);
}

/**
 * batadv_orig_ifinfo_put - decrement the refcounter and possibly release
 * the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo)
{
	kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release);
}

/**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	batadv_mcast_purge_orig(orig_node);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}
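
/* orig_node teardown happens in two stages: batadv_orig_node_release()
 * below runs when the last reference is dropped and unlinks the
 * per-originator sub-objects under the relevant locks, while
 * batadv_orig_node_free_rcu() above does the actual freeing only after an
 * RCU grace period, once no RCU reader can still hold a pointer to the
 * orig_node.
 */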

/**
 * batadv_orig_node_release - release orig_node from lists and queue for
 * free after rcu grace period
 * @ref: kref pointer of the orig_node
 */
static void batadv_orig_node_release(struct kref *ref)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_orig_node_vlan *vlan;
	struct batadv_orig_ifinfo *last_candidate;

	orig_node = container_of(ref, struct batadv_orig_node, refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_put(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_put(orig_ifinfo);
	}

	last_candidate = orig_node->last_bonding_candidate;
	orig_node->last_bonding_candidate = NULL;
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (last_candidate)
		batadv_orig_ifinfo_put(last_candidate);

	spin_lock_bh(&orig_node->vlan_list_lock);
	hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
		hlist_del_rcu(&vlan->list);
		batadv_orig_node_vlan_put(vlan);
	}
	spin_unlock_bh(&orig_node->vlan_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_put - decrement the orig node refcounter and possibly
 * release it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_put(struct batadv_orig_node *orig_node)
{
	kref_put(&orig_node->refcount, batadv_orig_node_release);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_put(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 *
 * Return: the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_HLIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	kref_init(&orig_node->refcount);
	kref_get(&orig_node->refcount);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_put(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}

/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: the neighbor whose ifinfo entries are to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_put(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Return: true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_put(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_put(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}
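
/* In batadv_purge_orig_ifinfo() a purged entry may be put twice on purpose:
 * once for the reference held by orig_node->ifinfo_list and a second time
 * when the same entry was also referenced as
 * orig_node->last_bonding_candidate.
 */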

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Return: true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_put(neigh_node);
		} else {
			/* only necessary if the whole neighbor is not to be
			 * deleted, but some interface has been removed
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Return: the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!kref_get_unless_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_put(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}
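
/* The scan in batadv_find_best_neighbor() keeps a reference on the current
 * best candidate while iterating: each new winner is acquired with
 * kref_get_unless_zero() before the previously held best is put, so the
 * returned neighbor always carries one reference for the caller (to be
 * dropped with batadv_neigh_node_put()).
 */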

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Return: true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for NULL ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_put(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_put(best_neigh_node);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	return false;
}

static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				batadv_orig_node_put(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_put(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator info for a specific
 * outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Return: 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_put(hard_iface);
	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}