/* Copyright (C) 2011-2016  B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "bridge_loop_avoidance.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "packet.h"
#include "soft-interface.h"
#include "sysfs.h"
#include "translation-table.h"

static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);

/**
 * batadv_choose_claim - choose the right bucket for a claim.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the claim
 */
static inline u32 batadv_choose_claim(const void *data, u32 size)
{
	const struct batadv_bla_claim *claim = data;
	u32 hash = 0;

	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);

	return hash % size;
}

/**
 * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the backbone gateway
 */
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
	const struct batadv_bla_backbone_gw *gw = data;
	u32 hash = 0;

	hash = jhash(&gw->orig, sizeof(gw->orig), hash);
	hash = jhash(&gw->vid, sizeof(gw->vid), hash);

	return hash % size;
}

/**
 * batadv_compare_backbone_gw - compare address and vid of two backbone gws
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
 * Return: true if the backbones have the same data, false otherwise
 */
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
				       const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
					 hash_entry);
	const struct batadv_bla_backbone_gw *gw1 = data1;
	const struct batadv_bla_backbone_gw *gw2 = data2;

	if (!batadv_compare_eth(gw1->orig, gw2->orig))
		return false;

	if (gw1->vid != gw2->vid)
		return false;

	return true;
}

/**
 * batadv_compare_claim - compare address and vid of two claims
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claim
 *
 * Return: true if the claims have the same data, false otherwise
 */
static bool batadv_compare_claim(const struct hlist_node *node,
				 const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_claim,
					 hash_entry);
	const struct batadv_bla_claim *cl1 = data1;
	const struct batadv_bla_claim *cl2 = data2;

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
		return false;

	if (cl1->vid != cl2->vid)
		return false;

	return true;
}

/**
 * batadv_backbone_gw_release - release backbone gw from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the backbone gw
 */
static void batadv_backbone_gw_release(struct kref *ref)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
				   refcount);

	kfree_rcu(backbone_gw, rcu);
}

/**
 * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
 *  release it
 * @backbone_gw: backbone gateway to be free'd
 */
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
{
	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
}

/**
 * batadv_claim_release - release claim from lists and queue for free after rcu
 *  grace period
 * @ref: kref pointer of the claim
 */
static void batadv_claim_release(struct kref *ref)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *old_backbone_gw;

	claim = container_of(ref, struct batadv_bla_claim, refcount);

	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	claim->backbone_gw = NULL;
	spin_unlock_bh(&claim->backbone_lock);

	spin_lock_bh(&old_backbone_gw->crc_lock);
	old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&old_backbone_gw->crc_lock);

	batadv_backbone_gw_put(old_backbone_gw);

	kfree_rcu(claim, rcu);
}

/**
 * batadv_claim_put - decrement the claim refcounter and possibly release it
 * @claim: claim to be free'd
 */
static void batadv_claim_put(struct batadv_bla_claim *claim)
{
	kref_put(&claim->refcount, batadv_claim_release);
}

/**
 * batadv_claim_hash_find - looks for a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be local/static data)
 *
 * Return: claim if found or NULL otherwise.
 */
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
		       struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		if (!kref_get_unless_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

/**
 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Return: backbone gateway if found or NULL otherwise
 */
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
			  unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!kref_get_unless_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

/**
 * batadv_bla_del_backbone_claims - delete all claims for a backbone
 * @backbone_gw: backbone gateway where the claims should be removed
 */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_claim_put(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
	spin_unlock_bh(&backbone_gw->crc_lock);
}

/**
 * batadv_bla_send_claim - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	u8 *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY
			  * with XX   = claim type
			  * and YY:YY = group id
			  */
			 (u8 *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the client's mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the client's mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gw's mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_LOOPDETECT:
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));

		break;
	}

	if (vid & BATADV_VLAN_HAS_TAG) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
		if (!skb)
			goto out;
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
	soft_iface->last_rx = jiffies;

	netif_rx(skb);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
}
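
/* For illustration: the special ARP target hardware address built above is
 * simply struct batadv_bla_claim_dst on the wire, i.e. the three magic bytes
 * ff:43:05, one claim type byte and the 16 bit group id. Assuming, purely as
 * an example, a CLAIM (type 0x00) sent by a backbone gateway whose group id
 * happens to be 0x1234, the generated frame would carry the HW DST
 * ff:43:05:00:12:34.
 */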

/**
 * batadv_bla_loopdetect_report - worker for reporting the loop
 * @work: work queue item
 *
 * Throws an uevent, as the loopdetect check function can't do that itself
 * since the kernel may sleep while throwing uevents.
 */
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_priv *bat_priv;
	char vid_str[6] = { '\0' };

	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
				   report_work);
	bat_priv = backbone_gw->bat_priv;

	batadv_info(bat_priv->soft_iface,
		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
		    BATADV_PRINT_VID(backbone_gw->vid));
	snprintf(vid_str, sizeof(vid_str), "%d",
		 BATADV_PRINT_VID(backbone_gw->vid));
	vid_str[sizeof(vid_str) - 1] = 0;

	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
			    vid_str);

	batadv_backbone_gw_put(backbone_gw);
}

/**
 * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is local
 *
 * Return: the (possibly created) backbone gateway or NULL on error
 */
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		   orig, BATADV_PRINT_VID(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	spin_lock_init(&entry->crc_lock);
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);
	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
	kref_init(&entry->refcount);

	kref_get(&entry->refcount);
	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any TT entry on this VLAN */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_put(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}

/**
 * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * Update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws.
 */
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_put(backbone_gw);
}

/**
 * batadv_bla_answer_request - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_put(backbone_gw);
}

/**
 * batadv_bla_send_request - send a request to repeat claims
 * @backbone_gw: the backbone gateway with which we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
 * send an announcement claim with which we can check again.
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

/**
 * batadv_bla_send_announce - Send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	spin_lock_bh(&backbone_gw->crc_lock);
	crc = htons(backbone_gw->crc);
	spin_unlock_bh(&backbone_gw->crc_lock);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}
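
/* For illustration: the announced mac is 43:05:43:05:CC:CC, where CC:CC is
 * the backbone gateway's current claim checksum in network byte order. The
 * checksum is maintained incrementally - adding or removing a claim XORs
 * crc16(0, claim->addr, ETH_ALEN) into backbone_gw->crc - so two gateways
 * that hold the same set of claims for a VLAN compute the same checksum.
 */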

/**
 * batadv_bla_add_claim - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_backbone_gw *old_backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	bool remove_crc = false;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		spin_lock_init(&claim->backbone_lock);
		claim->vid = vid;
		claim->lasttime = jiffies;
		kref_get(&backbone_gw->refcount);
		claim->backbone_gw = backbone_gw;
		kref_init(&claim->refcount);

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			   mac, BATADV_PRINT_VID(vid));

		kref_get(&claim->refcount);
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
			   mac, BATADV_PRINT_VID(vid));

		remove_crc = true;
	}

	/* replace backbone_gw atomically and adjust reference counters */
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;
	spin_unlock_bh(&claim->backbone_lock);

	if (remove_crc) {
		/* remove claim address from old backbone_gw */
		spin_lock_bh(&old_backbone_gw->crc_lock);
		old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		spin_unlock_bh(&old_backbone_gw->crc_lock);
	}

	batadv_backbone_gw_put(old_backbone_gw);

	/* add claim address to new backbone_gw */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&backbone_gw->crc_lock);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_put(claim);
}

/**
 * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
 *  claim
 * @claim: claim whose backbone_gw should be returned
 *
 * Return: valid reference to claim::backbone_gw
 */
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	spin_lock_bh(&claim->backbone_lock);
	backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	spin_unlock_bh(&claim->backbone_lock);

	return backbone_gw;
}

/**
 * batadv_bla_del_claim - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
		   mac, BATADV_PRINT_VID(vid));

	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	batadv_claim_put(claim); /* reference from the hash is gone */

	/* don't need the reference from hash_find() anymore */
	batadv_claim_put(claim);
}

/**
 * batadv_handle_announce - check for ANNOUNCE frame
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
				   u8 *backbone_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	u16 backbone_crc, crc;

	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return false;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	crc = ntohs(*((__be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   BATADV_PRINT_VID(vid), backbone_gw->orig, crc);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	if (backbone_crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   backbone_gw->orig,
			   BATADV_PRINT_VID(backbone_gw->vid),
			   backbone_crc, crc);

		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_put(backbone_gw);
	return true;
}

/**
 * batadv_handle_request - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_request(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, struct ethhdr *ethhdr,
				  unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return false;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return true;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
		   BATADV_PRINT_VID(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return true;
}

/**
 * batadv_handle_unclaim - check for UNCLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, u8 *claim_addr,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return true;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		   claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_put(backbone_gw);
	return true;
}

/**
 * batadv_handle_claim - check for CLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet Source)
 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
				struct batadv_hard_iface *primary_if,
				u8 *backbone_addr, u8 *claim_addr,
				unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_put(backbone_gw);
	return true;
}

/**
 * batadv_check_claim_group - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * Checks if it is a claim packet and if it's from the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * Return:
 * 2 - if it is a claim packet and on the same group
 * 1 - if it is a claim packet from another group
 * 0 - if it is not a claim packet
 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    u8 *hw_src, u8 *hw_dst,
				    struct ethhdr *ethhdr)
{
	u8 *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if it's already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* let's see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* don't accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friend's claim group is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_put(orig_node);

	return 2;
}
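
/* For illustration: with the adoption rule above, a node whose own claim
 * group is, say, 0x0f00 that receives a claim from a mesh member using group
 * 0xff00 switches to 0xff00 itself. Since every node adopts the numerically
 * larger group id, the backbone gateways of one mesh should eventually
 * converge on a single claim group.
 */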

/**
 * batadv_bla_process_claim - Check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Return: true if it was a claim frame, otherwise return false to
 * tell the callee that it can use the frame on its own.
 */
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
				     struct batadv_hard_iface *primary_if,
				     struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	u8 *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return false;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return false; /* not a claim frame */

	/* this must be an ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return false;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);

	/* Check whether the ARP frame carries valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return false;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return false;
	if (arphdr->ar_hln != ETH_ALEN)
		return false;
	if (arphdr->ar_pln != 4)
		return false;

	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return false;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return true;

	/* Let the loopdetect frames on the mesh in any case. */
	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
		return false;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);

	if (ret < 2)
		return !!ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return true;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return true;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
	return true;
}

/**
 * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from other nodes, and remove them in case of
 * a timeout, or clean all backbone gws if now is set.
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_put(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}

/**
 * batadv_bla_purge_claims - Remove claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from our own claims, and remove them in case of
 * a timeout, or clean all claims if now is set.
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
			if (now)
				goto purge_now;

			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				goto skip;

			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				goto skip;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);

purge_now:
			batadv_handle_unclaim(bat_priv, primary_if,
					      backbone_gw->orig,
					      claim->addr, claim->vid);
skip:
			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_update_orig_address - Update the backbone gateways when the own
 *  originator address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_send_loopdetect - send a loopdetect frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: the backbone gateway for which a loop should be detected
 *
 * To detect loops that the bridge loop avoidance can't handle, send a loop
 * detection packet on the backbone. Unlike other BLA frames, this frame will
 * be allowed on the mesh by other nodes. If it is received on the mesh, this
 * indicates that there is a loop.
 */
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
			   struct batadv_bla_backbone_gw *backbone_gw)
{
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
		   backbone_gw->vid);
	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}

/**
 * batadv_bla_status_update - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */
void batadv_bla_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* this function already purges everything when bla is disabled,
	 * so just call that one.
	 */
	batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
	batadv_hardif_put(primary_if);
}

/**
 * batadv_bla_periodic_work - performs periodic bla work
 * @work: kernel work struct
 *
 * periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	bool send_loopdetect = false;
	int i;

	delayed_work = to_delayed_work(work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
		/* set a new random mac address for the next bridge loop
		 * detection frames. Set the locally administered bit to avoid
		 * collisions with users' mac addresses.
		 */
		random_ether_addr(bat_priv->bla.loopdetect_addr);
		bat_priv->bla.loopdetect_addr[0] = 0xba;
		bat_priv->bla.loopdetect_addr[1] = 0xbe;
		bat_priv->bla.loopdetect_lasttime = jiffies;
		atomic_set(&bat_priv->bla.loopdetect_next,
			   BATADV_BLA_LOOPDETECT_PERIODS);

		/* mark for sending loop detect on all VLANs */
		send_loopdetect = true;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);
			if (send_loopdetect)
				batadv_bla_send_loopdetect(bat_priv,
							   backbone_gw);

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
			 */

			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}

/* The claim and backbone hashes receive the same key because they are
 * initialized by hash_new with the same key. Reinitialize them with two
 * different keys to allow nested locking without generating lockdep
 * warnings.
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;

/**
 * batadv_bla_init - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	u16 crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_put(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
	}

	/* initialize the duplicate list */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	atomic_set(&bat_priv->bla.loopdetect_next,
		   BATADV_BLA_LOOPDETECT_PERIODS);

	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}

/**
 * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast
 *  duplicate list
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * Check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				    struct sk_buff *skb)
{
	int i, curr;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;
	bool ret = false;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc ... */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old ;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return true to forbid it.
		 */
		ret = true;
		goto out;
	}
	/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, it's the first occurrence.
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	ether_addr_copy(entry->orig, bcast_packet->orig);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}
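
/* For illustration: bcast_duplist is a small ring buffer of
 * BATADV_DUPLIST_SIZE entries, ordered from newest to oldest starting at
 * bcast_duplist_curr. A rebroadcast with the same payload crc32 seen from a
 * different originator within BATADV_DUPLIST_TIMEOUT is treated as a
 * backbone duplicate and reported for dropping, while the first occurrence
 * simply overwrites the oldest slot and is allowed.
 */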
*/ 1708 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN)) 1709 return false; 1710 1711 vid = batadv_get_vid(skb, hdr_size); 1712 1713 /* see if this originator is a backbone gw for this VLAN */ 1714 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv, 1715 orig_node->orig, vid); 1716 if (!backbone_gw) 1717 return false; 1718 1719 batadv_backbone_gw_put(backbone_gw); 1720 return true; 1721 } 1722 1723 /** 1724 * batadv_bla_free - free all bla structures 1725 * @bat_priv: the bat priv with all the soft interface information 1726 * 1727 * for softinterface free or module unload 1728 */ 1729 void batadv_bla_free(struct batadv_priv *bat_priv) 1730 { 1731 struct batadv_hard_iface *primary_if; 1732 1733 cancel_delayed_work_sync(&bat_priv->bla.work); 1734 primary_if = batadv_primary_if_get_selected(bat_priv); 1735 1736 if (bat_priv->bla.claim_hash) { 1737 batadv_bla_purge_claims(bat_priv, primary_if, 1); 1738 batadv_hash_destroy(bat_priv->bla.claim_hash); 1739 bat_priv->bla.claim_hash = NULL; 1740 } 1741 if (bat_priv->bla.backbone_hash) { 1742 batadv_bla_purge_backbone_gw(bat_priv, 1); 1743 batadv_hash_destroy(bat_priv->bla.backbone_hash); 1744 bat_priv->bla.backbone_hash = NULL; 1745 } 1746 if (primary_if) 1747 batadv_hardif_put(primary_if); 1748 } 1749 1750 /** 1751 * batadv_bla_loopdetect_check - check and handle a detected loop 1752 * @bat_priv: the bat priv with all the soft interface information 1753 * @skb: the packet to check 1754 * @primary_if: interface where the request came on 1755 * @vid: the VLAN ID of the frame 1756 * 1757 * Checks if this packet is a loop detect frame which has been sent by us, 1758 * throw an uevent and log the event if that is the case. 1759 * 1760 * Return: true if it is a loop detect frame which is to be dropped, false 1761 * otherwise. 1762 */ 1763 static bool 1764 batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, 1765 struct batadv_hard_iface *primary_if, 1766 unsigned short vid) 1767 { 1768 struct batadv_bla_backbone_gw *backbone_gw; 1769 struct ethhdr *ethhdr; 1770 1771 ethhdr = eth_hdr(skb); 1772 1773 /* Only check for the MAC address and skip more checks here for 1774 * performance reasons - this function is on the hotpath, after all. 1775 */ 1776 if (!batadv_compare_eth(ethhdr->h_source, 1777 bat_priv->bla.loopdetect_addr)) 1778 return false; 1779 1780 /* If the packet came too late, don't forward it on the mesh 1781 * but don't consider that as loop. It might be a coincidence. 1782 */ 1783 if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime, 1784 BATADV_BLA_LOOPDETECT_TIMEOUT)) 1785 return true; 1786 1787 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, 1788 primary_if->net_dev->dev_addr, 1789 vid, true); 1790 if (unlikely(!backbone_gw)) 1791 return true; 1792 1793 queue_work(batadv_event_workqueue, &backbone_gw->report_work); 1794 /* backbone_gw is unreferenced in the report work function function */ 1795 1796 return true; 1797 } 1798 1799 /** 1800 * batadv_bla_rx - check packets coming from the mesh. 1801 * @bat_priv: the bat priv with all the soft interface information 1802 * @skb: the frame to be checked 1803 * @vid: the VLAN ID of the frame 1804 * @is_bcast: the packet came in a broadcast packet type. 

/**
 * batadv_bla_rx - check packets coming from the mesh.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type.
 *
 * batadv_bla_rx checks if:
 *  * we have to race for a claim
 *  * the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function.
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid, bool is_bcast)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	bool own_claim;
	bool ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
		goto handled;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* No claim exists yet, claim it for us!
		 * (possible optimization: race for a claim)
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	own_claim = batadv_compare_eth(backbone_gw->orig,
				       primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (own_claim) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;

handled:
	kfree_skb(skb);
	ret = true;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}
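
/* Usage sketch (assumed caller context, cf. batadv_interface_rx() in
 * soft-interface.c): frames decapsulated from the mesh are expected to pass
 * through batadv_bla_rx() before being handed up to the soft interface; a
 * true return value means the skb has already been claimed or consumed
 * here, e.g.:
 *
 *	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
 *		goto out;
 */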
1920 */ 1921 bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, 1922 unsigned short vid) 1923 { 1924 struct ethhdr *ethhdr; 1925 struct batadv_bla_claim search_claim, *claim = NULL; 1926 struct batadv_bla_backbone_gw *backbone_gw; 1927 struct batadv_hard_iface *primary_if; 1928 bool client_roamed; 1929 bool ret = false; 1930 1931 primary_if = batadv_primary_if_get_selected(bat_priv); 1932 if (!primary_if) 1933 goto out; 1934 1935 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1936 goto allow; 1937 1938 if (batadv_bla_process_claim(bat_priv, primary_if, skb)) 1939 goto handled; 1940 1941 ethhdr = eth_hdr(skb); 1942 1943 if (unlikely(atomic_read(&bat_priv->bla.num_requests))) 1944 /* don't allow broadcasts while requests are in flight */ 1945 if (is_multicast_ether_addr(ethhdr->h_dest)) 1946 goto handled; 1947 1948 ether_addr_copy(search_claim.addr, ethhdr->h_source); 1949 search_claim.vid = vid; 1950 1951 claim = batadv_claim_hash_find(bat_priv, &search_claim); 1952 1953 /* if no claim exists, allow it. */ 1954 if (!claim) 1955 goto allow; 1956 1957 /* check if we are responsible. */ 1958 backbone_gw = batadv_bla_claim_get_backbone_gw(claim); 1959 client_roamed = batadv_compare_eth(backbone_gw->orig, 1960 primary_if->net_dev->dev_addr); 1961 batadv_backbone_gw_put(backbone_gw); 1962 1963 if (client_roamed) { 1964 /* if yes, the client has roamed and we have 1965 * to unclaim it. 1966 */ 1967 batadv_handle_unclaim(bat_priv, primary_if, 1968 primary_if->net_dev->dev_addr, 1969 ethhdr->h_source, vid); 1970 goto allow; 1971 } 1972 1973 /* check if it is a multicast/broadcast frame */ 1974 if (is_multicast_ether_addr(ethhdr->h_dest)) { 1975 /* drop it. the responsible gateway has forwarded it into 1976 * the backbone network. 1977 */ 1978 goto handled; 1979 } else { 1980 /* we must allow it. at least if we are 1981 * responsible for the DESTINATION. 
1982 */ 1983 goto allow; 1984 } 1985 allow: 1986 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); 1987 ret = false; 1988 goto out; 1989 handled: 1990 ret = true; 1991 out: 1992 if (primary_if) 1993 batadv_hardif_put(primary_if); 1994 if (claim) 1995 batadv_claim_put(claim); 1996 return ret; 1997 } 1998 1999 #ifdef CONFIG_BATMAN_ADV_DEBUGFS 2000 /** 2001 * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file 2002 * @seq: seq file to print on 2003 * @offset: not used 2004 * 2005 * Return: always 0 2006 */ 2007 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) 2008 { 2009 struct net_device *net_dev = (struct net_device *)seq->private; 2010 struct batadv_priv *bat_priv = netdev_priv(net_dev); 2011 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; 2012 struct batadv_bla_backbone_gw *backbone_gw; 2013 struct batadv_bla_claim *claim; 2014 struct batadv_hard_iface *primary_if; 2015 struct hlist_head *head; 2016 u16 backbone_crc; 2017 u32 i; 2018 bool is_own; 2019 u8 *primary_addr; 2020 2021 primary_if = batadv_seq_print_text_primary_if_get(seq); 2022 if (!primary_if) 2023 goto out; 2024 2025 primary_addr = primary_if->net_dev->dev_addr; 2026 seq_printf(seq, 2027 "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n", 2028 net_dev->name, primary_addr, 2029 ntohs(bat_priv->bla.claim_dest.group)); 2030 seq_puts(seq, 2031 " Client VID Originator [o] (CRC )\n"); 2032 for (i = 0; i < hash->size; i++) { 2033 head = &hash->table[i]; 2034 2035 rcu_read_lock(); 2036 hlist_for_each_entry_rcu(claim, head, hash_entry) { 2037 backbone_gw = batadv_bla_claim_get_backbone_gw(claim); 2038 2039 is_own = batadv_compare_eth(backbone_gw->orig, 2040 primary_addr); 2041 2042 spin_lock_bh(&backbone_gw->crc_lock); 2043 backbone_crc = backbone_gw->crc; 2044 spin_unlock_bh(&backbone_gw->crc_lock); 2045 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", 2046 claim->addr, BATADV_PRINT_VID(claim->vid), 2047 backbone_gw->orig, 2048 (is_own ? 'x' : ' '), 2049 backbone_crc); 2050 2051 batadv_backbone_gw_put(backbone_gw); 2052 } 2053 rcu_read_unlock(); 2054 } 2055 out: 2056 if (primary_if) 2057 batadv_hardif_put(primary_if); 2058 return 0; 2059 } 2060 #endif 2061 2062 /** 2063 * batadv_bla_claim_dump_entry - dump one entry of the claim table 2064 * to a netlink socket 2065 * @msg: buffer for the message 2066 * @portid: netlink port 2067 * @seq: Sequence number of netlink message 2068 * @primary_if: primary interface 2069 * @claim: entry to dump 2070 * 2071 * Return: 0 or error code. 
2072 */ 2073 static int 2074 batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, 2075 struct batadv_hard_iface *primary_if, 2076 struct batadv_bla_claim *claim) 2077 { 2078 u8 *primary_addr = primary_if->net_dev->dev_addr; 2079 u16 backbone_crc; 2080 bool is_own; 2081 void *hdr; 2082 int ret = -EINVAL; 2083 2084 hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, 2085 NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM); 2086 if (!hdr) { 2087 ret = -ENOBUFS; 2088 goto out; 2089 } 2090 2091 is_own = batadv_compare_eth(claim->backbone_gw->orig, 2092 primary_addr); 2093 2094 spin_lock_bh(&claim->backbone_gw->crc_lock); 2095 backbone_crc = claim->backbone_gw->crc; 2096 spin_unlock_bh(&claim->backbone_gw->crc_lock); 2097 2098 if (is_own) 2099 if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) { 2100 genlmsg_cancel(msg, hdr); 2101 goto out; 2102 } 2103 2104 if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) || 2105 nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) || 2106 nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN, 2107 claim->backbone_gw->orig) || 2108 nla_put_u16(msg, BATADV_ATTR_BLA_CRC, 2109 backbone_crc)) { 2110 genlmsg_cancel(msg, hdr); 2111 goto out; 2112 } 2113 2114 genlmsg_end(msg, hdr); 2115 ret = 0; 2116 2117 out: 2118 return ret; 2119 } 2120 2121 /** 2122 * batadv_bla_claim_dump_bucket - dump one bucket of the claim table 2123 * to a netlink socket 2124 * @msg: buffer for the message 2125 * @portid: netlink port 2126 * @seq: Sequence number of netlink message 2127 * @primary_if: primary interface 2128 * @head: bucket to dump 2129 * @idx_skip: How many entries to skip 2130 * 2131 * Return: always 0. 2132 */ 2133 static int 2134 batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, 2135 struct batadv_hard_iface *primary_if, 2136 struct hlist_head *head, int *idx_skip) 2137 { 2138 struct batadv_bla_claim *claim; 2139 int idx = 0; 2140 2141 rcu_read_lock(); 2142 hlist_for_each_entry_rcu(claim, head, hash_entry) { 2143 if (idx++ < *idx_skip) 2144 continue; 2145 if (batadv_bla_claim_dump_entry(msg, portid, seq, 2146 primary_if, claim)) { 2147 *idx_skip = idx - 1; 2148 goto unlock; 2149 } 2150 } 2151 2152 *idx_skip = idx; 2153 unlock: 2154 rcu_read_unlock(); 2155 return 0; 2156 } 2157 2158 /** 2159 * batadv_bla_claim_dump - dump claim table to a netlink socket 2160 * @msg: buffer for the message 2161 * @cb: callback structure containing arguments 2162 * 2163 * Return: message length. 
2164 */ 2165 int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb) 2166 { 2167 struct batadv_hard_iface *primary_if = NULL; 2168 int portid = NETLINK_CB(cb->skb).portid; 2169 struct net *net = sock_net(cb->skb->sk); 2170 struct net_device *soft_iface; 2171 struct batadv_hashtable *hash; 2172 struct batadv_priv *bat_priv; 2173 int bucket = cb->args[0]; 2174 struct hlist_head *head; 2175 int idx = cb->args[1]; 2176 int ifindex; 2177 int ret = 0; 2178 2179 ifindex = batadv_netlink_get_ifindex(cb->nlh, 2180 BATADV_ATTR_MESH_IFINDEX); 2181 if (!ifindex) 2182 return -EINVAL; 2183 2184 soft_iface = dev_get_by_index(net, ifindex); 2185 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { 2186 ret = -ENODEV; 2187 goto out; 2188 } 2189 2190 bat_priv = netdev_priv(soft_iface); 2191 hash = bat_priv->bla.claim_hash; 2192 2193 primary_if = batadv_primary_if_get_selected(bat_priv); 2194 if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { 2195 ret = -ENOENT; 2196 goto out; 2197 } 2198 2199 while (bucket < hash->size) { 2200 head = &hash->table[bucket]; 2201 2202 if (batadv_bla_claim_dump_bucket(msg, portid, 2203 cb->nlh->nlmsg_seq, 2204 primary_if, head, &idx)) 2205 break; 2206 bucket++; 2207 } 2208 2209 cb->args[0] = bucket; 2210 cb->args[1] = idx; 2211 2212 ret = msg->len; 2213 2214 out: 2215 if (primary_if) 2216 batadv_hardif_put(primary_if); 2217 2218 if (soft_iface) 2219 dev_put(soft_iface); 2220 2221 return ret; 2222 } 2223 2224 #ifdef CONFIG_BATMAN_ADV_DEBUGFS 2225 /** 2226 * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq 2227 * file 2228 * @seq: seq file to print on 2229 * @offset: not used 2230 * 2231 * Return: always 0 2232 */ 2233 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) 2234 { 2235 struct net_device *net_dev = (struct net_device *)seq->private; 2236 struct batadv_priv *bat_priv = netdev_priv(net_dev); 2237 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; 2238 struct batadv_bla_backbone_gw *backbone_gw; 2239 struct batadv_hard_iface *primary_if; 2240 struct hlist_head *head; 2241 int secs, msecs; 2242 u16 backbone_crc; 2243 u32 i; 2244 bool is_own; 2245 u8 *primary_addr; 2246 2247 primary_if = batadv_seq_print_text_primary_if_get(seq); 2248 if (!primary_if) 2249 goto out; 2250 2251 primary_addr = primary_if->net_dev->dev_addr; 2252 seq_printf(seq, 2253 "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n", 2254 net_dev->name, primary_addr, 2255 ntohs(bat_priv->bla.claim_dest.group)); 2256 seq_puts(seq, " Originator VID last seen (CRC )\n"); 2257 for (i = 0; i < hash->size; i++) { 2258 head = &hash->table[i]; 2259 2260 rcu_read_lock(); 2261 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { 2262 msecs = jiffies_to_msecs(jiffies - 2263 backbone_gw->lasttime); 2264 secs = msecs / 1000; 2265 msecs = msecs % 1000; 2266 2267 is_own = batadv_compare_eth(backbone_gw->orig, 2268 primary_addr); 2269 if (is_own) 2270 continue; 2271 2272 spin_lock_bh(&backbone_gw->crc_lock); 2273 backbone_crc = backbone_gw->crc; 2274 spin_unlock_bh(&backbone_gw->crc_lock); 2275 2276 seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n", 2277 backbone_gw->orig, 2278 BATADV_PRINT_VID(backbone_gw->vid), secs, 2279 msecs, backbone_crc); 2280 } 2281 rcu_read_unlock(); 2282 } 2283 out: 2284 if (primary_if) 2285 batadv_hardif_put(primary_if); 2286 return 0; 2287 } 2288 #endif 2289 2290 /** 2291 * batadv_bla_backbone_dump_entry - dump one entry of the backbone table 2292 * to a netlink socket 
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @backbone_gw: entry to dump
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			       struct batadv_hard_iface *primary_if,
			       struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	int msecs;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			  NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc) ||
	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}

/**
 * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @head: bucket to dump
 * @idx_skip: How many entries to skip
 *
 * Return: always 0.
 */
static int
batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
				struct batadv_hard_iface *primary_if,
				struct hlist_head *head, int *idx_skip)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	int idx = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (idx++ < *idx_skip)
			continue;
		if (batadv_bla_backbone_dump_entry(msg, portid, seq,
						   primary_if, backbone_gw)) {
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	*idx_skip = idx;
unlock:
	rcu_read_unlock();
	return 0;
}

/**
 * batadv_bla_backbone_dump - dump backbone table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
2394 */ 2395 int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb) 2396 { 2397 struct batadv_hard_iface *primary_if = NULL; 2398 int portid = NETLINK_CB(cb->skb).portid; 2399 struct net *net = sock_net(cb->skb->sk); 2400 struct net_device *soft_iface; 2401 struct batadv_hashtable *hash; 2402 struct batadv_priv *bat_priv; 2403 int bucket = cb->args[0]; 2404 struct hlist_head *head; 2405 int idx = cb->args[1]; 2406 int ifindex; 2407 int ret = 0; 2408 2409 ifindex = batadv_netlink_get_ifindex(cb->nlh, 2410 BATADV_ATTR_MESH_IFINDEX); 2411 if (!ifindex) 2412 return -EINVAL; 2413 2414 soft_iface = dev_get_by_index(net, ifindex); 2415 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { 2416 ret = -ENODEV; 2417 goto out; 2418 } 2419 2420 bat_priv = netdev_priv(soft_iface); 2421 hash = bat_priv->bla.backbone_hash; 2422 2423 primary_if = batadv_primary_if_get_selected(bat_priv); 2424 if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { 2425 ret = -ENOENT; 2426 goto out; 2427 } 2428 2429 while (bucket < hash->size) { 2430 head = &hash->table[bucket]; 2431 2432 if (batadv_bla_backbone_dump_bucket(msg, portid, 2433 cb->nlh->nlmsg_seq, 2434 primary_if, head, &idx)) 2435 break; 2436 bucket++; 2437 } 2438 2439 cb->args[0] = bucket; 2440 cb->args[1] = idx; 2441 2442 ret = msg->len; 2443 2444 out: 2445 if (primary_if) 2446 batadv_hardif_put(primary_if); 2447 2448 if (soft_iface) 2449 dev_put(soft_iface); 2450 2451 return ret; 2452 } 2453