// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static struct mesh_table *mesh_table_alloc(void)
{
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	INIT_HLIST_HEAD(&newtbl->known_gates);
	INIT_HLIST_HEAD(&newtbl->walk_head);
	atomic_set(&newtbl->entries, 0);
	spin_lock_init(&newtbl->gates_lock);
	spin_lock_init(&newtbl->walk_lock);

	return newtbl;
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
	kfree(tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
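/*
 * Illustrative sketch (comment only, not compiled): the header
 * transformation performed by prepare_for_gate() above on a frame that
 * arrives without an Address Extension field.
 *
 *   before:  | 802.11 hdr | mesh hdr (6 bytes fixed) | payload |
 *   after:   | 802.11 hdr | mesh hdr | eaddr1 | eaddr2 | payload |
 *
 * eaddr1/eaddr2 preserve the frame's original addr3/addr4 endpoints
 * (MESH_FLAGS_AE_A5_A6), while addr1/addr2/addr3 are rewritten to the
 * gate's next hop, our own address, and the gate itself, respectively.
 */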
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}
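/*
 * Usage sketch (illustrative only): both lookups above must run inside
 * an RCU read-side critical section, and the returned mpath may only be
 * dereferenced within it.  "sdata" and "da" are hypothetical caller
 * variables.
 *
 *	struct mesh_path *mpath;
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, da);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop_sta = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */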
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - add the given mpath to the list of known mesh gates
 * @mpath: gate path to add to table
 *
 * Returns: 0 on success, or -EEXIST if @mpath was already marked as a gate.
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
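/*
 * Usage sketch (illustrative only): a caller that learns of a gate,
 * e.g. from a root announcement, would typically resolve or create the
 * path first and then record it; -EEXIST from mesh_path_add_gate() is
 * harmless in that case.  "sdata" and "orig_addr" are hypothetical.
 *
 *	mpath = mesh_path_lookup(sdata, orig_addr);
 *	if (!mpath)
 *		mpath = mesh_path_add(sdata, orig_addr);
 *	if (!IS_ERR(mpath))
 *		mesh_path_add_gate(mpath);
 */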
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the newly added (or already existing) mesh path on success,
 * or an ERR_PTR() on failure.
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);

	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}
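/*
 * Error-handling sketch (illustrative only): unlike mpp_path_add(),
 * which returns an int, mesh_path_add() returns an ERR_PTR() rather
 * than NULL on failure, so callers must test the result with IS_ERR().
 * "sdata" and "dst" are hypothetical.
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);	(-ENOTSUPP, -ENOSPC or -ENOMEM)
 */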
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	del_timer_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation.  This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 * @sdata: interface data to match
 *
 * This function deletes both mesh paths and mesh portal (MPP) paths.
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(sdata->u.mesh.mesh_paths);
	table_flush_by_iface(sdata->u.mesh.mpp_paths);
}
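/*
 * Locking note on the bulk-deletion helpers above: every flush walks
 * tbl->walk_head under tbl->walk_lock and calls __mesh_path_del(),
 * which unlinks the entry from both the walk list and the rhashtable
 * and then defers the actual free through kfree_rcu().  Concurrent RCU
 * readers that still hold an mpath pointer therefore remain safe until
 * their read-side critical section ends.
 */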
/**
 * table_path_del - delete a path from the mesh or mpp table
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, or -EHOSTUNREACH if no active gate was found.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
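/*
 * Usage sketch (illustrative only): one plausible way for a path
 * resolution failure handler to combine the helpers above, trying the
 * known gates first and dropping the queued frames only when no active
 * gate exists.  "sdata" and "mpath" are hypothetical.
 *
 *	if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
 *		if (mesh_path_send_to_gates(mpath))
 *			mesh_path_flush_pending(mpath);
 *	} else {
 *		mesh_path_flush_pending(mpath);
 *	}
 */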
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc();
	if (!tbl_path)
		return -ENOMEM;

	tbl_mpp = mesh_table_alloc();
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}

	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);

	sdata->u.mesh.mesh_paths = tbl_path;
	sdata->u.mesh.mpp_paths = tbl_mpp;

	return 0;

free_path:
	mesh_table_free(tbl_path);
	return ret;
}

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_free(sdata->u.mesh.mesh_paths);
	mesh_table_free(sdata->u.mesh.mpp_paths);
}
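/*
 * Lifecycle sketch (illustrative only): how the table API in this file
 * fits together over an interface's lifetime.  The call sites shown are
 * indicative, not literal.
 *
 *	mesh_pathtbl_init(sdata);		setup: allocate both tables
 *	mesh_path_expire(sdata);		periodic housekeeping work
 *	mesh_path_flush_by_iface(sdata);	teardown: delete all entries
 *	mesh_pathtbl_unregister(sdata);		teardown: free the tables
 */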