// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static struct mesh_table *mesh_table_alloc(void)
{
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	INIT_HLIST_HEAD(&newtbl->known_gates);
	INIT_HLIST_HEAD(&newtbl->walk_head);
	atomic_set(&newtbl->entries, 0);
	spin_lock_init(&newtbl->gates_lock);
	spin_lock_init(&newtbl->walk_lock);

	/* rhashtable_init() can fail (e.g. -ENOMEM); don't hand back a
	 * table whose hash was never set up.
	 */
	if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
		kfree(newtbl);
		return NULL;
	}

	return newtbl;
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
	kfree(tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	/* retarget frames already queued on this path at the new next hop */
	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
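
/*
 * Illustrative sketch (not part of this file): per the locking note on
 * mesh_path_assign_nexthop(), a path resolver is expected to take the
 * path's state_lock around the assignment and flag update, roughly:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	mpath->flags |= MESH_PATH_ACTIVE;
 *	spin_unlock_bh(&mpath->state_lock);
 *	mesh_path_tx_pending(mpath);
 */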

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}
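
/*
 * Illustrative usage sketch (not part of this file): the lookup helpers
 * below return RCU-protected entries, so a caller must bracket the lookup
 * and every use of the result in an RCU read-side section, e.g.:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */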

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - record the given mpath as a path to a mesh gate
 * @mpath: gate path to add to the gate list
 *
 * Returns: 0 on success, -EEXIST if the path was already flagged as a gate
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
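
/*
 * Illustrative sketch (hypothetical caller, names are illustrative): a
 * path-selection handler that learns of a gate typically resolves the
 * mpath first and treats -EEXIST as benign, e.g.:
 *
 *	mpath = mesh_path_lookup(sdata, orig_addr);
 *	if (mpath && mesh_path_add_gate(mpath) == -EEXIST)
 *		mpath_dbg(sdata, "gate %pM already known\n", orig_addr);
 */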

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new (or already existing) mesh path on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		/* readers walk this list under RCU, so publish with the
		 * RCU variant, matching mpp_path_add()
		 */
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		/* an entry already existed, or the lookup/insert failed */
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}
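
/*
 * Illustrative sketch (hypothetical rx-side caller; eaddr and mesh_sa are
 * illustrative names): proxy entries are learned from the mesh address
 * extension of received frames, roughly:
 *
 *	rcu_read_lock();
 *	mppath = mpp_path_lookup(sdata, eaddr);
 *	rcu_read_unlock();
 *	if (!mppath)
 *		mpp_path_add(sdata, eaddr, mesh_sa);
 */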

/**
 * mesh_plink_broken - deactivate paths and send PERRs when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	del_timer_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read-side section
 * will be protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}
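
/*
 * Illustrative teardown sketch (assumed ordering, not part of this file):
 * when a mesh interface goes down, all paths are flushed before the
 * tables themselves are freed, roughly:
 *
 *	mesh_path_flush_by_iface(sdata);
 *	synchronize_net();
 *	mesh_pathtbl_unregister(sdata);
 */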

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal (proxy) paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(sdata->u.mesh.mesh_paths);
	table_flush_by_iface(sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, -EHOSTUNREACH if no active gate was found
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
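
/*
 * Illustrative sketch (hypothetical caller): when path discovery finally
 * fails, queued frames are redirected to known gates if there are any,
 * and dropped otherwise, roughly:
 *
 *	if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
 *		if (mesh_path_send_to_gates(mpath))
 *			mesh_path_flush_pending(mpath);
 *	} else {
 *		mesh_path_flush_pending(mpath);
 *	}
 */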

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: mpath->state_lock must NOT be held when calling this function;
 * it is taken internally.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc();
	if (!tbl_path)
		return -ENOMEM;

	tbl_mpp = mesh_table_alloc();
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}

	sdata->u.mesh.mesh_paths = tbl_path;
	sdata->u.mesh.mpp_paths = tbl_mpp;

	return 0;

free_path:
	mesh_table_free(tbl_path);
	return ret;
}

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_free(sdata->u.mesh.mesh_paths);
	mesh_table_free(sdata->u.mesh.mpp_paths);
}
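
/*
 * Illustrative lifetime sketch (assumed call sites, not part of this
 * file): the two tables are created and torn down as a pair from the
 * mesh setup/teardown paths, roughly:
 *
 *	if (mesh_pathtbl_init(sdata))
 *		return -ENOMEM;
 *	...
 *	mesh_path_expire(sdata);	// periodic housekeeping
 *	...
 *	mesh_pathtbl_unregister(sdata);
 */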