/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(*(u32 *)(addr+2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static struct mesh_table *mesh_table_alloc(void)
{
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	INIT_HLIST_HEAD(&newtbl->known_gates);
	atomic_set(&newtbl->entries, 0);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
	kfree(tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
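/*
 * Illustrative sketch (assumed caller, not part of this file): a
 * path-resolution handler is expected to take mpath->state_lock before
 * assigning the next hop, roughly the way the HWMP code reacts to a path
 * reply:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	mesh_path_activate(mpath);
 *	spin_unlock_bh(&mpath->state_lock);
 *	mesh_path_tx_pending(mpath);
 *
 * The real callers live in mesh_hwmp.c; this is only a usage sketch.
 */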
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
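/*
 * What prepare_for_gate() does to a frame that lacks the AE bits, sketched
 * from the code above: the original mesh destination (addr3) and source
 * (addr4) are preserved in the newly inserted extended addresses, and the
 * 802.11 header is retargeted at the gate:
 *
 *	before:	addr1 = old next hop	addr3 = final mesh DA
 *		addr2 = our address	addr4 = mesh SA
 *	after:	addr1 = gate next hop	addr3 = gate address (dst_addr)
 *		addr2 = our address	eaddr1/eaddr2 = old addr3/addr4
 */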
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0, ret;
	struct mesh_path *mpath = NULL;
	struct rhashtable_iter iter;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return NULL;

	rhashtable_walk_start(&iter);

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;
		if (i++ == idx)
			break;
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);

	if (IS_ERR(mpath) || !mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}
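/*
 * Illustrative sketch (assumed caller, not part of this file): the by-index
 * lookups above exist for dump-style iteration under RCU, e.g. from a
 * cfg80211 dump callback:
 *
 *	rcu_read_lock();
 *	for (idx = 0; (mpath = mesh_path_lookup_by_idx(sdata, idx)); idx++)
 *		;	(copy out mpath->dst, mpath->sn, mpath->metric, ...)
 *	rcu_read_unlock();
 *
 * Each call restarts the hashtable walk from the beginning, so a full dump
 * is O(n^2) over the table size; acceptable for occasional dumps.
 */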
/**
 * mesh_path_add_gate - add the given mpath to the list of known mesh gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the newly-added mesh path, or the already-existing path for the
 * same destination; an ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = sdata->u.mesh.mesh_paths;
	do {
		ret = rhashtable_lookup_insert_fast(&tbl->rhead,
						    &new_mpath->rhash,
						    mesh_rht_params);

		if (ret == -EEXIST)
			mpath = rhashtable_lookup_fast(&tbl->rhead,
						       dst,
						       mesh_rht_params);

	} while (unlikely(ret == -EEXIST && !mpath));

	if (ret && ret != -EEXIST) {
		/* insertion failed: don't leak the preallocated entry */
		kfree(new_mpath);
		return ERR_PTR(ret);
	}

	/* At this point either new_mpath was added, or we found a
	 * matching entry already in the table; in the latter case
	 * free the unnecessary new entry.
	 */
	if (ret == -EEXIST) {
		kfree(new_mpath);
		new_mpath = mpath;
	}
	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}
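/*
 * Illustrative sketch (assumed caller): a gate-announcement handler can
 * create the path on demand and then record it as a gate, e.g.:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, gate_addr);
 *	if (!mpath) {
 *		mpath = mesh_path_add(sdata, gate_addr);
 *		if (IS_ERR(mpath)) {
 *			rcu_read_unlock();
 *			return;
 *		}
 *	}
 *	mesh_path_add_gate(mpath);
 *	rcu_read_unlock();
 *
 * This mirrors how RANN processing is expected to use these helpers; the
 * real caller lives in mesh_hwmp.c.
 */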
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = sdata->u.mesh.mpp_paths;
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (ret)
		/* insertion failed (e.g. entry exists): free the new one */
		kfree(new_mpath);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return;

	rhashtable_walk_start(&iter);

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	del_timer_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	mesh_path_free_rcu(tbl, mpath);
}
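/*
 * Note on the rhashtable walks in this file: rhashtable_walk_next() may
 * return ERR_PTR(-EAGAIN) when the table is resized mid-walk, in which case
 * the iterator rewinds; the walkers here skip that return and continue, so
 * an entry can be visited more than once. That is harmless for the flag
 * updates and deletions done here. The shared pattern is:
 *
 *	while ((mpath = rhashtable_walk_next(&iter))) {
 *		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
 *			continue;
 *		if (IS_ERR(mpath))
 *			break;
 *		...operate on mpath...
 *	}
 */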
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return;

	rhashtable_walk_start(&iter);

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;

		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return;

	rhashtable_walk_start(&iter);

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;

		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return;

	rhashtable_walk_start(&iter);

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;
		__mesh_path_del(tbl, mpath);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(sdata->u.mesh.mesh_paths);
	table_flush_by_iface(sdata->u.mesh.mpp_paths);
}
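/*
 * Illustrative sketch (assumed teardown ordering; the callers live
 * elsewhere): paths are flushed while the interface is still up, and the
 * tables themselves are freed only when the interface goes away:
 *
 *	mesh_path_flush_by_iface(sdata);	(on mesh stop)
 *	...
 *	mesh_pathtbl_unregister(sdata);		(on interface teardown)
 */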
/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		rcu_read_unlock();
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	rcu_read_unlock();
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}
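/*
 * Illustrative sketch (assumed caller): when path discovery finally fails,
 * the queued frames are not necessarily lost; a resolution-timeout handler
 * can hand them to the known gates first and drop them only if no gate
 * accepts them:
 *
 *	if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
 *		if (mesh_path_send_to_gates(mpath))
 *			mesh_path_flush_pending(mpath);
 *	} else {
 *		mesh_path_flush_pending(mpath);
 *	}
 *
 * The real timeout handling lives in mesh_hwmp.c; this only shows how the
 * helpers below compose.
 */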
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames are
 * copied from each gate to the next. After frames are copied, the mpath
 * queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock internally; callers must
 * not hold it.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc();
	if (!tbl_path)
		return -ENOMEM;

	tbl_mpp = mesh_table_alloc();
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}

	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);

	sdata->u.mesh.mesh_paths = tbl_path;
	sdata->u.mesh.mpp_paths = tbl_mpp;

	return 0;

free_path:
	/* tbl_path->rhead was never initialized here, so don't run it
	 * through mesh_table_free(); the empty table can just be freed.
	 */
	kfree(tbl_path);
	return ret;
}

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct rhashtable_iter iter;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
	if (ret)
		return;

	rhashtable_walk_start(&iter);

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;
		if (IS_ERR(mpath))
			break;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_free(sdata->u.mesh.mesh_paths);
	mesh_table_free(sdata->u.mesh.mpp_paths);
}
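/*
 * Lifecycle summary (sketch, derived from the functions above):
 * mesh_pathtbl_init() allocates both tables when the mesh interface is set
 * up; mesh_path_expire() is expected to run periodically (e.g. from the
 * mesh housekeeping work) to drop stale, non-fixed entries; and
 * mesh_pathtbl_unregister() frees everything at teardown, relying on
 * rhashtable_free_and_destroy() to invoke mesh_path_free_rcu() on each
 * remaining entry.
 */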