/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unaligned.h>
#include "mesh.h"

#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8

/* Number of frames buffered per destination for unresolved destinations */
#define MESH_FRAME_QUEUE_LEN	10
#define MAX_PREQ_QUEUE_LEN	64

/* Destination only */
#define MP_F_DO	0x1
/* Reply and forward */
#define MP_F_RF	0x2

static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return le32_to_cpu(get_unaligned((__le32 *) (preq_elem + offset)));
}

/* HWMP IE processing macros */
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*(x) & AE_F)
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_DSN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x)	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_DST_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_DST_DSN(x)	u32_field_get(x, 33, AE_F_SET(x))


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(x + 3)
#define PREP_IE_ORIG_DSN(x)	u32_field_get(x, 9, 0)
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_DST_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_DST_DSN(x)	u32_field_get(x, 27, AE_F_SET(x))

#define PERR_IE_DST_ADDR(x)	(x + 2)
#define PERR_IE_DST_DSN(x)	u32_field_get(x, 8, 0)

/* One TU (time unit) is 1024 microseconds */
#define TU_TO_EXP_TIME(x)	(jiffies + msecs_to_jiffies(x * 1024 / 1000))
#define MSEC_TO_TU(x)		(x*1000/1024)
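
/*
 * DSNs are 32-bit sequence numbers that may wrap around, so they are
 * compared using serial-number arithmetic (modulo 2^32) rather than plain
 * relational operators: x is considered newer than y when the signed
 * difference (y - x) is negative.
 */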
#define DSN_GT(x, y)	((s32)(y - x) < 0)
#define DSN_LT(x, y)	((s32)(x - y) < 0)

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.sta.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.sta.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(s->u.sta.mshcfg.min_discovery_timeout)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR
};
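
/*
 * Builds and transmits a PREQ or PREP action frame.  Without address
 * extension (this implementation never sets the AE flag) the element body
 * carries, in order: flags, hop count, TTL, (PREQ only) PREQ ID, originator
 * address, originator DSN, lifetime, metric, (PREQ only) destination count
 * and per-destination flags, destination address and destination DSN, for a
 * total of 37 bytes for a PREQ and 31 bytes for a PREP.
 */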
static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
		u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
		__le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
		__le32 metric, __le32 preq_id, struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
					   IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
	mgmt->u.action.u.mesh_action.action_code = action;

	switch (action) {
	case MPATH_PREQ:
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREQ) {
		memcpy(pos, &preq_id, 4);
		pos += 4;
	}
	memcpy(pos, orig_addr, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &orig_dsn, 4);
	pos += 4;
	memcpy(pos, &lifetime, 4);
	pos += 4;
	memcpy(pos, &metric, 4);
	pos += 4;
	if (action == MPATH_PREQ) {
		/* destination count */
		*pos++ = 1;
		*pos++ = dst_flags;
	}
	memcpy(pos, dst, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &dst_dsn, 4);

	ieee80211_sta_tx(dev, skb, 0);
	return 0;
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @dst: broken destination
 * @dst_dsn: dsn of the broken destination
 * @ra: node this frame is addressed to
 * @dev: local mesh interface
 */
int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
		       struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
	struct ieee80211_mgmt *mgmt;
	u8 *pos;
	int ie_len;

	if (!skb)
		return -1;
	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 25 is the size of the common mgmt part (24) plus the size of the
	 * common action part (1)
	 */
	mgmt = (struct ieee80211_mgmt *)
		skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
	memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
	mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
					   IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
	/* BSSID is left zeroed, wildcard value */
	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
	mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
	ie_len = 12;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* mode flags, reserved */
	*pos++ = 0;
	/* number of destinations */
	*pos++ = 1;
	memcpy(pos, dst, ETH_ALEN);
	pos += ETH_ALEN;
	memcpy(pos, &dst_dsn, 4);

	ieee80211_sta_tx(dev, skb, 0);
	return 0;
}
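
/*
 * Airtime link metric, roughly following the 802.11s draft: the cost of a
 * link is the estimated time to transmit a TEST_FRAME_LEN-bit test frame
 * over it, (O + Bt/r) * 1/(1 - ef), where O is a per-device overhead
 * constant, Bt the test frame length, r the current bitrate and ef the
 * average frame error rate.  All terms below are kept in fixed point,
 * scaled up by ARITH_SHIFT bits.
 */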
static u32 airtime_link_metric_get(struct ieee80211_local *local,
				   struct sta_info *sta)
{
	struct ieee80211_supported_band *sband;
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];

	if (sta->fail_avg >= 100)
		return MAX_METRIC;
	err = (sta->fail_avg << ARITH_SHIFT) / 100;

	/* bitrate is in units of 100 Kbps, while we need rate in units of
	 * 1Mbps. This will be corrected on tx_time computation.
	 */
	rate = sband->bitrates[sta->txrate_idx].bitrate;
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
	return (u32)result;
}

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @dev: local mesh interface
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct net_device *dev,
			       struct ieee80211_mgmt *mgmt,
			       u8 *hwmp_ie)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_dsn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;
	u8 action = mgmt->u.action.u.mesh_action.action_code;

	rcu_read_lock();
	sta = sta_info_get(local, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the destination in
		 * the Path Request. The draft refers to that MP as the
		 * destination address, even though usually it is the origin of
		 * the PREP frame. We deviate from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
		orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, dev);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_DSN_VALID)) {
				if (DSN_GT(mpath->dsn, orig_dsn) ||
				    (mpath->dsn == orig_dsn &&
				     action == MPATH_PREQ &&
				     new_metric > mpath->metric)) {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, dev);
			mpath = mesh_path_lookup(orig_addr, dev);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_DSN_VALID;
			mpath->metric = new_metric;
			mpath->dsn = orig_dsn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* draft says preq_id should be saved too, but there
			 * does not seem to be any use for it, skipping for now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (memcmp(orig_addr, ta, ETH_ALEN) == 0)
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, dev);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
				((mpath->flags & MESH_PATH_ACTIVE) &&
					(last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, dev);
			mpath = mesh_path_lookup(ta, dev);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags &= ~MESH_PATH_DSN_VALID;
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}
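
/*
 * hwmp_preq_frame_process - handle an incoming PREQ
 *
 * Replies with a PREP when this interface is the requested destination, or
 * when it has an active path to the destination and the Destination Only
 * flag is not set.  The PREQ is rebroadcast with updated metric, hop count
 * and TTL unless the reply makes further forwarding unnecessary.
 */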
static void hwmp_preq_frame_process(struct net_device *dev,
				    struct ieee80211_mgmt *mgmt,
				    u8 *preq_elem, u32 metric)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct mesh_path *mpath;
	u8 *dst_addr, *orig_addr;
	u8 dst_flags, ttl;
	u32 orig_dsn, dst_dsn, lifetime;
	bool reply = false;
	bool forward = true;

	/* Update destination DSN, if present */
	dst_addr = PREQ_IE_DST_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	dst_dsn = PREQ_IE_DST_DSN(preq_elem);
	orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
	dst_flags = PREQ_IE_DST_F(preq_elem);

	if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) {
		forward = false;
		reply = true;
		metric = 0;
		if (time_after(jiffies, ifsta->last_dsn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifsta->last_dsn_update)) {
			dst_dsn = ++ifsta->dsn;
			ifsta->last_dsn_update = jiffies;
		}
	} else {
		rcu_read_lock();
		mpath = mesh_path_lookup(dst_addr, dev);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
					DSN_LT(mpath->dsn, dst_dsn)) {
				mpath->dsn = dst_dsn;
				mpath->flags |= MESH_PATH_DSN_VALID;
			} else if ((!(dst_flags & MP_F_DO)) &&
					(mpath->flags & MESH_PATH_ACTIVE)) {
				reply = true;
				metric = mpath->metric;
				dst_dsn = mpath->dsn;
				if (dst_flags & MP_F_RF)
					dst_flags |= MP_F_DO;
				else
					forward = false;
			}
		}
		rcu_read_unlock();
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifsta->mshcfg.dot11MeshTTL;
		if (ttl != 0)
			mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr,
				cpu_to_le32(dst_dsn), 0, orig_addr,
				cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
				cpu_to_le32(lifetime), cpu_to_le32(metric),
				0, dev);
		else
			ifsta->mshstats.dropped_frames_ttl++;
	}

	if (forward) {
		u32 preq_id;
		u8 hopcount, flags;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifsta->mshstats.dropped_frames_ttl++;
			return;
		}
		--ttl;
		flags = PREQ_IE_FLAGS(preq_elem);
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				cpu_to_le32(orig_dsn), dst_flags, dst_addr,
				cpu_to_le32(dst_dsn), dev->broadcast,
				hopcount, ttl, cpu_to_le32(lifetime),
				cpu_to_le32(metric), cpu_to_le32(preq_id),
				dev);
		ifsta->mshstats.fwded_frames++;
	}
}
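
/*
 * hwmp_prep_frame_process - forward a PREP towards the originator of the PREQ
 *
 * If this interface is not the final destination of the PREP, the frame is
 * retransmitted to the next hop on the path back to that destination, with
 * the hop count incremented and the TTL decremented.
 */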
static void hwmp_prep_frame_process(struct net_device *dev,
				    struct ieee80211_mgmt *mgmt,
				    u8 *prep_elem, u32 metric)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath;
	u8 *dst_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 dst_dsn, orig_dsn, lifetime;

	/* Note that we deviate from the draft nomenclature and denominate
	 * destination to what the draft refers to as originator. So in this
	 * function destination refers to the final destination of the PREP,
	 * which corresponds with the originator of the PREQ to which this
	 * PREP replies.
	 */
	dst_addr = PREP_IE_DST_ADDR(prep_elem);
	if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0)
		/* destination, no forwarding required */
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.sta.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, dev);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	dst_dsn = PREP_IE_DST_DSN(prep_elem);
	orig_dsn = PREP_IE_ORIG_DSN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
		cpu_to_le32(orig_dsn), 0, dst_addr,
		cpu_to_le32(dst_dsn), next_hop, hopcount, ttl,
		cpu_to_le32(lifetime), cpu_to_le32(metric),
		0, dev);
	rcu_read_unlock();
	sdata->u.sta.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.sta.mshstats.dropped_frames_no_route++;
	return;
}

static void hwmp_perr_frame_process(struct net_device *dev,
				    struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
	struct mesh_path *mpath;
	u8 *ta, *dst_addr;
	u32 dst_dsn;

	ta = mgmt->sa;
	dst_addr = PERR_IE_DST_ADDR(perr_elem);
	dst_dsn = PERR_IE_DST_DSN(perr_elem);
	rcu_read_lock();
	mpath = mesh_path_lookup(dst_addr, dev);
	if (mpath) {
		spin_lock_bh(&mpath->state_lock);
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    memcmp(ta, mpath->next_hop->addr, ETH_ALEN) == 0 &&
		    (!(mpath->flags & MESH_PATH_DSN_VALID) ||
		     DSN_GT(dst_dsn, mpath->dsn))) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			mpath->dsn = dst_dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
					   dev->broadcast, dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
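
/**
 * mesh_rx_path_sel_frame - dispatch a received path selection action frame
 *
 * @dev: local mesh interface
 * @mgmt: path selection frame (PREQ, PREP or PERR action)
 * @len: length of the frame, counted from the start of the 802.11 header
 */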
void mesh_rx_path_sel_frame(struct net_device *dev,
			    struct ieee80211_mgmt *mgmt,
			    size_t len)
{
	struct ieee802_11_elems elems;
	size_t baselen;
	u32 last_hop_metric;

	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
	ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
			       len - baselen, &elems);

	switch (mgmt->u.action.u.mesh_action.action_code) {
	case MPATH_PREQ:
		if (!elems.preq || elems.preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			return;
		last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq);
		if (!last_hop_metric)
			return;
		hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric);
		break;
	case MPATH_PREP:
		if (!elems.prep || elems.prep_len != 31)
			/* Right now we support no AE */
			return;
		last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep);
		if (!last_hop_metric)
			return;
		hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric);
		break;
	case MPATH_PERR:
		if (!elems.perr || elems.perr_len != 12)
			/* Right now we support only one destination per PERR */
			return;
		hwmp_perr_frame_process(dev, mgmt, elems.perr);
		break;
	default:
		return;
	}
}

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(mpath->dev);
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
	if (!preq_node) {
		printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
		return;
	}

	spin_lock(&ifsta->mesh_preq_queue_lock);
	if (ifsta->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock(&ifsta->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	list_add_tail(&preq_node->list, &ifsta->preq_queue.list);
	++ifsta->preq_queue_len;
	spin_unlock(&ifsta->mesh_preq_queue_lock);

	if (time_after(jiffies, ifsta->last_preq + min_preq_int_jiff(sdata)))
		queue_work(sdata->local->hw.workqueue, &ifsta->work);
	else if (time_before(jiffies, ifsta->last_preq)) {
		/* avoid long wait if we have not sent PREQs for a long time
		 * and jiffies wrapped around
		 */
		ifsta->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		queue_work(sdata->local->hw.workqueue, &ifsta->work);
	} else
		mod_timer(&ifsta->mesh_path_timer, ifsta->last_preq +
					min_preq_int_jiff(sdata));
}

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @dev: local mesh interface
 */
void mesh_path_start_discovery(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_if_sta *ifsta = &sdata->u.sta;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, dst_flags;
	u32 lifetime;

	spin_lock(&ifsta->mesh_preq_queue_lock);
	if (!ifsta->preq_queue_len ||
		time_before(jiffies, ifsta->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock(&ifsta->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifsta->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifsta->preq_queue_len;
	spin_unlock(&ifsta->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(preq_node->dst, dev);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifsta->last_preq = jiffies;

	if (time_after(jiffies, ifsta->last_dsn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifsta->last_dsn_update)) {
		++ifsta->dsn;
		sdata->u.sta.last_dsn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.sta.mshcfg.dot11MeshTTL;
	if (ttl == 0) {
		sdata->u.sta.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		dst_flags = MP_F_DO;
	else
		dst_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr,
			cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst,
			cpu_to_le32(mpath->dsn), dev->broadcast, 0,
			ttl, cpu_to_le32(lifetime), 0,
			cpu_to_le32(ifsta->preq_id++), dev);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
 *
 * @next_hop: output argument for next hop address
 * @skb: frame to be sent
 * @dev: network device the frame will be sent through
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
 * found, the function will start a path discovery and queue the frame so it is
 * sent when the path is resolved. This means the caller must not free the skb
 * in this case.
 */
int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb,
			struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct sk_buff *skb_to_free = NULL;
	struct mesh_path *mpath;
	int err = 0;

	rcu_read_lock();
	mpath = mesh_path_lookup(skb->data, dev);

	if (!mpath) {
		mesh_path_add(skb->data, dev);
		mpath = mesh_path_lookup(skb->data, dev);
		if (!mpath) {
			dev_kfree_skb(skb);
			sdata->u.sta.mshstats.dropped_frames_no_route++;
			err = -ENOSPC;
			goto endlookup;
		}
	}

	if (mpath->flags & MESH_PATH_ACTIVE) {
		if (time_after(jiffies, mpath->exp_time -
			msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time))
				&& skb->pkt_type != PACKET_OTHERHOST
				&& !(mpath->flags & MESH_PATH_RESOLVING)
				&& !(mpath->flags & MESH_PATH_FIXED)) {
			mesh_queue_preq(mpath,
					PREQ_Q_F_START | PREQ_Q_F_REFRESH);
		}
		memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN);
	} else {
		if (!(mpath->flags & MESH_PATH_RESOLVING)) {
			/* Start discovery only if it is not running yet */
			mesh_queue_preq(mpath, PREQ_Q_F_START);
		}

		if (skb_queue_len(&mpath->frame_queue) >=
				MESH_FRAME_QUEUE_LEN) {
			skb_to_free = mpath->frame_queue.next;
			skb_unlink(skb_to_free, &mpath->frame_queue);
		}

		skb_queue_tail(&mpath->frame_queue, skb);
		if (skb_to_free)
			mesh_path_discard_frame(skb_to_free, dev);
		err = -ENOENT;
	}

endlookup:
	rcu_read_unlock();
	return err;
}

/*
 * mesh_path_timer - discovery timeout handler
 *
 * If the path is still unresolved when the timer fires, the PREQ is
 * retransmitted with a doubled discovery timeout, up to
 * dot11MeshHWMPmaxPREQretries attempts; after that the path is marked
 * invalid and any frames still queued on it are discarded.
 */
void mesh_path_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata;
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = (struct mesh_path *) data;
	mpath = rcu_dereference(mpath);
	if (!mpath)
		goto endmpathtimer;
	spin_lock_bh(&mpath->state_lock);
	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING)))
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
	else if (mpath->discovery_retries < max_preq_retries(sdata)) {
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mesh_queue_preq(mpath, 0);
	} else {
		mpath->flags = 0;
		mpath->exp_time = jiffies;
		mesh_path_flush_pending(mpath);
	}

	spin_unlock_bh(&mpath->state_lock);
endmpathtimer:
	rcu_read_unlock();
}