/*
 * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "cw1200.h"
#include "wsm.h"
#include "bh.h"
#include "sta.h"
#include "debug.h"

#define CW1200_INVALID_RATE_ID (0xFF)

static int cw1200_handle_action_rx(struct cw1200_common *priv,
				   struct sk_buff *skb);
static const struct ieee80211_rate *
cw1200_get_tx_rate(const struct cw1200_common *priv,
		   const struct ieee80211_tx_rate *rate);

/* ******************************************************************** */
/* TX queue lock / unlock						*/

static inline void cw1200_tx_queues_lock(struct cw1200_common *priv)
{
	int i;
	for (i = 0; i < 4; ++i)
		cw1200_queue_lock(&priv->tx_queue[i]);
}

static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv)
{
	int i;
	for (i = 0; i < 4; ++i)
		cw1200_queue_unlock(&priv->tx_queue[i]);
}

/* ******************************************************************** */
/* TX policy cache implementation					*/

static void tx_policy_dump(struct tx_policy *policy)
{
	pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n",
		 policy->raw[0] & 0x0F,  policy->raw[0] >> 4,
		 policy->raw[1] & 0x0F,  policy->raw[1] >> 4,
		 policy->raw[2] & 0x0F,  policy->raw[2] >> 4,
		 policy->raw[3] & 0x0F,  policy->raw[3] >> 4,
		 policy->raw[4] & 0x0F,  policy->raw[4] >> 4,
		 policy->raw[5] & 0x0F,  policy->raw[5] >> 4,
		 policy->raw[6] & 0x0F,  policy->raw[6] >> 4,
		 policy->raw[7] & 0x0F,  policy->raw[7] >> 4,
		 policy->raw[8] & 0x0F,  policy->raw[8] >> 4,
		 policy->raw[9] & 0x0F,  policy->raw[9] >> 4,
		 policy->raw[10] & 0x0F, policy->raw[10] >> 4,
		 policy->raw[11] & 0x0F, policy->raw[11] >> 4,
		 policy->defined);
}

static void tx_policy_build(const struct cw1200_common *priv,
			    /* [out] */ struct tx_policy *policy,
			    struct ieee80211_tx_rate *rates, size_t count)
{
	int i, j;
	unsigned limit = priv->short_frame_max_tx_count;
	unsigned total = 0;
	BUG_ON(rates[0].idx < 0);
	memset(policy, 0, sizeof(*policy));

	/* Sort rates in descending order. */
	for (i = 1; i < count; ++i) {
		if (rates[i].idx < 0) {
			count = i;
			break;
		}
		if (rates[i].idx > rates[i - 1].idx) {
			struct ieee80211_tx_rate tmp = rates[i - 1];
			rates[i - 1] = rates[i];
			rates[i] = tmp;
		}
	}

	/* Eliminate duplicates. */
	total = rates[0].count;
	for (i = 0, j = 1; j < count; ++j) {
		if (rates[j].idx == rates[i].idx) {
			rates[i].count += rates[j].count;
		} else if (rates[j].idx > rates[i].idx) {
			break;
		} else {
			++i;
			if (i != j)
				rates[i] = rates[j];
		}
		total += rates[j].count;
	}
	count = i + 1;

	/* Re-fill the policy, trying to keep every requested rate while
	 * respecting the global max TX retransmission count.
	 */
	if (limit < count)
		limit = count;
	if (total > limit) {
		for (i = 0; i < count; ++i) {
			int left = count - i - 1;
			if (rates[i].count > limit - left)
				rates[i].count = limit - left;
			limit -= rates[i].count;
		}
	}

	/* HACK!!! The device has problems (at least) switching from
	 * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
	 * of time (100-200 ms), leading to a considerable throughput
	 * drop. As a workaround, additional g-rates are injected into
	 * the policy.
	 */
	if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
	    rates[0].idx > 4 && rates[0].count > 2 &&
	    rates[1].idx < 2) {
		int mid_rate = (rates[0].idx + 4) >> 1;

		/* Decrease number of retries for the initial rate */
		rates[0].count -= 2;

		if (mid_rate != 4) {
			/* Keep fallback rate at 1Mbps. */
			rates[3] = rates[1];

			/* Inject 1 transmission on lowest g-rate */
			rates[2].idx = 4;
			rates[2].count = 1;
			rates[2].flags = rates[1].flags;

			/* Inject 1 transmission on mid-rate */
			rates[1].idx = mid_rate;
			rates[1].count = 1;

			/* Fallback to 1 Mbps is a really bad thing,
			 * so let's try to increase the probability of
			 * successful transmission on the lowest g-rate
			 * even more.
			 */
			if (rates[0].count >= 3) {
				--rates[0].count;
				++rates[2].count;
			}

			/* Adjust amount of rates defined */
			count += 2;
		} else {
			/* Keep fallback rate at 1Mbps. */
			rates[2] = rates[1];

			/* Inject 2 transmissions on lowest g-rate */
			rates[1].idx = 4;
			rates[1].count = 2;

			/* Adjust amount of rates defined */
			count += 1;
		}
	}

	policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1;

	for (i = 0; i < count; ++i) {
		unsigned rateid, off, shift, retries;

		rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value;
		off = rateid >> 3;		/* eq. rateid / 8 */
		shift = (rateid & 0x07) << 2;	/* eq. (rateid % 8) * 4 */

		retries = rates[i].count;
		if (retries > 0x0F) {
			rates[i].count = 0x0f;
			retries = 0x0F;
		}
		policy->tbl[off] |= __cpu_to_le32(retries << shift);
		policy->retry_count += retries;
	}

	pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d\n",
		 count,
		 rates[0].idx, rates[0].count,
		 rates[1].idx, rates[1].count,
		 rates[2].idx, rates[2].count,
		 rates[3].idx, rates[3].count);
}
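/* A worked example of the packing above (illustrative only, not part
 * of the protocol description): a rate with hw_value 10 and 3 retries
 * lands in nibble 2 of tbl[1] (off = 10 / 8 = 1, shift = (10 % 8) * 4
 * = 8), while hw_value 0 with 1 retry lands in nibble 0 of tbl[0].
 * Each rate thus gets a 4-bit retry count, eight rates per 32-bit
 * word, clamped to 15 retries.
 */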
static inline bool tx_policy_is_equal(const struct tx_policy *wanted,
				      const struct tx_policy *cached)
{
	size_t count = wanted->defined >> 1;
	if (wanted->defined > cached->defined)
		return false;
	if (count) {
		if (memcmp(wanted->raw, cached->raw, count))
			return false;
	}
	if (wanted->defined & 1) {
		if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F))
			return false;
	}
	return true;
}

static int tx_policy_find(struct tx_policy_cache *cache,
			  const struct tx_policy *wanted)
{
	/* O(n) complexity. Not great, but there are only 8 entries in
	 * the cache. The LRU ordering also helps to reduce search time.
	 */
	struct tx_policy_cache_entry *it;
	/* First search for the policy in the "used" list */
	list_for_each_entry(it, &cache->used, link) {
		if (tx_policy_is_equal(wanted, &it->policy))
			return it - cache->cache;
	}
	/* Then in the "free" list */
	list_for_each_entry(it, &cache->free, link) {
		if (tx_policy_is_equal(wanted, &it->policy))
			return it - cache->cache;
	}
	return -1;
}
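/* Note on the matching rule above: ->defined holds the number of
 * meaningful 4-bit slots (highest rate hw_value + 1), so two policies
 * match when the wanted one is a nibble-string prefix of the cached
 * one: whole bytes are memcmp'ed and a trailing odd nibble is masked
 * and compared separately.
 */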
static inline void tx_policy_use(struct tx_policy_cache *cache,
				 struct tx_policy_cache_entry *entry)
{
	++entry->policy.usage_count;
	list_move(&entry->link, &cache->used);
}

static inline int tx_policy_release(struct tx_policy_cache *cache,
				    struct tx_policy_cache_entry *entry)
{
	int ret = --entry->policy.usage_count;
	if (!ret)
		list_move(&entry->link, &cache->free);
	return ret;
}

void tx_policy_clean(struct cw1200_common *priv)
{
	int idx, locked;
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	struct tx_policy_cache_entry *entry;

	cw1200_tx_queues_lock(priv);
	spin_lock_bh(&cache->lock);
	locked = list_empty(&cache->free);

	for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) {
		entry = &cache->cache[idx];
		/* Policy usage count should be 0 at this time as all queues
		 * should be empty.
		 */
		if (WARN_ON(entry->policy.usage_count)) {
			entry->policy.usage_count = 0;
			list_move(&entry->link, &cache->free);
		}
		memset(&entry->policy, 0, sizeof(entry->policy));
	}
	/* Drop the extra queue lock taken by tx_policy_get() when the
	 * cache ran out of free entries, then the one taken above.
	 */
	if (locked)
		cw1200_tx_queues_unlock(priv);

	cw1200_tx_queues_unlock(priv);
	spin_unlock_bh(&cache->lock);
}

/* ******************************************************************** */
/* External TX policy cache API						*/

void tx_policy_init(struct cw1200_common *priv)
{
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	int i;

	memset(cache, 0, sizeof(*cache));

	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->used);
	INIT_LIST_HEAD(&cache->free);

	for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i)
		list_add(&cache->cache[i].link, &cache->free);
}
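/* Typical lifecycle (a sketch of the call flow, not extra code): the
 * TX path calls tx_policy_get() to map a mac80211 rate set to a
 * policy index, embeds that index into the WSM TX descriptor, and
 * releases it via tx_policy_put() from cw1200_skb_dtor() when the
 * frame is destructed. If the cache runs out of free entries,
 * tx_policy_get() locks the TX queues and they stay locked until an
 * in-flight frame releases its policy.
 */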
static int tx_policy_get(struct cw1200_common *priv,
			 struct ieee80211_tx_rate *rates,
			 size_t count, bool *renew)
{
	int idx;
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	struct tx_policy wanted;

	tx_policy_build(priv, &wanted, rates, count);

	spin_lock_bh(&cache->lock);
	if (WARN_ON_ONCE(list_empty(&cache->free))) {
		spin_unlock_bh(&cache->lock);
		return CW1200_INVALID_RATE_ID;
	}
	idx = tx_policy_find(cache, &wanted);
	if (idx >= 0) {
		pr_debug("[TX policy] Used TX policy: %d\n", idx);
		*renew = false;
	} else {
		struct tx_policy_cache_entry *entry;
		*renew = true;
		/* If the policy is not found, create a new one
		 * using the oldest entry in the "free" list.
		 */
		entry = list_entry(cache->free.prev,
				   struct tx_policy_cache_entry, link);
		entry->policy = wanted;
		idx = entry - cache->cache;
		pr_debug("[TX policy] New TX policy: %d\n", idx);
		tx_policy_dump(&entry->policy);
	}
	tx_policy_use(cache, &cache->cache[idx]);
	if (list_empty(&cache->free)) {
		/* Lock TX queues. */
		cw1200_tx_queues_lock(priv);
	}
	spin_unlock_bh(&cache->lock);
	return idx;
}

static void tx_policy_put(struct cw1200_common *priv, int idx)
{
	int usage, locked;
	struct tx_policy_cache *cache = &priv->tx_policy_cache;

	spin_lock_bh(&cache->lock);
	locked = list_empty(&cache->free);
	usage = tx_policy_release(cache, &cache->cache[idx]);
	if (locked && !usage) {
		/* Unlock TX queues. */
		cw1200_tx_queues_unlock(priv);
	}
	spin_unlock_bh(&cache->lock);
}

static int tx_policy_upload(struct cw1200_common *priv)
{
	struct tx_policy_cache *cache = &priv->tx_policy_cache;
	int i;
	struct wsm_set_tx_rate_retry_policy arg = {
		.num = 0,
	};
	spin_lock_bh(&cache->lock);

	/* Upload only modified entries. */
	for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) {
		struct tx_policy *src = &cache->cache[i].policy;
		if (src->retry_count && !src->uploaded) {
			struct wsm_tx_rate_retry_policy *dst =
				&arg.tbl[arg.num];
			dst->index = i;
			dst->short_retries = priv->short_frame_max_tx_count;
			dst->long_retries = priv->long_frame_max_tx_count;

			dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED |
				WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT;
			memcpy(dst->rate_count_indices, src->tbl,
			       sizeof(dst->rate_count_indices));
			src->uploaded = 1;
			++arg.num;
		}
	}
	spin_unlock_bh(&cache->lock);
	cw1200_debug_tx_cache_miss(priv);
	pr_debug("[TX policy] Upload %d policies\n", arg.num);
	return wsm_set_tx_rate_retry_policy(priv, &arg);
}

void tx_policy_upload_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, tx_policy_upload_work);

	pr_debug("[TX] TX policy upload.\n");
	tx_policy_upload(priv);

	wsm_unlock_tx(priv);
	cw1200_tx_queues_unlock(priv);
}

/* ******************************************************************** */
/* cw1200 TX implementation						*/
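/* Overview of the TX path below, in the handler order used by
 * cw1200_tx(): link ID resolution, PM bookkeeping, TID extraction,
 * crypto tail reservation, DMA alignment, BACK action filtering, WSM
 * header insertion, BT coex priority and rate policy selection. The
 * frame is then placed on a per-AC queue and the bottom half is woken
 * up to deliver it to the device.
 */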
struct cw1200_txinfo {
	struct sk_buff *skb;
	unsigned queue;
	struct ieee80211_tx_info *tx_info;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	size_t hdrlen;
	const u8 *da;
	struct cw1200_sta_priv *sta_priv;
	struct ieee80211_sta *sta;
	struct cw1200_txpriv txpriv;
};

u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates)
{
	u32 ret = 0;
	int i;
	for (i = 0; i < 32; ++i) {
		if (rates & BIT(i))
			ret |= BIT(priv->rates[i].hw_value);
	}
	return ret;
}

static const struct ieee80211_rate *
cw1200_get_tx_rate(const struct cw1200_common *priv,
		   const struct ieee80211_tx_rate *rate)
{
	if (rate->idx < 0)
		return NULL;
	if (rate->flags & IEEE80211_TX_RC_MCS)
		return &priv->mcs_rates[rate->idx];
	return &priv->hw->wiphy->bands[priv->channel->band]->
		bitrates[rate->idx];
}

static int
cw1200_tx_h_calc_link_ids(struct cw1200_common *priv,
			  struct cw1200_txinfo *t)
{
	if (t->sta && t->sta_priv->link_id)
		t->txpriv.raw_link_id =
			t->txpriv.link_id =
			t->sta_priv->link_id;
	else if (priv->mode != NL80211_IFTYPE_AP)
		t->txpriv.raw_link_id =
			t->txpriv.link_id = 0;
	else if (is_multicast_ether_addr(t->da)) {
		if (priv->enable_beacon) {
			t->txpriv.raw_link_id = 0;
			t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM;
		} else {
			t->txpriv.raw_link_id = 0;
			t->txpriv.link_id = 0;
		}
	} else {
		t->txpriv.link_id = cw1200_find_link_id(priv, t->da);
		if (!t->txpriv.link_id)
			t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da);
		if (!t->txpriv.link_id) {
			wiphy_err(priv->hw->wiphy,
				  "No more link IDs available.\n");
			return -ENOENT;
		}
		t->txpriv.raw_link_id = t->txpriv.link_id;
	}
	if (t->txpriv.raw_link_id)
		priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp =
			jiffies;
	if (t->sta && (t->sta->uapsd_queues & BIT(t->queue)))
		t->txpriv.link_id = CW1200_LINK_ID_UAPSD;
	return 0;
}
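/* A note on the two link IDs set above: raw_link_id always indexes
 * link_id_db[] (0 means "no per-STA entry"), while link_id is what is
 * reported to the firmware and may be remapped to a special value:
 * CW1200_LINK_ID_AFTER_DTIM for multicast while beaconing, or
 * CW1200_LINK_ID_UAPSD for frames on a U-APSD access category.
 */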
static void
cw1200_tx_h_pm(struct cw1200_common *priv,
	       struct cw1200_txinfo *t)
{
	if (ieee80211_is_auth(t->hdr->frame_control)) {
		u32 mask = ~BIT(t->txpriv.raw_link_id);
		spin_lock_bh(&priv->ps_state_lock);
		priv->sta_asleep_mask &= mask;
		priv->pspoll_mask &= mask;
		spin_unlock_bh(&priv->ps_state_lock);
	}
}

static void
cw1200_tx_h_calc_tid(struct cw1200_common *priv,
		     struct cw1200_txinfo *t)
{
	if (ieee80211_is_data_qos(t->hdr->frame_control)) {
		u8 *qos = ieee80211_get_qos_ctl(t->hdr);
		t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_data(t->hdr->frame_control)) {
		t->txpriv.tid = 0;
	}
}

static int
cw1200_tx_h_crypt(struct cw1200_common *priv,
		  struct cw1200_txinfo *t)
{
	if (!t->tx_info->control.hw_key ||
	    !ieee80211_has_protected(t->hdr->frame_control))
		return 0;

	/* Reserve room for the IV in the header and for the ICV (plus
	 * the TKIP MIC) at the tail; these are filled by the hardware
	 * and trimmed again in cw1200_tx_confirm_cb().
	 */
	t->hdrlen += t->tx_info->control.hw_key->iv_len;
	skb_put(t->skb, t->tx_info->control.hw_key->icv_len);

	if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		skb_put(t->skb, 8); /* MIC space */

	return 0;
}

static int
cw1200_tx_h_align(struct cw1200_common *priv,
		  struct cw1200_txinfo *t,
		  u8 *flags)
{
	size_t offset = (size_t)t->skb->data & 3;

	if (!offset)
		return 0;

	if (offset & 1) {
		wiphy_err(priv->hw->wiphy,
			  "Bug: attempt to transmit a frame with wrong alignment: %zu\n",
			  offset);
		return -EINVAL;
	}

	if (skb_headroom(t->skb) < offset) {
		wiphy_err(priv->hw->wiphy,
			  "Bug: no space allocated for DMA alignment. headroom: %d\n",
			  skb_headroom(t->skb));
		return -ENOMEM;
	}
	skb_push(t->skb, offset);
	t->hdrlen += offset;
	t->txpriv.offset += offset;
	*flags |= WSM_TX_2BYTES_SHIFT;
	cw1200_debug_tx_align(priv);
	return 0;
}
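/* The device apparently needs 4-byte-aligned DMA buffers, hence the
 * check above: a 2-byte-misaligned frame is pushed down two bytes and
 * flagged with WSM_TX_2BYTES_SHIFT so the padding can be skipped on
 * the device side; odd offsets should never happen and are rejected.
 */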
static int
cw1200_tx_h_action(struct cw1200_common *priv,
		   struct cw1200_txinfo *t)
{
	struct ieee80211_mgmt *mgmt =
		(struct ieee80211_mgmt *)t->hdr;
	if (ieee80211_is_action(t->hdr->frame_control) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK)
		return 1;
	else
		return 0;
}

/* Add WSM header */
static struct wsm_tx *
cw1200_tx_h_wsm(struct cw1200_common *priv,
		struct cw1200_txinfo *t)
{
	struct wsm_tx *wsm;

	if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
		wiphy_err(priv->hw->wiphy,
			  "Bug: no space allocated for WSM header. headroom: %d\n",
			  skb_headroom(t->skb));
		return NULL;
	}

	wsm = skb_push(t->skb, sizeof(struct wsm_tx));
	t->txpriv.offset += sizeof(struct wsm_tx);
	memset(wsm, 0, sizeof(*wsm));
	wsm->hdr.len = __cpu_to_le16(t->skb->len);
	wsm->hdr.id = __cpu_to_le16(0x0004);
	wsm->queue_id = wsm_queue_id_to_wsm(t->queue);
	return wsm;
}

/* BT Coex specific handling */
static void
cw1200_tx_h_bt(struct cw1200_common *priv,
	       struct cw1200_txinfo *t,
	       struct wsm_tx *wsm)
{
	u8 priority = 0;

	if (!priv->bt_present)
		return;

	if (ieee80211_is_nullfunc(t->hdr->frame_control)) {
		priority = WSM_EPTA_PRIORITY_MGT;
	} else if (ieee80211_is_data(t->hdr->frame_control)) {
		/* Skip LLC SNAP header (+6) */
		u8 *payload = &t->skb->data[t->hdrlen];
		__be16 *ethertype = (__be16 *)&payload[6];
		if (be16_to_cpu(*ethertype) == ETH_P_PAE)
			priority = WSM_EPTA_PRIORITY_EAPOL;
	} else if (ieee80211_is_assoc_req(t->hdr->frame_control) ||
		   ieee80211_is_reassoc_req(t->hdr->frame_control)) {
		struct ieee80211_mgmt *mgt_frame =
			(struct ieee80211_mgmt *)t->hdr;

		if (le16_to_cpu(mgt_frame->u.assoc_req.listen_interval) <
		    priv->listen_interval) {
			pr_debug("Modified Listen Interval to %d from %d\n",
				 priv->listen_interval,
				 le16_to_cpu(mgt_frame->u.assoc_req.listen_interval));
			/* Replace the requested listen interval with
			 * the one derived from the SDD.
			 */
			mgt_frame->u.assoc_req.listen_interval =
				cpu_to_le16(priv->listen_interval);
		}
	}

	if (!priority) {
		if (ieee80211_is_action(t->hdr->frame_control))
			priority = WSM_EPTA_PRIORITY_ACTION;
		else if (ieee80211_is_mgmt(t->hdr->frame_control))
			priority = WSM_EPTA_PRIORITY_MGT;
		else if (wsm->queue_id == WSM_QUEUE_VOICE)
			priority = WSM_EPTA_PRIORITY_VOICE;
		else if (wsm->queue_id == WSM_QUEUE_VIDEO)
			priority = WSM_EPTA_PRIORITY_VIDEO;
		else
			priority = WSM_EPTA_PRIORITY_DATA;
	}

	pr_debug("[TX] EPTA priority %d.\n", priority);

	wsm->flags |= priority << 1;
}
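/* wsm->flags is assembled piecewise along the TX path: the 2-byte
 * shift flag from cw1200_tx_h_align(), the EPTA priority merged in as
 * (priority << 1) above, and the rate policy index merged in as
 * (rate_id << 4) by cw1200_tx_h_rate_policy() below.
 */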
static int
cw1200_tx_h_rate_policy(struct cw1200_common *priv,
			struct cw1200_txinfo *t,
			struct wsm_tx *wsm)
{
	bool tx_policy_renew = false;

	t->txpriv.rate_id = tx_policy_get(priv,
					  t->tx_info->control.rates,
					  IEEE80211_TX_MAX_RATES,
					  &tx_policy_renew);
	if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID)
		return -EFAULT;

	wsm->flags |= t->txpriv.rate_id << 4;

	t->rate = cw1200_get_tx_rate(priv,
				     &t->tx_info->control.rates[0]);
	wsm->max_tx_rate = t->rate->hw_value;
	if (t->rate->flags & IEEE80211_TX_RC_MCS) {
		if (cw1200_ht_greenfield(&priv->ht_info))
			wsm->ht_tx_parameters |=
				__cpu_to_le32(WSM_HT_TX_GREENFIELD);
		else
			wsm->ht_tx_parameters |=
				__cpu_to_le32(WSM_HT_TX_MIXED);
	}

	if (tx_policy_renew) {
		pr_debug("[TX] TX policy renew.\n");
		/* Stopping the TX queues every now and then is not
		 * optimal. Better to reimplement task scheduling with
		 * a counter. TODO.
		 */
		wsm_lock_tx_async(priv);
		cw1200_tx_queues_lock(priv);
		if (queue_work(priv->workqueue,
			       &priv->tx_policy_upload_work) <= 0) {
			cw1200_tx_queues_unlock(priv);
			wsm_unlock_tx(priv);
		}
	}
	return 0;
}

static bool
cw1200_tx_h_pm_state(struct cw1200_common *priv,
		     struct cw1200_txinfo *t)
{
	int was_buffered = 1;

	if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM &&
	    !priv->buffered_multicasts) {
		priv->buffered_multicasts = true;
		if (priv->sta_asleep_mask)
			queue_work(priv->workqueue,
				   &priv->multicast_start_work);
	}

	if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID)
		was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++;

	return !was_buffered;
}

/* ******************************************************************** */
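/* Entry point from mac80211. Frames are not sent to the device here:
 * they are pushed onto the per-AC queue and drained later by the
 * bottom half (hence cw1200_bh_wakeup() at the end). A frame that
 * fails any handler is destructed via cw1200_skb_dtor(), which also
 * reports TX status to mac80211.
 */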
void cw1200_tx(struct ieee80211_hw *dev,
	       struct ieee80211_tx_control *control,
	       struct sk_buff *skb)
{
	struct cw1200_common *priv = dev->priv;
	struct cw1200_txinfo t = {
		.skb = skb,
		.queue = skb_get_queue_mapping(skb),
		.tx_info = IEEE80211_SKB_CB(skb),
		.hdr = (struct ieee80211_hdr *)skb->data,
		.txpriv.tid = CW1200_MAX_TID,
		.txpriv.rate_id = CW1200_INVALID_RATE_ID,
	};
	struct ieee80211_sta *sta;
	struct wsm_tx *wsm;
	bool tid_update = false;
	u8 flags = 0;
	int ret;

	if (priv->bh_error)
		goto drop;

	t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control);
	t.da = ieee80211_get_DA(t.hdr);
	if (control) {
		t.sta = control->sta;
		t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv;
	}

	if (WARN_ON(t.queue >= 4))
		goto drop;

	ret = cw1200_tx_h_calc_link_ids(priv, &t);
	if (ret)
		goto drop;

	pr_debug("[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n",
		 skb->len, t.queue, t.txpriv.link_id,
		 t.txpriv.raw_link_id);

	cw1200_tx_h_pm(priv, &t);
	cw1200_tx_h_calc_tid(priv, &t);
	ret = cw1200_tx_h_crypt(priv, &t);
	if (ret)
		goto drop;
	ret = cw1200_tx_h_align(priv, &t, &flags);
	if (ret)
		goto drop;
	ret = cw1200_tx_h_action(priv, &t);
	if (ret)
		goto drop;
	wsm = cw1200_tx_h_wsm(priv, &t);
	if (!wsm) {
		ret = -ENOMEM;
		goto drop;
	}
	wsm->flags |= flags;
	cw1200_tx_h_bt(priv, &t, wsm);
	ret = cw1200_tx_h_rate_policy(priv, &t, wsm);
	if (ret)
		goto drop;

	rcu_read_lock();
	sta = rcu_dereference(t.sta);

	spin_lock_bh(&priv->ps_state_lock);
	{
		tid_update = cw1200_tx_h_pm_state(priv, &t);
		BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
					t.skb, &t.txpriv));
	}
	spin_unlock_bh(&priv->ps_state_lock);

	if (tid_update && sta)
		ieee80211_sta_set_buffered(sta, t.txpriv.tid, true);

	rcu_read_unlock();

	cw1200_bh_wakeup(priv);

	return;

drop:
	cw1200_skb_dtor(priv, skb, &t.txpriv);
	return;
}

/* ******************************************************************** */

static int cw1200_handle_action_rx(struct cw1200_common *priv,
				   struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;

	/* Filter block ACK negotiation: it is fully controlled by firmware */
	if (mgmt->u.action.category == WLAN_CATEGORY_BACK)
		return 1;

	return 0;
}

static int cw1200_handle_pspoll(struct cw1200_common *priv,
				struct sk_buff *skb)
{
	struct ieee80211_sta *sta;
	struct ieee80211_pspoll *pspoll =
		(struct ieee80211_pspoll *)skb->data;
	int link_id = 0;
	u32 pspoll_mask = 0;
	int drop = 1;
	int i;

	if (priv->join_status != CW1200_JOIN_STATUS_AP)
		goto done;
	if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN))
		goto done;

	rcu_read_lock();
	sta = ieee80211_find_sta(priv->vif, pspoll->ta);
	if (sta) {
		struct cw1200_sta_priv *sta_priv;
		sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv;
		link_id = sta_priv->link_id;
		pspoll_mask = BIT(sta_priv->link_id);
	}
	rcu_read_unlock();
	if (!link_id)
		goto done;

	priv->pspoll_mask |= pspoll_mask;
	drop = 0;

	/* Do not report PS-Polls if data for the given link ID is
	 * already queued.
	 */
	for (i = 0; i < 4; ++i) {
		if (cw1200_queue_get_num_queued(&priv->tx_queue[i],
						pspoll_mask)) {
			cw1200_bh_wakeup(priv);
			drop = 1;
			break;
		}
	}
	pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd");
done:
	return drop;
}

/* ******************************************************************** */
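/* TX confirmation from the device. The packet ID reported by the
 * firmware encodes the queue ID and a generation counter, which is
 * how the confirmation is matched back to the queued skb (see
 * cw1200_queue_get_queue_id() and cw1200_queue_get_generation()).
 */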
void cw1200_tx_confirm_cb(struct cw1200_common *priv,
			  int link_id,
			  struct wsm_tx_confirm *arg)
{
	u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id);
	struct cw1200_queue *queue = &priv->tx_queue[queue_id];
	struct sk_buff *skb;
	const struct cw1200_txpriv *txpriv;

	pr_debug("[TX] TX confirm: %d, %d.\n",
		 arg->status, arg->ack_failures);

	if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
		/* STA is stopped. */
		return;
	}

	if (WARN_ON(queue_id >= 4))
		return;

	if (arg->status)
		pr_debug("TX failed: %d.\n", arg->status);

	if ((arg->status == WSM_REQUEUE) &&
	    (arg->flags & WSM_TX_STATUS_REQUEUE)) {
		/* "Requeue" means "implicit suspend" */
		struct wsm_suspend_resume suspend = {
			.link_id = link_id,
			.stop = 1,
			.multicast = !link_id,
		};
		cw1200_suspend_resume(priv, &suspend);
		wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n",
			   link_id,
			   cw1200_queue_get_generation(arg->packet_id) + 1,
			   priv->sta_asleep_mask);
		cw1200_queue_requeue(queue, arg->packet_id);
		spin_lock_bh(&priv->ps_state_lock);
		if (!link_id) {
			priv->buffered_multicasts = true;
			if (priv->sta_asleep_mask) {
				queue_work(priv->workqueue,
					   &priv->multicast_start_work);
			}
		}
		spin_unlock_bh(&priv->ps_state_lock);
	} else if (!cw1200_queue_get_skb(queue, arg->packet_id,
					 &skb, &txpriv)) {
		struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb);
		int tx_count = arg->ack_failures;
		u8 ht_flags = 0;
		int i;

		if (cw1200_ht_greenfield(&priv->ht_info))
			ht_flags |= IEEE80211_TX_RC_GREEN_FIELD;

		spin_lock(&priv->bss_loss_lock);
		if (priv->bss_loss_state &&
		    arg->packet_id == priv->bss_loss_confirm_id) {
			if (arg->status) {
				/* Recovery failed */
				__cw1200_cqm_bssloss_sm(priv, 0, 0, 1);
			} else {
				/* Recovery succeeded */
				__cw1200_cqm_bssloss_sm(priv, 0, 1, 0);
			}
		}
		spin_unlock(&priv->bss_loss_lock);

		if (!arg->status) {
			tx->flags |= IEEE80211_TX_STAT_ACK;
			++tx_count;
			cw1200_debug_txed(priv);
			if (arg->flags & WSM_TX_STATUS_AGGREGATION) {
				/* Do not report aggregation to mac80211:
				 * it confuses minstrel a lot.
				 */
				/* tx->flags |= IEEE80211_TX_STAT_AMPDU; */
				cw1200_debug_txed_agg(priv);
			}
		} else {
			if (tx_count)
				++tx_count;
		}

		/* Distribute the total transmission count over the
		 * rates that were actually tried.
		 */
		for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
			if (tx->status.rates[i].count >= tx_count) {
				tx->status.rates[i].count = tx_count;
				break;
			}
			tx_count -= tx->status.rates[i].count;
			if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS)
				tx->status.rates[i].flags |= ht_flags;
		}

		for (++i; i < IEEE80211_TX_MAX_RATES; ++i) {
			tx->status.rates[i].count = 0;
			tx->status.rates[i].idx = -1;
		}

		/* Pull off any crypto trailers that we added on */
		if (tx->control.hw_key) {
			skb_trim(skb, skb->len - tx->control.hw_key->icv_len);
			if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
				skb_trim(skb, skb->len - 8); /* MIC space */
		}
		cw1200_queue_remove(queue, arg->packet_id);
	}
	/* XXX TODO: Only wake if there are pending transmits.. */
	cw1200_bh_wakeup(priv);
}
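/* Pairs with cw1200_tx_h_pm_state(): that handler increments
 * link_id_db[].buffered[tid] when a frame is queued, and the helper
 * below decrements it on destruction, telling mac80211 when the last
 * buffered frame for the TID is gone.
 */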
static void cw1200_notify_buffered_tx(struct cw1200_common *priv,
				      struct sk_buff *skb, int link_id, int tid)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hdr *hdr;
	u8 *buffered;
	u8 still_buffered = 0;

	if (link_id && tid < CW1200_MAX_TID) {
		buffered = priv->link_id_db[link_id - 1].buffered;

		spin_lock_bh(&priv->ps_state_lock);
		if (!WARN_ON(!buffered[tid]))
			still_buffered = --buffered[tid];
		spin_unlock_bh(&priv->ps_state_lock);

		if (!still_buffered && tid < CW1200_MAX_TID) {
			hdr = (struct ieee80211_hdr *)skb->data;
			rcu_read_lock();
			sta = ieee80211_find_sta(priv->vif, hdr->addr1);
			if (sta)
				ieee80211_sta_set_buffered(sta, tid, false);
			rcu_read_unlock();
		}
	}
}

void cw1200_skb_dtor(struct cw1200_common *priv,
		     struct sk_buff *skb,
		     const struct cw1200_txpriv *txpriv)
{
	skb_pull(skb, txpriv->offset);
	if (txpriv->rate_id != CW1200_INVALID_RATE_ID) {
		cw1200_notify_buffered_tx(priv, skb,
					  txpriv->raw_link_id, txpriv->tid);
		tx_policy_put(priv, txpriv->rate_id);
	}
	ieee80211_tx_status(priv->hw, skb);
}
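/* RX entry point. The function below validates the reported status,
 * fills in the mac80211 RX descriptor (band, frequency, rate, RSSI),
 * strips crypto headers/trailers and the trailing TSF, and hands the
 * frame to mac80211. Frames for a link that is not yet confirmed
 * ("early data") are parked on the link's rx_queue instead.
 */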
void cw1200_rx_cb(struct cw1200_common *priv,
		  struct wsm_rx *arg,
		  int link_id,
		  struct sk_buff **skb_p)
{
	struct sk_buff *skb = *skb_p;
	struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct cw1200_link_entry *entry = NULL;
	unsigned long grace_period;

	bool early_data = false;
	bool p2p = priv->vif && priv->vif->p2p;
	size_t hdrlen;
	hdr->flag = 0;

	if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
		/* STA is stopped. */
		goto drop;
	}

	if (link_id && link_id <= CW1200_MAX_STA_IN_AP_MODE) {
		entry = &priv->link_id_db[link_id - 1];
		if (entry->status == CW1200_LINK_SOFT &&
		    ieee80211_is_data(frame->frame_control))
			early_data = true;
		entry->timestamp = jiffies;
	} else if (p2p &&
		   ieee80211_is_action(frame->frame_control) &&
		   (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
		pr_debug("[RX] Going to MAP&RESET link ID\n");
		WARN_ON(work_pending(&priv->linkid_reset_work));
		memcpy(&priv->action_frame_sa[0],
		       ieee80211_get_SA(frame), ETH_ALEN);
		priv->action_linkid = 0;
		schedule_work(&priv->linkid_reset_work);
	}

	if (link_id && p2p &&
	    ieee80211_is_action(frame->frame_control) &&
	    (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
		/* A link ID already exists for the ACTION frame.
		 * Reset and remap.
		 */
		WARN_ON(work_pending(&priv->linkid_reset_work));
		memcpy(&priv->action_frame_sa[0],
		       ieee80211_get_SA(frame), ETH_ALEN);
		priv->action_linkid = link_id;
		schedule_work(&priv->linkid_reset_work);
	}
	if (arg->status) {
		if (arg->status == WSM_STATUS_MICFAILURE) {
			pr_debug("[RX] MIC failure.\n");
			hdr->flag |= RX_FLAG_MMIC_ERROR;
		} else if (arg->status == WSM_STATUS_NO_KEY_FOUND) {
			pr_debug("[RX] No key found.\n");
			goto drop;
		} else {
			pr_debug("[RX] Receive failure: %d.\n",
				 arg->status);
			goto drop;
		}
	}

	if (skb->len < sizeof(struct ieee80211_pspoll)) {
		wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is smaller than IEEE header.\n");
		goto drop;
	}

	if (ieee80211_is_pspoll(frame->frame_control))
		if (cw1200_handle_pspoll(priv, skb))
			goto drop;

	hdr->band = ((arg->channel_number & 0xff00) ||
		     (arg->channel_number > 14)) ?
			NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
	hdr->freq = ieee80211_channel_to_frequency(
			arg->channel_number,
			hdr->band);

	/* WSM rate IDs 14 and up are MCS indices; 0-3 are the 11b
	 * rates. The -2 below apparently skips two unused WSM rate
	 * slots between the 11b and 11g tables.
	 */
	if (arg->rx_rate >= 14) {
		hdr->encoding = RX_ENC_HT;
		hdr->rate_idx = arg->rx_rate - 14;
	} else if (arg->rx_rate >= 4) {
		hdr->rate_idx = arg->rx_rate - 2;
	} else {
		hdr->rate_idx = arg->rx_rate;
	}

	hdr->signal = (s8)arg->rcpi_rssi;
	hdr->antenna = 0;

	hdrlen = ieee80211_hdrlen(frame->frame_control);

	if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
		size_t iv_len = 0, icv_len = 0;

		hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;

		/* Oops... There is no fast way to ask mac80211 about
		 * IV/ICV lengths. Even the defines are not exposed.
		 */
		switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
		case WSM_RX_STATUS_WEP:
			iv_len = 4 /* WEP_IV_LEN */;
			icv_len = 4 /* WEP_ICV_LEN */;
			break;
		case WSM_RX_STATUS_TKIP:
			iv_len = 8 /* TKIP_IV_LEN */;
			icv_len = 4 /* TKIP_ICV_LEN */
				+ 8 /* MICHAEL_MIC_LEN */;
			hdr->flag |= RX_FLAG_MMIC_STRIPPED;
			break;
		case WSM_RX_STATUS_AES:
			iv_len = 8 /* CCMP_HDR_LEN */;
			icv_len = 8 /* CCMP_MIC_LEN */;
			break;
		case WSM_RX_STATUS_WAPI:
			iv_len = 18 /* WAPI_HDR_LEN */;
			icv_len = 16 /* WAPI_MIC_LEN */;
			break;
		default:
			pr_warn("Unknown encryption type %d\n",
				WSM_RX_STATUS_ENCRYPTION(arg->flags));
			goto drop;
		}

		/* Firmware strips the ICV in case of MIC failure. */
		if (arg->status == WSM_STATUS_MICFAILURE)
			icv_len = 0;

		if (skb->len < hdrlen + iv_len + icv_len) {
			wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is smaller than crypto headers.\n");
			goto drop;
		}

		/* Remove IV, ICV and MIC */
		skb_trim(skb, skb->len - icv_len);
		memmove(skb->data + iv_len, skb->data, hdrlen);
		skb_pull(skb, iv_len);
	}

	/* Remove the TSF from the end of the frame. Check the length
	 * before reading the trailer.
	 */
	if ((arg->flags & WSM_RX_STATUS_TSF_INCLUDED) && skb->len >= 8) {
		memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
		hdr->mactime = le64_to_cpu(hdr->mactime);
		skb_trim(skb, skb->len - 8);
	} else {
		hdr->mactime = 0;
	}

	cw1200_debug_rxed(priv);
	if (arg->flags & WSM_RX_STATUS_AGGREGATE)
		cw1200_debug_rxed_agg(priv);

	if (ieee80211_is_action(frame->frame_control) &&
	    (arg->flags & WSM_RX_STATUS_ADDRESS1)) {
		if (cw1200_handle_action_rx(priv, skb))
			return;
	} else if (ieee80211_is_beacon(frame->frame_control) &&
		   !arg->status && priv->vif &&
		   ether_addr_equal(ieee80211_get_SA(frame),
				    priv->vif->bss_conf.bssid)) {
		const u8 *tim_ie;
		u8 *ies = ((struct ieee80211_mgmt *)
			   (skb->data))->u.beacon.variable;
		size_t ies_len = skb->len - (ies - (u8 *)(skb->data));

		tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
		if (tim_ie) {
			struct ieee80211_tim_ie *tim =
				(struct ieee80211_tim_ie *)&tim_ie[2];

			if (priv->join_dtim_period != tim->dtim_period) {
				priv->join_dtim_period = tim->dtim_period;
				queue_work(priv->workqueue,
					   &priv->set_beacon_wakeup_period_work);
			}
		}

		/* Disable beacon filter once we're associated... */
		if (priv->disable_beacon_filter &&
		    (priv->vif->bss_conf.assoc ||
		     priv->vif->bss_conf.ibss_joined)) {
			priv->disable_beacon_filter = false;
			queue_work(priv->workqueue,
				   &priv->update_filtering_work);
		}
	}

	/* Stay awake after the frame is received to give userspace a
	 * chance to react and acquire an appropriate wakelock.
	 */
	if (ieee80211_is_auth(frame->frame_control))
		grace_period = 5 * HZ;
	else if (ieee80211_is_deauth(frame->frame_control))
		grace_period = 5 * HZ;
	else
		grace_period = 1 * HZ;
	cw1200_pm_stay_awake(&priv->pm_state, grace_period);

	if (early_data) {
		spin_lock_bh(&priv->ps_state_lock);
		/* Double-check the status with the lock held */
		if (entry->status == CW1200_LINK_SOFT)
			skb_queue_tail(&entry->rx_queue, skb);
		else
			ieee80211_rx_irqsafe(priv->hw, skb);
		spin_unlock_bh(&priv->ps_state_lock);
	} else {
		ieee80211_rx_irqsafe(priv->hw, skb);
	}
	*skb_p = NULL;

	return;

drop:
	/* TODO: update failure counters */
	return;
}

/* ******************************************************************** */
/* Security								*/
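/* Key slots are tracked in the key_map bitmap: cw1200_alloc_key()
 * grabs the lowest clear bit (ffs(~key_map) - 1), and the WSM key
 * index equals the slot index, which lets cw1200_upload_keys() replay
 * every allocated key to the device in one pass.
 */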
int cw1200_alloc_key(struct cw1200_common *priv)
{
	int idx;

	idx = ffs(~priv->key_map) - 1;
	if (idx < 0 || idx > WSM_KEY_MAX_INDEX)
		return -1;

	priv->key_map |= BIT(idx);
	priv->keys[idx].index = idx;
	return idx;
}

void cw1200_free_key(struct cw1200_common *priv, int idx)
{
	BUG_ON(!(priv->key_map & BIT(idx)));
	memset(&priv->keys[idx], 0, sizeof(priv->keys[idx]));
	priv->key_map &= ~BIT(idx);
}

void cw1200_free_keys(struct cw1200_common *priv)
{
	memset(&priv->keys, 0, sizeof(priv->keys));
	priv->key_map = 0;
}

int cw1200_upload_keys(struct cw1200_common *priv)
{
	int idx, ret = 0;
	for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx)
		if (priv->key_map & BIT(idx)) {
			ret = wsm_add_key(priv, &priv->keys[idx]);
			if (ret < 0)
				break;
		}
	return ret;
}

/* Workaround for WFD test case 6.1.10 */
void cw1200_link_id_reset(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, linkid_reset_work);
	int temp_linkid;

	if (!priv->action_linkid) {
		/* In GO mode we can receive ACTION frames without a link ID */
		temp_linkid = cw1200_alloc_link_id(priv,
				&priv->action_frame_sa[0]);
		WARN_ON(!temp_linkid);
		if (temp_linkid) {
			/* Make sure we execute the WQ */
			flush_workqueue(priv->workqueue);
			/* Release the link ID */
			spin_lock_bh(&priv->ps_state_lock);
			priv->link_id_db[temp_linkid - 1].prev_status =
				priv->link_id_db[temp_linkid - 1].status;
			priv->link_id_db[temp_linkid - 1].status =
				CW1200_LINK_RESET;
			spin_unlock_bh(&priv->ps_state_lock);
			wsm_lock_tx_async(priv);
			if (queue_work(priv->workqueue,
				       &priv->link_id_work) <= 0)
				wsm_unlock_tx(priv);
		}
	} else {
		spin_lock_bh(&priv->ps_state_lock);
		priv->link_id_db[priv->action_linkid - 1].prev_status =
			priv->link_id_db[priv->action_linkid - 1].status;
		priv->link_id_db[priv->action_linkid - 1].status =
			CW1200_LINK_RESET_REMAP;
		spin_unlock_bh(&priv->ps_state_lock);
		wsm_lock_tx_async(priv);
		if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
			wsm_unlock_tx(priv);
		flush_workqueue(priv->workqueue);
	}
}

int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac)
{
	int i, ret = 0;
	spin_lock_bh(&priv->ps_state_lock);
	for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
		if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) &&
		    priv->link_id_db[i].status) {
			priv->link_id_db[i].timestamp = jiffies;
			ret = i + 1;
			break;
		}
	}
	spin_unlock_bh(&priv->ps_state_lock);
	return ret;
}
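/* Allocation policy: prefer a completely unused slot; otherwise evict
 * the longest-inactive entry that is not CW1200_LINK_HARD and has no
 * frames queued for it. The actual firmware mapping happens
 * asynchronously in cw1200_link_id_work().
 */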
int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac)
{
	int i, ret = 0;
	unsigned long max_inactivity = 0;
	unsigned long now = jiffies;

	spin_lock_bh(&priv->ps_state_lock);
	for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
		if (!priv->link_id_db[i].status) {
			ret = i + 1;
			break;
		} else if (priv->link_id_db[i].status != CW1200_LINK_HARD &&
			   !priv->tx_queue_stats.link_map_cache[i + 1]) {
			unsigned long inactivity =
				now - priv->link_id_db[i].timestamp;
			if (inactivity < max_inactivity)
				continue;
			max_inactivity = inactivity;
			ret = i + 1;
		}
	}
	if (ret) {
		struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1];
		pr_debug("[AP] STA added, link_id: %d\n", ret);
		entry->status = CW1200_LINK_RESERVE;
		memcpy(&entry->mac, mac, ETH_ALEN);
		memset(&entry->buffered, 0, CW1200_MAX_TID);
		skb_queue_head_init(&entry->rx_queue);
		wsm_lock_tx_async(priv);
		if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
			wsm_unlock_tx(priv);
	} else {
		wiphy_info(priv->hw->wiphy,
			   "[AP] Early: no more link IDs available.\n");
	}

	spin_unlock_bh(&priv->ps_state_lock);
	return ret;
}

void cw1200_link_id_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, link_id_work);
	wsm_flush_tx(priv);
	cw1200_link_id_gc_work(&priv->link_id_gc_work.work);
	wsm_unlock_tx(priv);
}

void cw1200_link_id_gc_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, link_id_gc_work.work);
	struct wsm_reset reset = {
		.reset_statistics = false,
	};
	struct wsm_map_link map_link = {
		.link_id = 0,
	};
	unsigned long now = jiffies;
	unsigned long next_gc = -1;
	long ttl;
	bool need_reset;
	u32 mask;
	int i;

	if (priv->join_status != CW1200_JOIN_STATUS_AP)
		return;

	wsm_lock_tx(priv);
	spin_lock_bh(&priv->ps_state_lock);
	for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
		need_reset = false;
		mask = BIT(i + 1);
		if (priv->link_id_db[i].status == CW1200_LINK_RESERVE ||
		    (priv->link_id_db[i].status == CW1200_LINK_HARD &&
		     !(priv->link_id_map & mask))) {
			if (priv->link_id_map & mask) {
				priv->sta_asleep_mask &= ~mask;
				priv->pspoll_mask &= ~mask;
				need_reset = true;
			}
			priv->link_id_map |= mask;
			if (priv->link_id_db[i].status != CW1200_LINK_HARD)
				priv->link_id_db[i].status = CW1200_LINK_SOFT;
			memcpy(map_link.mac_addr, priv->link_id_db[i].mac,
			       ETH_ALEN);
			spin_unlock_bh(&priv->ps_state_lock);
			if (need_reset) {
				reset.link_id = i + 1;
				wsm_reset(priv, &reset);
			}
			map_link.link_id = i + 1;
			wsm_map_link(priv, &map_link);
			next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT);
			spin_lock_bh(&priv->ps_state_lock);
		} else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) {
			ttl = priv->link_id_db[i].timestamp - now +
					CW1200_LINK_ID_GC_TIMEOUT;
			if (ttl <= 0) {
				need_reset = true;
				priv->link_id_db[i].status = CW1200_LINK_OFF;
				priv->link_id_map &= ~mask;
				priv->sta_asleep_mask &= ~mask;
				priv->pspoll_mask &= ~mask;
				eth_zero_addr(map_link.mac_addr);
				spin_unlock_bh(&priv->ps_state_lock);
				reset.link_id = i + 1;
				wsm_reset(priv, &reset);
				spin_lock_bh(&priv->ps_state_lock);
			} else {
				next_gc = min_t(unsigned long, next_gc, ttl);
			}
		} else if (priv->link_id_db[i].status == CW1200_LINK_RESET ||
			   priv->link_id_db[i].status ==
			   CW1200_LINK_RESET_REMAP) {
			int status = priv->link_id_db[i].status;
			priv->link_id_db[i].status =
				priv->link_id_db[i].prev_status;
			priv->link_id_db[i].timestamp = now;
			reset.link_id = i + 1;
			spin_unlock_bh(&priv->ps_state_lock);
			wsm_reset(priv, &reset);
			if (status == CW1200_LINK_RESET_REMAP) {
				memcpy(map_link.mac_addr,
				       priv->link_id_db[i].mac,
				       ETH_ALEN);
				map_link.link_id = i + 1;
				wsm_map_link(priv, &map_link);
				next_gc = min(next_gc,
					      CW1200_LINK_ID_GC_TIMEOUT);
			}
			spin_lock_bh(&priv->ps_state_lock);
		}
		if (need_reset) {
			skb_queue_purge(&priv->link_id_db[i].rx_queue);
			pr_debug("[AP] STA removed, link_id: %d\n",
				 reset.link_id);
		}
	}
	spin_unlock_bh(&priv->ps_state_lock);
	if (next_gc != -1)
		queue_delayed_work(priv->workqueue,
				   &priv->link_id_gc_work, next_gc);
	wsm_unlock_tx(priv);
}