/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"

/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"

static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
EXPORT_SYMBOL(wl1271_free_tx_id);
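/*
 * Note on the id allocator above: a descriptor id slot is reclaimed
 * either on Tx completion (wl1271_tx_complete_packet) or on reset
 * (wl12xx_tx_reset). Freeing a slot out of a completely full table also
 * clears WL1271_FLAG_FW_TX_BUSY, so Tx work can be scheduled again.
 */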
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct wl12xx_vif *wlvif,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (!ieee80211_is_auth(hdr->frame_control))
		return;

	/*
	 * add the station to the known list before transmitting the
	 * authentication response. this way it won't get de-authed by FW
	 * when transmitting too soon.
	 */
	wl1271_acx_set_inconnection_sta(wl, hdr->addr1);

	/*
	 * ROC for 1 second on the AP channel for completing the connection.
	 * Note the ROC will be continued by the update_sta_state callbacks
	 * once the station reaches the associated state.
	 */
	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
	wlvif->pending_auth_reply_time = jiffies;
	cancel_delayed_work(&wlvif->pending_auth_complete_work);
	ieee80211_queue_delayed_work(wl->hw,
				&wlvif->pending_auth_complete_work,
				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}

static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 3 active links, since we must
	 * account for the global and broadcast AP links. The "fw_ps" check
	 * assures us the third link is a STA connected to the AP. Otherwise
	 * the FW would not set the PSM bit.
	 */
	if (wl->active_link_count > 3 && fw_ps &&
	    tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);

static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{
	if (sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}

u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *control;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

	control = IEEE80211_SKB_CB(skb);
	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		wl1271_debug(DEBUG_TX, "tx offchannel");
		return wlvif->dev_hlid;
	}

	return wlvif->sta.hlid;
}

unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);

static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* If the FW was empty before, arm the Tx watchdog */
		if (wl->tx_allocated_blocks == total_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}
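/*
 * Per-frame layout produced by wl1271_tx_allocate() and completed by
 * wl1271_tx_fill_hdr() below (the exact descriptor fields are
 * chip-specific and filled in via the wlcore_hw_* ops):
 *
 *	+--------------------+------------+-----------------+---------+
 *	| wl1271_tx_hw_descr | 802.11 hdr | extra security  | payload |
 *	| (blocks/id/attrs)  |            | room (e.g TKIP) |         |
 *	+--------------------+------------+-----------------+---------+
 *
 * followed in the aggregation buffer by zero padding up to the aligned
 * length returned by wlcore_calc_packet_alignment().
 */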
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		u8 session_id = wl->session_ids[hlid];

		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
			session_id = 0;

		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * if the packets are data packets
		 * send them with AP rate policies (EAPOLs are an exception),
		 * otherwise use default basic rates
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * min basic rate
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}
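/*
 * Rate policy selection above, summarized:
 *
 *	STA role: EAPOL and non-data -> basic rates, P2P (no-CCK) ->
 *	          p2p rates, other data -> the AP (peer) rate policy.
 *	AP role:  global link -> mgmt rates, bcast/EAPOL/non-data ->
 *	          min basic rate, unicast data -> per-AC ucast policy.
 */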
/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned. The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (e.g. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}
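/*
 * Return contract of wl1271_prepare_tx_frame(), as consumed by
 * wlcore_tx_work_locked() below: a positive padded frame length on
 * success, -EAGAIN when the aggregation buffer is full (flush and
 * retry), -EBUSY when FW memory blocks or descriptor ids ran out (stop
 * aggregating), and any other negative value (bad skb/hlid, failed WEP
 * key command) means the frame is dropped.
 */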
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum ieee80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rate indications are on bits 16 - 31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}

void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
			    wlvif->tx_queue_count[i] <=
					WL1271_TX_QUEUE_LOW_WATERMARK)
				/* firmware buffer has space, restart queues */
				wlcore_wake_queue(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}

static int wlcore_select_ac(struct wl1271 *wl)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the least allocated blocks
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (wl->tx_queue_count[ac] &&
		    wl->tx_allocated_pkts[ac] < min_pkts) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	return q;
}
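/*
 * The strict '<' comparison above is what implements the VO>VI>BE>BK
 * priority: the loop visits the ACs in mac80211 priority order (queue 0
 * is VO), so on a tie in allocated packet counts the higher-priority AC
 * is kept.
 */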
static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
{
	struct sk_buff *skb;
	unsigned long flags;

	skb = skb_dequeue(&lnk->tx_queue[q]);
	if (skb) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		if (lnk->wlvif) {
			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
			lnk->wlvif->tx_queue_count[q]--;
		}
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
						    u8 hlid, u8 ac,
						    u8 *low_prio_hlid)
{
	struct wl1271_link *lnk = &wl->links[hlid];

	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
			/* we found the first non-empty low priority queue */
			*low_prio_hlid = hlid;

		return NULL;
	}

	return wlcore_lnk_dequeue(wl, lnk, ac);
}

static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
						    struct wl12xx_vif *wlvif,
						    u8 ac, u8 *hlid,
						    u8 *low_prio_hlid)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
		h = (start_hlid + i) % WL12XX_MAX_LINKS;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
						   low_prio_hlid);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	*hlid = wlvif->last_tx_hlid;
	return skb;
}

static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	int ac;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;

	ac = wlcore_select_ac(wl);
	if (ac < 0)
		goto out;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				continue;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (!skb)
				continue;

			wl->last_wlvif = wlvif;
			break;
		}
	}

	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb) {
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
						   ac, &low_prio_hlid);
		if (skb) {
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
		}
	}

	/* Do a new pass over the wlvif list. But no need to continue
	 * after last_wlvif. The previous pass should have found it.
	 */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				goto next;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

next:
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);

		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;

		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
		if (lnk->wlvif)
			lnk->wlvif->last_tx_hlid = low_prio_hlid;
	}

out:
	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}
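/*
 * Dequeue order implemented above, in summary: pick the least-loaded AC,
 * then round-robin across vifs (continuing from wl->last_wlvif), and
 * within each vif round-robin across its links (continuing from
 * last_tx_hlid). High-priority links are served first; the first skipped
 * non-empty low-priority link is remembered and served only when nothing
 * else has pending traffic. The dummy packet is handed out last, and
 * only when the FW has requested one.
 */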
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
				      WL12XX_MAX_LINKS;
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	if (wlvif)
		wlvif->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}

void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}
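/*
 * Rx streaming (above) is re-armed for every STA vif that had a link
 * touched by the current Tx batch; it keeps the FW delivering Rx for the
 * configured duration after Tx activity, presumably to cut Rx latency
 * while a traffic burst is in progress (by default only when Soft
 * Gemini, i.e. BT coexistence, is active).
 */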
/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * In case a FW command fails within wl1271_prepare_tx_frame, a recovery
 * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
 * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
 * within prepare_tx_frame code but there's nothing we should do about those
 * as well.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;

	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb))
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
						    wl->aggr_buf, buf_offset,
						    true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					    buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
						 wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return bus_ret;
}

void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;

	return flags;
}
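/*
 * The magic numbers above reflect the wl12xx Rx/Tx rate enumeration,
 * where the first entries are presumably the MCS rates (index 0 being
 * the short-GI MCS entry, indices up to 8 covering MCS rates); the TODOs
 * note that these should become named wl12xx constants once this code
 * lives there.
 */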
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);
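/*
 * Note on the counters above: tx_result_fw_counter and
 * wl->tx_results_count are free-running u32 counters, so the subtraction
 * "fw_counter - wl->tx_results_count" is wraparound-safe, and masking
 * with TX_HW_RESULT_QUEUE_LEN_MASK turns the running count into a
 * ring-buffer offset (the mask name implies a power-of-two queue
 * length).
 */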
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= total[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);
		} else {
			u8 hlid = i;
			wl12xx_free_link(wl, wlvif, &hlid);
		}
	}
	wlvif->last_tx_hlid = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
}
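/*
 * wl12xx_tx_reset_wlvif() above tears down per-vif link state (freeing a
 * station also frees its link via wl12xx_free_link), while
 * wl12xx_tx_reset() below drains the global queues and completes any
 * frames still held in wl->tx_frames, so mac80211 receives a Tx status
 * for every skb it handed us.
 */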
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < WL12XX_MAX_LINKS; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout, start_time;
	int i;
	start_time = jiffies;
	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);

	mutex_lock(&wl->mutex);
	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
		mutex_unlock(&wl->mutex);
		goto out;
	}

	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

	while (!time_after(jiffies, timeout)) {
		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));

		/* force Tx and give the driver some time to flush data */
		mutex_unlock(&wl->mutex);
		if (wl1271_tx_total_queue_count(wl))
			wl1271_tx_work(&wl->tx_work);
		msleep(20);
		mutex_lock(&wl->mutex);

		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
				     jiffies_to_msecs(jiffies - start_time));
			goto out_wake;
		}
	}

	wl1271_warning("Unable to flush all TX buffers, "
		       "timed out (timeout %d ms)",
		       WL1271_TX_FLUSH_TIMEOUT / 1000);

	/* forcibly flush all Tx buffers on our queues */
	for (i = 0; i < WL12XX_MAX_LINKS; i++)
		wl1271_tx_reset_link_queues(wl, i);

out_wake:
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->mutex);
out:
	mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);

u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;

	return BIT(__ffs(rate_set));
}
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
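/*
 * wl1271_tx_min_rate_get() above returns the rate *bit*, not its index:
 * for rate_set 0x6 the lowest set bit is bit 1, so the result is
 * BIT(1) == 0x2.
 */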
void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      u8 queue, enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	bool stopped = !!wl->queue_stop_reasons[hwq];

	/* queue should not be stopped for this reason */
	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (stopped)
		return;

	ieee80211_stop_queue(wl->hw, hwq);
}

void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue should not be clear for this reason */
	WARN_ON_ONCE(!test_and_clear_bit(reason,
					 &wl->queue_stop_reasons[hwq]));

	if (wl->queue_stop_reasons[hwq])
		goto out;

	ieee80211_wake_queue(wl->hw, hwq);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as stopped */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(test_and_set_bit(reason,
					      &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know about are stopped.
	 */
	ieee80211_stop_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as awake */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(!test_and_clear_bit(reason,
						 &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know about are woken up.
	 */
	ieee80211_wake_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	bool stopped;

	spin_lock_irqsave(&wl->wl_lock, flags);
	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
							   reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return stopped;
}

bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif,
					      u8 queue,
					      enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}

bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				    u8 queue)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return !!wl->queue_stop_reasons[hwq];
}