1 /* 2 * This file is part of wl1271 3 * 4 * Copyright (C) 2009 Nokia Corporation 5 * 6 * Contact: Luciano Coelho <luciano.coelho@nokia.com> 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * version 2 as published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope that it will be useful, but 13 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, write to the Free Software 19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 20 * 02110-1301 USA 21 * 22 */ 23 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/etherdevice.h> 27 #include <linux/spinlock.h> 28 29 #include "wlcore.h" 30 #include "debug.h" 31 #include "io.h" 32 #include "ps.h" 33 #include "tx.h" 34 #include "event.h" 35 #include "hw_ops.h" 36 37 /* 38 * TODO: this is here just for now, it must be removed when the data 39 * operations are in place. 40 */ 41 #include "../wl12xx/reg.h" 42 43 static int wl1271_set_default_wep_key(struct wl1271 *wl, 44 struct wl12xx_vif *wlvif, u8 id) 45 { 46 int ret; 47 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 48 49 if (is_ap) 50 ret = wl12xx_cmd_set_default_wep_key(wl, id, 51 wlvif->ap.bcast_hlid); 52 else 53 ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid); 54 55 if (ret < 0) 56 return ret; 57 58 wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id); 59 return 0; 60 } 61 62 static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) 63 { 64 int id; 65 66 id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc); 67 if (id >= wl->num_tx_desc) 68 return -EBUSY; 69 70 __set_bit(id, wl->tx_frames_map); 71 wl->tx_frames[id] = skb; 72 wl->tx_frames_cnt++; 73 return id; 74 } 75 76 void wl1271_free_tx_id(struct wl1271 *wl, int id) 77 { 78 if (__test_and_clear_bit(id, wl->tx_frames_map)) { 79 if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc)) 80 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 81 82 wl->tx_frames[id] = NULL; 83 wl->tx_frames_cnt--; 84 } 85 } 86 EXPORT_SYMBOL(wl1271_free_tx_id); 87 88 static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, 89 struct wl12xx_vif *wlvif, 90 struct sk_buff *skb) 91 { 92 struct ieee80211_hdr *hdr; 93 94 hdr = (struct ieee80211_hdr *)(skb->data + 95 sizeof(struct wl1271_tx_hw_descr)); 96 if (!ieee80211_is_auth(hdr->frame_control)) 97 return; 98 99 /* 100 * add the station to the known list before transmitting the 101 * authentication response. this way it won't get de-authed by FW 102 * when transmitting too soon. 103 */ 104 wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1); 105 106 /* 107 * ROC for 1 second on the AP channel for completing the connection. 108 * Note the ROC will be continued by the update_sta_state callbacks 109 * once the station reaches the associated state. 110 */ 111 wlcore_update_inconn_sta(wl, wlvif, NULL, true); 112 wlvif->pending_auth_reply_time = jiffies; 113 cancel_delayed_work(&wlvif->pending_auth_complete_work); 114 ieee80211_queue_delayed_work(wl->hw, 115 &wlvif->pending_auth_complete_work, 116 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT)); 117 } 118 119 static void wl1271_tx_regulate_link(struct wl1271 *wl, 120 struct wl12xx_vif *wlvif, 121 u8 hlid) 122 { 123 bool fw_ps; 124 u8 tx_pkts; 125 126 if (WARN_ON(!test_bit(hlid, wlvif->links_map))) 127 return; 128 129 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map); 130 tx_pkts = wl->links[hlid].allocated_pkts; 131 132 /* 133 * if in FW PS and there is enough data in FW we can put the link 134 * into high-level PS and clean out its TX queues. 135 * Make an exception if this is the only connected link. In this 136 * case FW-memory congestion is less of a problem. 137 * Note that a single connected STA means 2*ap_count + 1 active links, 138 * since we must account for the global and broadcast AP links 139 * for each AP. The "fw_ps" check assures us the other link is a STA 140 * connected to the AP. Otherwise the FW would not set the PSM bit. 141 */ 142 if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps && 143 tx_pkts >= WL1271_PS_STA_MAX_PACKETS) 144 wl12xx_ps_link_start(wl, wlvif, hlid, true); 145 } 146 147 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) 148 { 149 return wl->dummy_packet == skb; 150 } 151 EXPORT_SYMBOL(wl12xx_is_dummy_packet); 152 153 static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, 154 struct sk_buff *skb, struct ieee80211_sta *sta) 155 { 156 if (sta) { 157 struct wl1271_station *wl_sta; 158 159 wl_sta = (struct wl1271_station *)sta->drv_priv; 160 return wl_sta->hlid; 161 } else { 162 struct ieee80211_hdr *hdr; 163 164 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) 165 return wl->system_hlid; 166 167 hdr = (struct ieee80211_hdr *)skb->data; 168 if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) 169 return wlvif->ap.bcast_hlid; 170 else 171 return wlvif->ap.global_hlid; 172 } 173 } 174 175 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, 176 struct sk_buff *skb, struct ieee80211_sta *sta) 177 { 178 struct ieee80211_tx_info *control; 179 180 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 181 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta); 182 183 control = IEEE80211_SKB_CB(skb); 184 if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 185 wl1271_debug(DEBUG_TX, "tx offchannel"); 186 return wlvif->dev_hlid; 187 } 188 189 return wlvif->sta.hlid; 190 } 191 192 unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, 193 unsigned int packet_length) 194 { 195 if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) || 196 !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)) 197 return ALIGN(packet_length, WL1271_TX_ALIGN_TO); 198 else 199 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); 200 } 201 EXPORT_SYMBOL(wlcore_calc_packet_alignment); 202 203 static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, 204 struct sk_buff *skb, u32 extra, u32 buf_offset, 205 u8 hlid, bool is_gem) 206 { 207 struct wl1271_tx_hw_descr *desc; 208 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 209 u32 total_blocks; 210 int id, ret = -EBUSY, ac; 211 u32 spare_blocks; 212 213 if (buf_offset + total_len > wl->aggr_buf_size) 214 return -EAGAIN; 215 216 spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem); 217 218 /* allocate free identifier for the packet */ 219 id = wl1271_alloc_tx_id(wl, skb); 220 if (id < 0) 221 return id; 222 223 total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks); 224 225 if (total_blocks <= wl->tx_blocks_available) { 226 desc = skb_push(skb, total_len - skb->len); 227 228 wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks, 229 spare_blocks); 230 231 desc->id = id; 232 233 wl->tx_blocks_available -= total_blocks; 234 wl->tx_allocated_blocks += total_blocks; 235 236 /* 237 * If the FW was empty before, arm the Tx watchdog. Also do 238 * this on the first Tx after resume, as we always cancel the 239 * watchdog on suspend. 240 */ 241 if (wl->tx_allocated_blocks == total_blocks || 242 test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags)) 243 wl12xx_rearm_tx_watchdog_locked(wl); 244 245 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 246 wl->tx_allocated_pkts[ac]++; 247 248 if (test_bit(hlid, wl->links_map)) 249 wl->links[hlid].allocated_pkts++; 250 251 ret = 0; 252 253 wl1271_debug(DEBUG_TX, 254 "tx_allocate: size: %d, blocks: %d, id: %d", 255 total_len, total_blocks, id); 256 } else { 257 wl1271_free_tx_id(wl, id); 258 } 259 260 return ret; 261 } 262 263 static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, 264 struct sk_buff *skb, u32 extra, 265 struct ieee80211_tx_info *control, u8 hlid) 266 { 267 struct wl1271_tx_hw_descr *desc; 268 int ac, rate_idx; 269 s64 hosttime; 270 u16 tx_attr = 0; 271 __le16 frame_control; 272 struct ieee80211_hdr *hdr; 273 u8 *frame_start; 274 bool is_dummy; 275 276 desc = (struct wl1271_tx_hw_descr *) skb->data; 277 frame_start = (u8 *)(desc + 1); 278 hdr = (struct ieee80211_hdr *)(frame_start + extra); 279 frame_control = hdr->frame_control; 280 281 /* relocate space for security header */ 282 if (extra) { 283 int hdrlen = ieee80211_hdrlen(frame_control); 284 memmove(frame_start, hdr, hdrlen); 285 skb_set_network_header(skb, skb_network_offset(skb) + extra); 286 } 287 288 /* configure packet life time */ 289 hosttime = (ktime_get_boot_ns() >> 10); 290 desc->start_time = cpu_to_le32(hosttime - wl->time_offset); 291 292 is_dummy = wl12xx_is_dummy_packet(wl, skb); 293 if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS) 294 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); 295 else 296 desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU); 297 298 /* queue */ 299 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 300 desc->tid = skb->priority; 301 302 if (is_dummy) { 303 /* 304 * FW expects the dummy packet to have an invalid session id - 305 * any session id that is different than the one set in the join 306 */ 307 tx_attr = (SESSION_COUNTER_INVALID << 308 TX_HW_ATTR_OFST_SESSION_COUNTER) & 309 TX_HW_ATTR_SESSION_COUNTER; 310 311 tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ; 312 } else if (wlvif) { 313 u8 session_id = wl->session_ids[hlid]; 314 315 if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) && 316 (wlvif->bss_type == BSS_TYPE_AP_BSS)) 317 session_id = 0; 318 319 /* configure the tx attributes */ 320 tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER; 321 } 322 323 desc->hlid = hlid; 324 if (is_dummy || !wlvif) 325 rate_idx = 0; 326 else if (wlvif->bss_type != BSS_TYPE_AP_BSS) { 327 /* 328 * if the packets are data packets 329 * send them with AP rate policies (EAPOLs are an exception), 330 * otherwise use default basic rates 331 */ 332 if (skb->protocol == cpu_to_be16(ETH_P_PAE)) 333 rate_idx = wlvif->sta.basic_rate_idx; 334 else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 335 rate_idx = wlvif->sta.p2p_rate_idx; 336 else if (ieee80211_is_data(frame_control)) 337 rate_idx = wlvif->sta.ap_rate_idx; 338 else 339 rate_idx = wlvif->sta.basic_rate_idx; 340 } else { 341 if (hlid == wlvif->ap.global_hlid) 342 rate_idx = wlvif->ap.mgmt_rate_idx; 343 else if (hlid == wlvif->ap.bcast_hlid || 344 skb->protocol == cpu_to_be16(ETH_P_PAE) || 345 !ieee80211_is_data(frame_control)) 346 /* 347 * send non-data, bcast and EAPOLs using the 348 * min basic rate 349 */ 350 rate_idx = wlvif->ap.bcast_rate_idx; 351 else 352 rate_idx = wlvif->ap.ucast_rate_idx[ac]; 353 } 354 355 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; 356 357 /* for WEP shared auth - no fw encryption is needed */ 358 if (ieee80211_is_auth(frame_control) && 359 ieee80211_has_protected(frame_control)) 360 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT; 361 362 /* send EAPOL frames as voice */ 363 if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) 364 tx_attr |= TX_HW_ATTR_EAPOL_FRAME; 365 366 desc->tx_attr = cpu_to_le16(tx_attr); 367 368 wlcore_hw_set_tx_desc_csum(wl, desc, skb); 369 wlcore_hw_set_tx_desc_data_len(wl, desc, skb); 370 } 371 372 /* caller must hold wl->mutex */ 373 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, 374 struct sk_buff *skb, u32 buf_offset, u8 hlid) 375 { 376 struct ieee80211_tx_info *info; 377 u32 extra = 0; 378 int ret = 0; 379 u32 total_len; 380 bool is_dummy; 381 bool is_gem = false; 382 383 if (!skb) { 384 wl1271_error("discarding null skb"); 385 return -EINVAL; 386 } 387 388 if (hlid == WL12XX_INVALID_LINK_ID) { 389 wl1271_error("invalid hlid. dropping skb 0x%p", skb); 390 return -EINVAL; 391 } 392 393 info = IEEE80211_SKB_CB(skb); 394 395 is_dummy = wl12xx_is_dummy_packet(wl, skb); 396 397 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && 398 info->control.hw_key && 399 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) 400 extra = WL1271_EXTRA_SPACE_TKIP; 401 402 if (info->control.hw_key) { 403 bool is_wep; 404 u8 idx = info->control.hw_key->hw_key_idx; 405 u32 cipher = info->control.hw_key->cipher; 406 407 is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || 408 (cipher == WLAN_CIPHER_SUITE_WEP104); 409 410 if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) { 411 ret = wl1271_set_default_wep_key(wl, wlvif, idx); 412 if (ret < 0) 413 return ret; 414 wlvif->default_key = idx; 415 } 416 417 is_gem = (cipher == WL1271_CIPHER_SUITE_GEM); 418 } 419 420 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid, 421 is_gem); 422 if (ret < 0) 423 return ret; 424 425 wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid); 426 427 if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) { 428 wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb); 429 wl1271_tx_regulate_link(wl, wlvif, hlid); 430 } 431 432 /* 433 * The length of each packet is stored in terms of 434 * words. Thus, we must pad the skb data to make sure its 435 * length is aligned. The number of padding bytes is computed 436 * and set in wl1271_tx_fill_hdr. 437 * In special cases, we want to align to a specific block size 438 * (eg. for wl128x with SDIO we align to 256). 439 */ 440 total_len = wlcore_calc_packet_alignment(wl, skb->len); 441 442 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); 443 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); 444 445 /* Revert side effects in the dummy packet skb, so it can be reused */ 446 if (is_dummy) 447 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 448 449 return total_len; 450 } 451 452 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, 453 enum nl80211_band rate_band) 454 { 455 struct ieee80211_supported_band *band; 456 u32 enabled_rates = 0; 457 int bit; 458 459 band = wl->hw->wiphy->bands[rate_band]; 460 for (bit = 0; bit < band->n_bitrates; bit++) { 461 if (rate_set & 0x1) 462 enabled_rates |= band->bitrates[bit].hw_value; 463 rate_set >>= 1; 464 } 465 466 /* MCS rates indication are on bits 16 - 31 */ 467 rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates; 468 469 for (bit = 0; bit < 16; bit++) { 470 if (rate_set & 0x1) 471 enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit); 472 rate_set >>= 1; 473 } 474 475 return enabled_rates; 476 } 477 478 void wl1271_handle_tx_low_watermark(struct wl1271 *wl) 479 { 480 int i; 481 struct wl12xx_vif *wlvif; 482 483 wl12xx_for_each_wlvif(wl, wlvif) { 484 for (i = 0; i < NUM_TX_QUEUES; i++) { 485 if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i, 486 WLCORE_QUEUE_STOP_REASON_WATERMARK) && 487 wlvif->tx_queue_count[i] <= 488 WL1271_TX_QUEUE_LOW_WATERMARK) 489 /* firmware buffer has space, restart queues */ 490 wlcore_wake_queue(wl, wlvif, i, 491 WLCORE_QUEUE_STOP_REASON_WATERMARK); 492 } 493 } 494 } 495 496 static int wlcore_select_ac(struct wl1271 *wl) 497 { 498 int i, q = -1, ac; 499 u32 min_pkts = 0xffffffff; 500 501 /* 502 * Find a non-empty ac where: 503 * 1. There are packets to transmit 504 * 2. The FW has the least allocated blocks 505 * 506 * We prioritize the ACs according to VO>VI>BE>BK 507 */ 508 for (i = 0; i < NUM_TX_QUEUES; i++) { 509 ac = wl1271_tx_get_queue(i); 510 if (wl->tx_queue_count[ac] && 511 wl->tx_allocated_pkts[ac] < min_pkts) { 512 q = ac; 513 min_pkts = wl->tx_allocated_pkts[q]; 514 } 515 } 516 517 return q; 518 } 519 520 static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl, 521 struct wl1271_link *lnk, u8 q) 522 { 523 struct sk_buff *skb; 524 unsigned long flags; 525 526 skb = skb_dequeue(&lnk->tx_queue[q]); 527 if (skb) { 528 spin_lock_irqsave(&wl->wl_lock, flags); 529 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); 530 wl->tx_queue_count[q]--; 531 if (lnk->wlvif) { 532 WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0); 533 lnk->wlvif->tx_queue_count[q]--; 534 } 535 spin_unlock_irqrestore(&wl->wl_lock, flags); 536 } 537 538 return skb; 539 } 540 541 static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl, 542 u8 hlid, u8 ac, 543 u8 *low_prio_hlid) 544 { 545 struct wl1271_link *lnk = &wl->links[hlid]; 546 547 if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) { 548 if (*low_prio_hlid == WL12XX_INVALID_LINK_ID && 549 !skb_queue_empty(&lnk->tx_queue[ac]) && 550 wlcore_hw_lnk_low_prio(wl, hlid, lnk)) 551 /* we found the first non-empty low priority queue */ 552 *low_prio_hlid = hlid; 553 554 return NULL; 555 } 556 557 return wlcore_lnk_dequeue(wl, lnk, ac); 558 } 559 560 static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl, 561 struct wl12xx_vif *wlvif, 562 u8 ac, u8 *hlid, 563 u8 *low_prio_hlid) 564 { 565 struct sk_buff *skb = NULL; 566 int i, h, start_hlid; 567 568 /* start from the link after the last one */ 569 start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links; 570 571 /* dequeue according to AC, round robin on each link */ 572 for (i = 0; i < wl->num_links; i++) { 573 h = (start_hlid + i) % wl->num_links; 574 575 /* only consider connected stations */ 576 if (!test_bit(h, wlvif->links_map)) 577 continue; 578 579 skb = wlcore_lnk_dequeue_high_prio(wl, h, ac, 580 low_prio_hlid); 581 if (!skb) 582 continue; 583 584 wlvif->last_tx_hlid = h; 585 break; 586 } 587 588 if (!skb) 589 wlvif->last_tx_hlid = 0; 590 591 *hlid = wlvif->last_tx_hlid; 592 return skb; 593 } 594 595 static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid) 596 { 597 unsigned long flags; 598 struct wl12xx_vif *wlvif = wl->last_wlvif; 599 struct sk_buff *skb = NULL; 600 int ac; 601 u8 low_prio_hlid = WL12XX_INVALID_LINK_ID; 602 603 ac = wlcore_select_ac(wl); 604 if (ac < 0) 605 goto out; 606 607 /* continue from last wlvif (round robin) */ 608 if (wlvif) { 609 wl12xx_for_each_wlvif_continue(wl, wlvif) { 610 if (!wlvif->tx_queue_count[ac]) 611 continue; 612 613 skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid, 614 &low_prio_hlid); 615 if (!skb) 616 continue; 617 618 wl->last_wlvif = wlvif; 619 break; 620 } 621 } 622 623 /* dequeue from the system HLID before the restarting wlvif list */ 624 if (!skb) { 625 skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid, 626 ac, &low_prio_hlid); 627 if (skb) { 628 *hlid = wl->system_hlid; 629 wl->last_wlvif = NULL; 630 } 631 } 632 633 /* Do a new pass over the wlvif list. But no need to continue 634 * after last_wlvif. The previous pass should have found it. */ 635 if (!skb) { 636 wl12xx_for_each_wlvif(wl, wlvif) { 637 if (!wlvif->tx_queue_count[ac]) 638 goto next; 639 640 skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid, 641 &low_prio_hlid); 642 if (skb) { 643 wl->last_wlvif = wlvif; 644 break; 645 } 646 647 next: 648 if (wlvif == wl->last_wlvif) 649 break; 650 } 651 } 652 653 /* no high priority skbs found - but maybe a low priority one? */ 654 if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) { 655 struct wl1271_link *lnk = &wl->links[low_prio_hlid]; 656 skb = wlcore_lnk_dequeue(wl, lnk, ac); 657 658 WARN_ON(!skb); /* we checked this before */ 659 *hlid = low_prio_hlid; 660 661 /* ensure proper round robin in the vif/link levels */ 662 wl->last_wlvif = lnk->wlvif; 663 if (lnk->wlvif) 664 lnk->wlvif->last_tx_hlid = low_prio_hlid; 665 666 } 667 668 out: 669 if (!skb && 670 test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { 671 int q; 672 673 skb = wl->dummy_packet; 674 *hlid = wl->system_hlid; 675 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 676 spin_lock_irqsave(&wl->wl_lock, flags); 677 WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); 678 wl->tx_queue_count[q]--; 679 spin_unlock_irqrestore(&wl->wl_lock, flags); 680 } 681 682 return skb; 683 } 684 685 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, 686 struct sk_buff *skb, u8 hlid) 687 { 688 unsigned long flags; 689 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 690 691 if (wl12xx_is_dummy_packet(wl, skb)) { 692 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); 693 } else { 694 skb_queue_head(&wl->links[hlid].tx_queue[q], skb); 695 696 /* make sure we dequeue the same packet next time */ 697 wlvif->last_tx_hlid = (hlid + wl->num_links - 1) % 698 wl->num_links; 699 } 700 701 spin_lock_irqsave(&wl->wl_lock, flags); 702 wl->tx_queue_count[q]++; 703 if (wlvif) 704 wlvif->tx_queue_count[q]++; 705 spin_unlock_irqrestore(&wl->wl_lock, flags); 706 } 707 708 static bool wl1271_tx_is_data_present(struct sk_buff *skb) 709 { 710 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 711 712 return ieee80211_is_data_present(hdr->frame_control); 713 } 714 715 void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids) 716 { 717 struct wl12xx_vif *wlvif; 718 u32 timeout; 719 u8 hlid; 720 721 if (!wl->conf.rx_streaming.interval) 722 return; 723 724 if (!wl->conf.rx_streaming.always && 725 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)) 726 return; 727 728 timeout = wl->conf.rx_streaming.duration; 729 wl12xx_for_each_wlvif_sta(wl, wlvif) { 730 bool found = false; 731 for_each_set_bit(hlid, active_hlids, wl->num_links) { 732 if (test_bit(hlid, wlvif->links_map)) { 733 found = true; 734 break; 735 } 736 } 737 738 if (!found) 739 continue; 740 741 /* enable rx streaming */ 742 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) 743 ieee80211_queue_work(wl->hw, 744 &wlvif->rx_streaming_enable_work); 745 746 mod_timer(&wlvif->rx_streaming_timer, 747 jiffies + msecs_to_jiffies(timeout)); 748 } 749 } 750 751 /* 752 * Returns failure values only in case of failed bus ops within this function. 753 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid 754 * triggering recovery by higher layers when not necessary. 755 * In case a FW command fails within wl1271_prepare_tx_frame fails a recovery 756 * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame 757 * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING 758 * within prepare_tx_frame code but there's nothing we should do about those 759 * as well. 760 */ 761 int wlcore_tx_work_locked(struct wl1271 *wl) 762 { 763 struct wl12xx_vif *wlvif; 764 struct sk_buff *skb; 765 struct wl1271_tx_hw_descr *desc; 766 u32 buf_offset = 0, last_len = 0; 767 bool sent_packets = false; 768 unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0}; 769 int ret = 0; 770 int bus_ret = 0; 771 u8 hlid; 772 773 if (unlikely(wl->state != WLCORE_STATE_ON)) 774 return 0; 775 776 while ((skb = wl1271_skb_dequeue(wl, &hlid))) { 777 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 778 bool has_data = false; 779 780 wlvif = NULL; 781 if (!wl12xx_is_dummy_packet(wl, skb)) 782 wlvif = wl12xx_vif_to_data(info->control.vif); 783 else 784 hlid = wl->system_hlid; 785 786 has_data = wlvif && wl1271_tx_is_data_present(skb); 787 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset, 788 hlid); 789 if (ret == -EAGAIN) { 790 /* 791 * Aggregation buffer is full. 792 * Flush buffer and try again. 793 */ 794 wl1271_skb_queue_head(wl, wlvif, skb, hlid); 795 796 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, 797 last_len); 798 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, 799 wl->aggr_buf, buf_offset, true); 800 if (bus_ret < 0) 801 goto out; 802 803 sent_packets = true; 804 buf_offset = 0; 805 continue; 806 } else if (ret == -EBUSY) { 807 /* 808 * Firmware buffer is full. 809 * Queue back last skb, and stop aggregating. 810 */ 811 wl1271_skb_queue_head(wl, wlvif, skb, hlid); 812 /* No work left, avoid scheduling redundant tx work */ 813 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 814 goto out_ack; 815 } else if (ret < 0) { 816 if (wl12xx_is_dummy_packet(wl, skb)) 817 /* 818 * fw still expects dummy packet, 819 * so re-enqueue it 820 */ 821 wl1271_skb_queue_head(wl, wlvif, skb, hlid); 822 else 823 ieee80211_free_txskb(wl->hw, skb); 824 goto out_ack; 825 } 826 last_len = ret; 827 buf_offset += last_len; 828 wl->tx_packets_count++; 829 if (has_data) { 830 desc = (struct wl1271_tx_hw_descr *) skb->data; 831 __set_bit(desc->hlid, active_hlids); 832 } 833 } 834 835 out_ack: 836 if (buf_offset) { 837 buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len); 838 bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, 839 buf_offset, true); 840 if (bus_ret < 0) 841 goto out; 842 843 sent_packets = true; 844 } 845 if (sent_packets) { 846 /* 847 * Interrupt the firmware with the new packets. This is only 848 * required for older hardware revisions 849 */ 850 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) { 851 bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS, 852 wl->tx_packets_count); 853 if (bus_ret < 0) 854 goto out; 855 } 856 857 wl1271_handle_tx_low_watermark(wl); 858 } 859 wl12xx_rearm_rx_streaming(wl, active_hlids); 860 861 out: 862 return bus_ret; 863 } 864 865 void wl1271_tx_work(struct work_struct *work) 866 { 867 struct wl1271 *wl = container_of(work, struct wl1271, tx_work); 868 int ret; 869 870 mutex_lock(&wl->mutex); 871 ret = wl1271_ps_elp_wakeup(wl); 872 if (ret < 0) 873 goto out; 874 875 ret = wlcore_tx_work_locked(wl); 876 if (ret < 0) { 877 wl12xx_queue_recovery_work(wl); 878 goto out; 879 } 880 881 wl1271_ps_elp_sleep(wl); 882 out: 883 mutex_unlock(&wl->mutex); 884 } 885 886 static u8 wl1271_tx_get_rate_flags(u8 rate_class_index) 887 { 888 u8 flags = 0; 889 890 /* 891 * TODO: use wl12xx constants when this code is moved to wl12xx, as 892 * only it uses Tx-completion. 893 */ 894 if (rate_class_index <= 8) 895 flags |= IEEE80211_TX_RC_MCS; 896 897 /* 898 * TODO: use wl12xx constants when this code is moved to wl12xx, as 899 * only it uses Tx-completion. 900 */ 901 if (rate_class_index == 0) 902 flags |= IEEE80211_TX_RC_SHORT_GI; 903 904 return flags; 905 } 906 907 static void wl1271_tx_complete_packet(struct wl1271 *wl, 908 struct wl1271_tx_hw_res_descr *result) 909 { 910 struct ieee80211_tx_info *info; 911 struct ieee80211_vif *vif; 912 struct wl12xx_vif *wlvif; 913 struct sk_buff *skb; 914 int id = result->id; 915 int rate = -1; 916 u8 rate_flags = 0; 917 u8 retries = 0; 918 919 /* check for id legality */ 920 if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) { 921 wl1271_warning("TX result illegal id: %d", id); 922 return; 923 } 924 925 skb = wl->tx_frames[id]; 926 info = IEEE80211_SKB_CB(skb); 927 928 if (wl12xx_is_dummy_packet(wl, skb)) { 929 wl1271_free_tx_id(wl, id); 930 return; 931 } 932 933 /* info->control is valid as long as we don't update info->status */ 934 vif = info->control.vif; 935 wlvif = wl12xx_vif_to_data(vif); 936 937 /* update the TX status info */ 938 if (result->status == TX_SUCCESS) { 939 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 940 info->flags |= IEEE80211_TX_STAT_ACK; 941 rate = wlcore_rate_to_idx(wl, result->rate_class_index, 942 wlvif->band); 943 rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index); 944 retries = result->ack_failures; 945 } else if (result->status == TX_RETRY_EXCEEDED) { 946 wl->stats.excessive_retries++; 947 retries = result->ack_failures; 948 } 949 950 info->status.rates[0].idx = rate; 951 info->status.rates[0].count = retries; 952 info->status.rates[0].flags = rate_flags; 953 info->status.ack_signal = -1; 954 955 wl->stats.retry_count += result->ack_failures; 956 957 /* remove private header from packet */ 958 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 959 960 /* remove TKIP header space if present */ 961 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && 962 info->control.hw_key && 963 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { 964 int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 965 memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, 966 hdrlen); 967 skb_pull(skb, WL1271_EXTRA_SPACE_TKIP); 968 } 969 970 wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" 971 " status 0x%x", 972 result->id, skb, result->ack_failures, 973 result->rate_class_index, result->status); 974 975 /* return the packet to the stack */ 976 skb_queue_tail(&wl->deferred_tx_queue, skb); 977 queue_work(wl->freezable_wq, &wl->netstack_work); 978 wl1271_free_tx_id(wl, result->id); 979 } 980 981 /* Called upon reception of a TX complete interrupt */ 982 int wlcore_tx_complete(struct wl1271 *wl) 983 { 984 struct wl1271_acx_mem_map *memmap = wl->target_mem_map; 985 u32 count, fw_counter; 986 u32 i; 987 int ret; 988 989 /* read the tx results from the chipset */ 990 ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result), 991 wl->tx_res_if, sizeof(*wl->tx_res_if), false); 992 if (ret < 0) 993 goto out; 994 995 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter); 996 997 /* write host counter to chipset (to ack) */ 998 ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) + 999 offsetof(struct wl1271_tx_hw_res_if, 1000 tx_result_host_counter), fw_counter); 1001 if (ret < 0) 1002 goto out; 1003 1004 count = fw_counter - wl->tx_results_count; 1005 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); 1006 1007 /* verify that the result buffer is not getting overrun */ 1008 if (unlikely(count > TX_HW_RESULT_QUEUE_LEN)) 1009 wl1271_warning("TX result overflow from chipset: %d", count); 1010 1011 /* process the results */ 1012 for (i = 0; i < count; i++) { 1013 struct wl1271_tx_hw_res_descr *result; 1014 u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK; 1015 1016 /* process the packet */ 1017 result = &(wl->tx_res_if->tx_results_queue[offset]); 1018 wl1271_tx_complete_packet(wl, result); 1019 1020 wl->tx_results_count++; 1021 } 1022 1023 out: 1024 return ret; 1025 } 1026 EXPORT_SYMBOL(wlcore_tx_complete); 1027 1028 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) 1029 { 1030 struct sk_buff *skb; 1031 int i; 1032 unsigned long flags; 1033 struct ieee80211_tx_info *info; 1034 int total[NUM_TX_QUEUES]; 1035 struct wl1271_link *lnk = &wl->links[hlid]; 1036 1037 for (i = 0; i < NUM_TX_QUEUES; i++) { 1038 total[i] = 0; 1039 while ((skb = skb_dequeue(&lnk->tx_queue[i]))) { 1040 wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb); 1041 1042 if (!wl12xx_is_dummy_packet(wl, skb)) { 1043 info = IEEE80211_SKB_CB(skb); 1044 info->status.rates[0].idx = -1; 1045 info->status.rates[0].count = 0; 1046 ieee80211_tx_status_ni(wl->hw, skb); 1047 } 1048 1049 total[i]++; 1050 } 1051 } 1052 1053 spin_lock_irqsave(&wl->wl_lock, flags); 1054 for (i = 0; i < NUM_TX_QUEUES; i++) { 1055 wl->tx_queue_count[i] -= total[i]; 1056 if (lnk->wlvif) 1057 lnk->wlvif->tx_queue_count[i] -= total[i]; 1058 } 1059 spin_unlock_irqrestore(&wl->wl_lock, flags); 1060 1061 wl1271_handle_tx_low_watermark(wl); 1062 } 1063 1064 /* caller must hold wl->mutex and TX must be stopped */ 1065 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif) 1066 { 1067 int i; 1068 1069 /* TX failure */ 1070 for_each_set_bit(i, wlvif->links_map, wl->num_links) { 1071 if (wlvif->bss_type == BSS_TYPE_AP_BSS && 1072 i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) { 1073 /* this calls wl12xx_free_link */ 1074 wl1271_free_sta(wl, wlvif, i); 1075 } else { 1076 u8 hlid = i; 1077 wl12xx_free_link(wl, wlvif, &hlid); 1078 } 1079 } 1080 wlvif->last_tx_hlid = 0; 1081 1082 for (i = 0; i < NUM_TX_QUEUES; i++) 1083 wlvif->tx_queue_count[i] = 0; 1084 } 1085 /* caller must hold wl->mutex and TX must be stopped */ 1086 void wl12xx_tx_reset(struct wl1271 *wl) 1087 { 1088 int i; 1089 struct sk_buff *skb; 1090 struct ieee80211_tx_info *info; 1091 1092 /* only reset the queues if something bad happened */ 1093 if (wl1271_tx_total_queue_count(wl) != 0) { 1094 for (i = 0; i < wl->num_links; i++) 1095 wl1271_tx_reset_link_queues(wl, i); 1096 1097 for (i = 0; i < NUM_TX_QUEUES; i++) 1098 wl->tx_queue_count[i] = 0; 1099 } 1100 1101 /* 1102 * Make sure the driver is at a consistent state, in case this 1103 * function is called from a context other than interface removal. 1104 * This call will always wake the TX queues. 1105 */ 1106 wl1271_handle_tx_low_watermark(wl); 1107 1108 for (i = 0; i < wl->num_tx_desc; i++) { 1109 if (wl->tx_frames[i] == NULL) 1110 continue; 1111 1112 skb = wl->tx_frames[i]; 1113 wl1271_free_tx_id(wl, i); 1114 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 1115 1116 if (!wl12xx_is_dummy_packet(wl, skb)) { 1117 /* 1118 * Remove private headers before passing the skb to 1119 * mac80211 1120 */ 1121 info = IEEE80211_SKB_CB(skb); 1122 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 1123 if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && 1124 info->control.hw_key && 1125 info->control.hw_key->cipher == 1126 WLAN_CIPHER_SUITE_TKIP) { 1127 int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1128 memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, 1129 skb->data, hdrlen); 1130 skb_pull(skb, WL1271_EXTRA_SPACE_TKIP); 1131 } 1132 1133 info->status.rates[0].idx = -1; 1134 info->status.rates[0].count = 0; 1135 1136 ieee80211_tx_status_ni(wl->hw, skb); 1137 } 1138 } 1139 } 1140 1141 #define WL1271_TX_FLUSH_TIMEOUT 500000 1142 1143 /* caller must *NOT* hold wl->mutex */ 1144 void wl1271_tx_flush(struct wl1271 *wl) 1145 { 1146 unsigned long timeout, start_time; 1147 int i; 1148 start_time = jiffies; 1149 timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); 1150 1151 /* only one flush should be in progress, for consistent queue state */ 1152 mutex_lock(&wl->flush_mutex); 1153 1154 mutex_lock(&wl->mutex); 1155 if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) { 1156 mutex_unlock(&wl->mutex); 1157 goto out; 1158 } 1159 1160 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); 1161 1162 while (!time_after(jiffies, timeout)) { 1163 wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d", 1164 wl->tx_frames_cnt, 1165 wl1271_tx_total_queue_count(wl)); 1166 1167 /* force Tx and give the driver some time to flush data */ 1168 mutex_unlock(&wl->mutex); 1169 if (wl1271_tx_total_queue_count(wl)) 1170 wl1271_tx_work(&wl->tx_work); 1171 msleep(20); 1172 mutex_lock(&wl->mutex); 1173 1174 if ((wl->tx_frames_cnt == 0) && 1175 (wl1271_tx_total_queue_count(wl) == 0)) { 1176 wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms", 1177 jiffies_to_msecs(jiffies - start_time)); 1178 goto out_wake; 1179 } 1180 } 1181 1182 wl1271_warning("Unable to flush all TX buffers, " 1183 "timed out (timeout %d ms", 1184 WL1271_TX_FLUSH_TIMEOUT / 1000); 1185 1186 /* forcibly flush all Tx buffers on our queues */ 1187 for (i = 0; i < wl->num_links; i++) 1188 wl1271_tx_reset_link_queues(wl, i); 1189 1190 out_wake: 1191 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH); 1192 mutex_unlock(&wl->mutex); 1193 out: 1194 mutex_unlock(&wl->flush_mutex); 1195 } 1196 EXPORT_SYMBOL_GPL(wl1271_tx_flush); 1197 1198 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) 1199 { 1200 if (WARN_ON(!rate_set)) 1201 return 0; 1202 1203 return BIT(__ffs(rate_set)); 1204 } 1205 EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get); 1206 1207 void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, 1208 u8 queue, enum wlcore_queue_stop_reason reason) 1209 { 1210 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); 1211 bool stopped = !!wl->queue_stop_reasons[hwq]; 1212 1213 /* queue should not be stopped for this reason */ 1214 WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq])); 1215 1216 if (stopped) 1217 return; 1218 1219 ieee80211_stop_queue(wl->hw, hwq); 1220 } 1221 1222 void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, 1223 enum wlcore_queue_stop_reason reason) 1224 { 1225 unsigned long flags; 1226 1227 spin_lock_irqsave(&wl->wl_lock, flags); 1228 wlcore_stop_queue_locked(wl, wlvif, queue, reason); 1229 spin_unlock_irqrestore(&wl->wl_lock, flags); 1230 } 1231 1232 void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, 1233 enum wlcore_queue_stop_reason reason) 1234 { 1235 unsigned long flags; 1236 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); 1237 1238 spin_lock_irqsave(&wl->wl_lock, flags); 1239 1240 /* queue should not be clear for this reason */ 1241 WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq])); 1242 1243 if (wl->queue_stop_reasons[hwq]) 1244 goto out; 1245 1246 ieee80211_wake_queue(wl->hw, hwq); 1247 1248 out: 1249 spin_unlock_irqrestore(&wl->wl_lock, flags); 1250 } 1251 1252 void wlcore_stop_queues(struct wl1271 *wl, 1253 enum wlcore_queue_stop_reason reason) 1254 { 1255 int i; 1256 unsigned long flags; 1257 1258 spin_lock_irqsave(&wl->wl_lock, flags); 1259 1260 /* mark all possible queues as stopped */ 1261 for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++) 1262 WARN_ON_ONCE(test_and_set_bit(reason, 1263 &wl->queue_stop_reasons[i])); 1264 1265 /* use the global version to make sure all vifs in mac80211 we don't 1266 * know are stopped. 1267 */ 1268 ieee80211_stop_queues(wl->hw); 1269 1270 spin_unlock_irqrestore(&wl->wl_lock, flags); 1271 } 1272 1273 void wlcore_wake_queues(struct wl1271 *wl, 1274 enum wlcore_queue_stop_reason reason) 1275 { 1276 int i; 1277 unsigned long flags; 1278 1279 spin_lock_irqsave(&wl->wl_lock, flags); 1280 1281 /* mark all possible queues as awake */ 1282 for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++) 1283 WARN_ON_ONCE(!test_and_clear_bit(reason, 1284 &wl->queue_stop_reasons[i])); 1285 1286 /* use the global version to make sure all vifs in mac80211 we don't 1287 * know are woken up. 1288 */ 1289 ieee80211_wake_queues(wl->hw); 1290 1291 spin_unlock_irqrestore(&wl->wl_lock, flags); 1292 } 1293 1294 bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, 1295 struct wl12xx_vif *wlvif, u8 queue, 1296 enum wlcore_queue_stop_reason reason) 1297 { 1298 unsigned long flags; 1299 bool stopped; 1300 1301 spin_lock_irqsave(&wl->wl_lock, flags); 1302 stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue, 1303 reason); 1304 spin_unlock_irqrestore(&wl->wl_lock, flags); 1305 1306 return stopped; 1307 } 1308 1309 bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl, 1310 struct wl12xx_vif *wlvif, u8 queue, 1311 enum wlcore_queue_stop_reason reason) 1312 { 1313 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); 1314 1315 assert_spin_locked(&wl->wl_lock); 1316 return test_bit(reason, &wl->queue_stop_reasons[hwq]); 1317 } 1318 1319 bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif, 1320 u8 queue) 1321 { 1322 int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue); 1323 1324 assert_spin_locked(&wl->wl_lock); 1325 return !!wl->queue_stop_reasons[hwq]; 1326 } 1327