/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"

static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
			  u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"BAR sent to %pM, tid %d, ssn %d",
				addr, tid, ssn);
}

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))
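/*
 * Illustrative sketch (not part of the original driver): OPT_HDR is
 * used below to walk the IPv6 extension header chain.  For a packet
 * with a single Hop-by-Hop header, the walk starts at
 * off = sizeof(struct ipv6hdr) = 40 and advances by
 * ipv6_optlen(hp) = (hp->hdrlen + 1) << 3 bytes per header:
 *
 *	hp = OPT_HDR(struct ipv6_opt_hdr, skb, 40);
 *	protocol = hp->nexthdr;		// hopefully IPPROTO_TCP/UDP
 *	off += ipv6_optlen(hp);		// e.g. 40 + 8 = 48
 */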
static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info,
			   u16 offload_assist)
{
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/*
	 * MAC header length should include the IV, whose size is counted
	 * in words, unless the IV is added by the firmware, as in WEP.
	 * In the new Tx API, the IV is always added by the firmware.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

out:
#endif
	return offload_assist;
}
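/*
 * Worked example (illustrative, not from the original source): for a
 * QoS data frame protected by CCMP on pre-new-Tx-API hardware, the
 * MAC header is 26 bytes and the IV adds 8, so
 * mh_len = (26 + 8) / 2 = 17 units of 16 bits.  offload_assist then
 * carries BIT(TX_CMD_OFFLD_L4_EN), the fixed 4-word IP header offset
 * in TX_CMD_OFFLD_IP_HDR and 17 in TX_CMD_OFFLD_MH_SIZE, plus
 * BIT(TX_CMD_OFFLD_L3_EN) if this is an IPv4 A-MSDU whose IP
 * checksum is offloaded as well.
 */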
/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u16 offload_assist = 0;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		if (ieee80211_is_data(fc))
			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		else
			tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;

		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_tspec is set to IWL_MAX_TID_COUNT */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
		TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, but we don't
		 * support it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;

	/* padding is inserted later in transport */
	if (ieee80211_hdrlen(fc) % 4 &&
	    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
		offload_assist |= BIT(TX_CMD_OFFLD_PAD);

	tx_cmd->offload_assist |=
		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
					    offload_assist));
}

static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
			      struct ieee80211_tx_info *info,
			      struct ieee80211_sta *sta, __le16 fc)
{
	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;

	if (sta && ieee80211_is_data(fc)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
	}

	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
}

static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
			       struct ieee80211_tx_info *info,
			       struct ieee80211_sta *sta)
{
	int rate_idx;
	u8 rate_plcp;
	u32 rate_flags = 0;

	/* HT rate doesn't make sense for a non data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
		  info->control.rates[0].flags,
		  info->control.rates[0].idx);

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;

	/* For 2.4 GHz band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	return (u32)rate_plcp | rate_flags;
}

static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
				       struct ieee80211_tx_info *info,
				       struct ieee80211_sta *sta, __le16 fc)
{
	return iwl_mvm_get_tx_rate(mvm, info, sta) |
		iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}

/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			     struct ieee80211_tx_info *info,
			     struct ieee80211_sta *sta, __le16 fc)
{
	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * For data packets, rate info comes from the table inside the fw.
	 * This table is controlled by LINK_QUALITY commands.
	 */

	if (ieee80211_is_data(fc) && sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
			tx_cmd->initial_rate_index = 0;
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
			return;
		}
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags =
		cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
}
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
					 u8 *crypto_hdr)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u64 pn;

	pn = atomic64_inc_return(&keyconf->tx_pn);
	crypto_hdr[0] = pn;
	crypto_hdr[2] = 0;
	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
	crypto_hdr[1] = pn >> 8;
	crypto_hdr[4] = pn >> 16;
	crypto_hdr[5] = pn >> 24;
	crypto_hdr[6] = pn >> 32;
	crypto_hdr[7] = pn >> 40;
}
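/*
 * Illustrative layout (assumed values): for pn = 0x010203040506 and
 * keyidx = 0, the CCMP/GCMP header built above is
 *
 *	crypto_hdr[] = { 0x06, 0x05, 0x00, 0x20, 0x04, 0x03, 0x02, 0x01 }
 *
 * i.e. PN0/PN1, a reserved byte, the key-ID octet with the ExtIV bit
 * (0x20) set, then PN2..PN5.
 */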
/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		pn = atomic64_inc_return(&keyconf->tx_pn);
		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			 TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		type = TX_CMD_SEC_GCMP;
		/* Fall through */
	case WLAN_CIPHER_SUITE_CCMP_256:
		/* TODO: Taking the key from the table might introduce a race
		 * when PTK rekeying is done, having old packets with a PN
		 * based on the old key but the message encrypted with a new
		 * one.
		 * Need to handle this.
		 */
		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
		tx_cmd->key[0] = keyconf->hw_key_idx;
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;
	default:
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}
/*
 * Allocates and sets up the Tx cmd for the given frame
 */
static struct iwl_device_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      struct ieee80211_tx_info *info, int hdrlen,
		      struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

	if (unlikely(!dev_cmd))
		return NULL;

	/* Make sure we zero enough of dev_cmd */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));

	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
	dev_cmd->hdr.cmd = TX_CMD;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		u16 offload_assist = 0;
		u32 rate_n_flags = 0;
		u16 flags = 0;
		struct iwl_mvm_sta *mvmsta = sta ?
			iwl_mvm_sta_from_mac80211(sta) : NULL;

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			u8 *qc = ieee80211_get_qos_ctl(hdr);

			if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
				offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
		}

		offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
						 offload_assist);

		/* padding is inserted later in transport */
		if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
		    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
			offload_assist |= BIT(TX_CMD_OFFLD_PAD);

		if (!info->control.hw_key)
			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

		/*
		 * For data packets rate info comes from the fw. Only
		 * set rate/antenna during connection establishment.
		 */
		if (sta && (!ieee80211_is_data(hdr->frame_control) ||
			    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
			flags |= IWL_TX_FLAGS_CMD_RATE;
			rate_n_flags =
				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
							    hdr->frame_control);
		}

		if (mvm->trans->cfg->device_family >=
		    IWL_DEVICE_FAMILY_22560) {
			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;

			cmd->offload_assist |= cpu_to_le32(offload_assist);

			/* Total # bytes to be transmitted */
			cmd->len = cpu_to_le16((u16)skb->len);

			/* Copy MAC header from skb into command buffer */
			memcpy(cmd->hdr, hdr, hdrlen);

			cmd->flags = cpu_to_le16(flags);
			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
		} else {
			struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;

			cmd->offload_assist |= cpu_to_le16(offload_assist);

			/* Total # bytes to be transmitted */
			cmd->len = cpu_to_le16((u16)skb->len);

			/* Copy MAC header from skb into command buffer */
			memcpy(cmd->hdr, hdr, hdrlen);

			cmd->flags = cpu_to_le32(flags);
			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
		}
		goto out;
	}

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

out:
	return dev_cmd;
}

static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
				       struct iwl_device_cmd *cmd)
{
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

	memset(&skb_info->status, 0, sizeof(skb_info->status));
	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));

	skb_info->driver_data[1] = cmd;
}
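/*
 * Note (added for clarity): driver_data[1] stashes the device command
 * so that the reclaim paths below (iwl_mvm_rx_tx_cmd_single() and
 * iwl_mvm_tx_reclaim()) can hand it back to the transport through
 * iwl_trans_free_tx_cmd() once the frame's fate has been reported.
 */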
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info, __le16 fc)
{
	struct iwl_mvm_vif *mvmvif;

	mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		/*
		 * Non-bufferable frames use the broadcast station, thus they
		 * use the probe queue.
		 * Also take care of the case where we send a deauth to a
		 * station that we don't have, or similarly an association
		 * response (with non-success status) for a station we can't
		 * accept.
		 * Also, disassociate frames might happen, particularly with
		 * reason 7 ("Class 3 frame received from nonassociated STA").
		 */
		if (ieee80211_is_mgmt(fc) &&
		    (!ieee80211_is_bufferable_mmpdu(fc) ||
		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
			return mvm->probe_queue;
		if (info->hw_queue == info->control.vif->cab_queue)
			return mvmvif->cab_queue;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
			  "fc=0x%02x", le16_to_cpu(fc));
		return mvm->probe_queue;
	case NL80211_IFTYPE_P2P_DEVICE:
		if (ieee80211_is_mgmt(fc))
			return mvm->p2p_dev_queue;
		if (info->hw_queue == info->control.vif->cab_queue)
			return mvmvif->cab_queue;

		WARN_ON_ONCE(1);
		return mvm->p2p_dev_queue;
	default:
		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
		return -1;
	}
}

static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_mvm_vif *mvmvif =
		iwl_mvm_vif_from_mac80211(info->control.vif);
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
	struct iwl_probe_resp_data *resp_data;
	u8 *ie, *pos;
	u8 match[] = {
		(WLAN_OUI_WFA >> 16) & 0xff,
		(WLAN_OUI_WFA >> 8) & 0xff,
		WLAN_OUI_WFA & 0xff,
		WLAN_OUI_TYPE_WFA_P2P,
	};

	rcu_read_lock();

	resp_data = rcu_dereference(mvmvif->probe_resp_data);
	if (!resp_data)
		goto out;

	if (!resp_data->notif.noa_active)
		goto out;

	ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
					  mgmt->u.probe_resp.variable,
					  skb->len - base_len,
					  match, 4, 2);
	if (!ie) {
		IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
		goto out;
	}

	if (skb_tailroom(skb) < resp_data->noa_len) {
		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
			IWL_ERR(mvm,
				"Failed to reallocate probe resp\n");
			goto out;
		}
	}

	pos = skb_put(skb, resp_data->noa_len);

	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
	/* Set length of IE body (not including ID and length itself) */
	*pos++ = resp_data->noa_len - 2;
	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
	*pos++ = WLAN_OUI_WFA & 0xff;
	*pos++ = WLAN_OUI_TYPE_WFA_P2P;

	memcpy(pos, &resp_data->notif.noa_attr,
	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));

out:
	rcu_read_unlock();
}
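/*
 * Illustrative byte layout (assuming noa_len = 12): the IE appended
 * above, followed by the NoA attribute copied from the firmware
 * notification, looks like
 *
 *	dd 0a 50 6f 9a 09 <noa_len - 6 bytes of NoA attribute>
 *
 * where 0xdd is WLAN_EID_VENDOR_SPECIFIC, 0x0a is noa_len - 2,
 * 50:6f:9a is WLAN_OUI_WFA and 0x09 is WLAN_OUI_TYPE_WFA_P2P.
 */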
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info info;
	struct iwl_device_cmd *dev_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	__le16 fc = hdr->frame_control;
	int queue = -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
			 (!info.control.vif ||
			  info.hw_queue != info.control.vif->cab_queue)))
		return -1;

	if (info.control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info.control.vif);

		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info.control.vif->type == NL80211_IFTYPE_AP ||
		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
			if (!ieee80211_is_data(hdr->frame_control))
				sta_id = mvmvif->bcast_sta.sta_id;
			else
				sta_id = mvmvif->mcast_sta.sta_id;

			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
							   hdr->frame_control);
		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
			queue = mvm->snif_queue;
			sta_id = mvm->snif_sta.sta_id;
		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
			   info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) {
			/*
			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
			 * that can be used in 2 different types of vifs, P2P &
			 * STATION.
			 * P2P uses the offchannel queue.
			 * STATION (HS2.0) uses the auxiliary context of the FW,
			 * and hence needs to be sent on the aux queue.
			 */
			sta_id = mvm->aux_sta.sta_id;
			queue = mvm->aux_queue;
		}
	}

	if (queue < 0)
		return -1;

	if (unlikely(ieee80211_is_probe_resp(fc)))
		iwl_mvm_probe_resp_set_noa(mvm, skb);

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	return 0;
}

#ifdef CONFIG_INET
static int
iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
		       netdev_features_t netdev_flags,
		       struct sk_buff_head *mpdus_skb)
{
	struct sk_buff *tmp, *next;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	char cb[sizeof(skb->cb)];
	u16 i = 0;
	unsigned int tcp_payload_len;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;

	skb_shinfo(skb)->gso_size = num_subframes * mss;
	memcpy(cb, skb->cb, sizeof(cb));

	next = skb_gso_segment(skb, netdev_flags);
	skb_shinfo(skb)->gso_size = mss;
	if (WARN_ON_ONCE(IS_ERR(next)))
		return -EINVAL;
	else if (next)
		consume_skb(skb);

	while (next) {
		tmp = next;
		next = tmp->next;

		memcpy(tmp->cb, cb, sizeof(tmp->cb));
		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
		 * ETH header for n subframes.
		 */
		tcp_payload_len = skb_tail_pointer(tmp) -
			skb_transport_header(tmp) -
			tcp_hdrlen(tmp) + tmp->data_len;

		if (ipv4)
			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);

		if (tcp_payload_len > mss) {
			skb_shinfo(tmp)->gso_size = mss;
		} else {
			if (ieee80211_is_data_qos(hdr->frame_control)) {
				u8 *qc;

				if (ipv4)
					ip_send_check(ip_hdr(tmp));

				qc = ieee80211_get_qos_ctl((void *)tmp->data);
				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
			}
			skb_shinfo(tmp)->gso_size = 0;
		}

		tmp->prev = NULL;
		tmp->next = NULL;

		__skb_queue_tail(mpdus_skb, tmp);
		i++;
	}

	return 0;
}
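/*
 * Worked example (illustrative numbers): with mss = 1400 and
 * num_subframes = 3, gso_size is temporarily raised to 4200 so that
 * skb_gso_segment() cuts the TSO super-frame into MPDUs of up to
 * three MSS of TCP payload each; the transport later splits every
 * such MPDU into A-MSDU subframes of one MSS.  A shorter last MPDU
 * gets gso_size = 0 and its A-MSDU-present bit cleared above.
 */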
static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
					   struct ieee80211_sta *sta,
					   unsigned int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
	u8 ac = tid_to_mac80211_ac[tid];
	unsigned int txf;
	int lmac = IWL_LMAC_24G_INDEX;

	if (iwl_mvm_is_cdb_supported(mvm) &&
	    band == NL80211_BAND_5GHZ)
		lmac = IWL_LMAC_5G_INDEX;

	/* For HE redirect to trigger based fifos */
	if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
		ac += 4;

	txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);

	/*
	 * Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
	return min_t(unsigned int, mvmsta->max_amsdu_len,
		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
}

static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
	u16 snap_ip_tcp, pad;
	unsigned int dbg_max_amsdu_len;
	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
	u8 tid;

	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
		tcp_hdrlen(skb);

	dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);

	if (!mvmsta->max_amsdu_len ||
	    !ieee80211_is_data_qos(hdr->frame_control) ||
	    (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len))
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

	/*
	 * Do not build AMSDU for IPv6 with extension headers.
	 * Ask the stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		netdev_flags &= ~NETIF_F_CSUM_MASK;
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
	}

	tid = ieee80211_get_tid(hdr);
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	/*
	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
	 * during a BA session.
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

	if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
	    !(mvmsta->amsdu_enabled & BIT(tid)))
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

	max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);

	if (unlikely(dbg_max_amsdu_len))
		max_amsdu_len = min_t(unsigned int, max_amsdu_len,
				      dbg_max_amsdu_len);

	/*
	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
	 * supported. This is a spec requirement (IEEE 802.11-2015
	 * section 8.7.3 NOTE 3).
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !sta->vht_cap.vht_supported)
		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/*
	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);

	if (sta->max_amsdu_subframes &&
	    num_subframes > sta->max_amsdu_subframes)
		num_subframes = sta->max_amsdu_subframes;

	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/*
	 * Make sure we have enough TBs for the A-MSDU:
	 *	2 for each subframe
	 *	1 more for each fragment
	 *	1 more for the potential data in the header
	 */
	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
	    mvm->trans->max_skb_frags)
		num_subframes = 1;

	if (num_subframes > 1)
		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* This skb fits in one single A-MSDU */
	if (num_subframes * mss >= tcp_payload_len) {
		__skb_queue_tail(mpdus_skb, skb);
		return 0;
	}

	/*
	 * Trick the segmentation function to make it
	 * create SKBs that can fit into one A-MSDU.
	 */
	return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
				      mpdus_skb);
}
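/*
 * Worked example (illustrative numbers): mss = 1400 and
 * snap_ip_tcp = 8 + 20 + 20 = 48 for IPv4 without TCP options give
 * subf_len = 14 + 48 + 1400 = 1462 and pad = (4 - 1462) & 3 = 2,
 * so with max_amsdu_len = 7935 the division above yields
 * num_subframes = (7935 + 2) / (1462 + 2) = 5 subframes per A-MSDU.
 */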
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	/* Impossible to get TSO without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif

static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta, u8 tid,
				  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 mac_queue = info->hw_queue;
	struct sk_buff_head *deferred_tx_frames;

	lockdep_assert_held(&mvm_sta->lock);

	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);

	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;

	skb_queue_tail(deferred_tx_frames, skb);

	/*
	 * The first deferred frame should've stopped the MAC queues, so we
	 * should never get a second deferred frame for the RA/TID.
	 * In case of GSO the first packet may have been split, so don't warn.
	 */
	if (skb_queue_len(deferred_tx_frames) == 1) {
		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
		schedule_work(&mvm->add_stream_wk);
	}
}

/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
	unsigned long now = jiffies;
	int tid;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
			return true;
	}

	return false;
}

static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
			       struct iwl_mvm_sta *mvmsta,
			       int airtime)
{
	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
	struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

	if (mvm->tcm.paused)
		return;

	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
		schedule_delayed_work(&mvm->tcm.work, 0);

	mdata->tx.airtime += airtime;
}

static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvmsta, int tid)
{
	u32 ac = tid_to_mac80211_ac[tid];
	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
	struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

	mdata->tx.pkts[ac]++;
}
/*
 * Prepares a single MPDU: builds its Tx cmd, assigns a queue and a
 * sequence number, and hands it to the transport
 */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_device_cmd *dev_cmd;
	__le16 fc;
	u16 seq_number = 0;
	u8 tid = IWL_MAX_TID_COUNT;
	u16 txq_id = info->hw_queue;
	bool is_ampdu = false;
	int hdrlen;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	if (unlikely(ieee80211_is_probe_resp(fc)))
		iwl_mvm_probe_resp_set_noa(mvm, skb);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
					sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	/*
	 * we handle that entirely ourselves -- for uAPSD the firmware
	 * will always send a notification, and for PS-Poll responses
	 * we'll notify mac80211 when getting frame status
	 */
	info->flags &= ~IEEE80211_TX_STATUS_EOSP;

	spin_lock(&mvmsta->lock);

	/* nullfunc frames should go to the MGMT queue regardless of QOS,
	 * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default
	 * assignment of MGMT TID
	 */
	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		tid = ieee80211_get_tid(hdr);
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;

		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
		if (WARN_ON_ONCE(is_ampdu &&
				 mvmsta->tid_data[tid].state != IWL_AGG_ON))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
			hdr->seq_ctrl |= cpu_to_le16(seq_number);
			/* update the tx_cmd hdr as it was already copied */
			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
		}
	} else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
		tid = IWL_TID_NON_QOS;
	}

	txq_id = mvmsta->tid_data[tid].txq_id;

	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	/* Check if TXQ needs to be allocated or re-activated */
	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
		iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);

		/*
		 * The frame is now deferred, and the worker scheduled
		 * will re-allocate it, so we can free it for now.
		 */
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		spin_unlock(&mvmsta->lock);
		return 0;
	}
	if (!iwl_mvm_has_new_tx_api(mvm)) {
		/* Keep track of the time of the last frame for this RA/TID */
		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;

		/*
		 * If we have timed-out TIDs - schedule the worker that will
		 * reconfig the queues and update them
		 *
		 * Note that no lock is taken here in order to not serialize
		 * the TX flow. This isn't dangerous because scheduling
		 * mvm->add_stream_wk can't ruin the state, and if we DON'T
		 * schedule it due to some race condition then we will on the
		 * next TX.
		 */
		if (unlikely(mvm->queue_info[txq_id].status ==
			     IWL_MVM_QUEUE_SHARED &&
			     iwl_mvm_txq_should_update(mvm, txq_id)))
			schedule_work(&mvm->add_stream_wk);
	}

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;

	spin_unlock(&mvmsta->lock);

	iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ?
			      0 : tid);

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	return -1;
}

int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_tx_info info;
	struct sk_buff_head mpdus_skbs;
	unsigned int payload_len;
	int ret;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (!skb_is_gso(skb))
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
	if (ret)
		return ret;

	if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
		return ret;

	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
		if (ret) {
			__skb_queue_purge(&mpdus_skbs);
			return ret;
		}
	}

	return 0;
}
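/*
 * Flow summary (added for clarity): a GSO skb entering
 * iwl_mvm_tx_skb() is split by iwl_mvm_tx_tso() into a list of MPDUs,
 * each of which goes through iwl_mvm_tx_mpdu() exactly like a
 * non-GSO frame; if any MPDU fails, the rest of the batch is purged
 * rather than transmitted out of order.
 */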
static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;
	u16 normalized_ssn;

	lockdep_assert_held(&mvmsta->lock);

	if ((tid_data->state == IWL_AGG_ON ||
	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
		/*
		 * Now that this aggregation or DQA queue is empty tell
		 * mac80211 so it knows we no longer have frames buffered for
		 * the station on this TID (for the TIM bitmap calculation.)
		 */
		ieee80211_sta_set_buffered(sta, tid, false);
	}

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_OFF;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(SMALL_CF_POLL);
	TX_STATUS_FAIL(FW_DROP);
	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum nl80211_band band,
			       struct ieee80211_tx_rate *r)
{
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		r->flags |= IEEE80211_TX_RC_MCS;
		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		ieee80211_rate_set_vht(
			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
			 RATE_VHT_MCS_NSS_POS) + 1);
		r->flags |= IEEE80211_TX_RC_VHT_MCS;
	} else {
		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							     band);
	}
}
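/*
 * Illustrative decode (symbolic values only): a rate_n_flags word
 * with RATE_MCS_HT_MSK, RATE_MCS_CHAN_WIDTH_40 and RATE_MCS_SGI_MSK
 * set and MCS 7 in RATE_HT_MCS_INDEX_MSK yields
 *
 *	r->flags = IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_40_MHZ_WIDTH |
 *		   IEEE80211_TX_RC_SHORT_GI;
 *	r->idx   = 7;
 */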
/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
}

static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
					    u32 status)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_tx_status *status_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_TX_STATUS);
	if (!trig)
		return;

	status_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
		/* don't collect on status 0 */
		if (!status_trig->statuses[i].status)
			break;

		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Tx status %d was received",
					status & TX_STATUS_MSK);
		break;
	}
}

/**
 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
 * @tx_resp: the Tx response from the fw (agg or non-agg)
 *
 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
 * it can't know that everything will go well until the end of the AMPDU, it
 * can't know in advance the number of MPDUs that will be sent in the current
 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
 * Hence, it can't know in advance what the SSN of the SCD will be at the end
 * of the batch. This is why the SSN of the SCD is written at the end of the
 * whole struct at a variable offset. This function knows how to cope with the
 * variable offset and returns the SSN of the SCD.
 */
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
				      struct iwl_mvm_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
			    tx_resp->frame_count) & 0xfff;
}
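/*
 * Layout sketch (illustrative): for a response with frame_count = 3,
 * the firmware writes three 4-byte struct agg_tx_status entries and
 * only then the SCD SSN, so the SSN is read at agg_status + 3 when
 * treated as consecutive __le32 words, and masked to the 12-bit
 * sequence number space.
 */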
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct ieee80211_sta *sta;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	/* struct iwl_mvm_tx_resp_v3 is almost the same */
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	struct agg_tx_status *agg_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);
	u32 status = le16_to_cpu(agg_status->status);
	u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
	struct sk_buff_head skbs;
	u8 skb_freed = 0;
	u8 lq_color;
	u16 next_reclaimed, seq_ctl;
	bool is_ndp = false;

	__skb_queue_head_init(&skbs);

	if (iwl_mvm_has_new_tx_api(mvm))
		txq_id = le16_to_cpu(tx_resp->tx_queue);

	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);

	/* we can free until ssn % q.n_bd not inclusive */
	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_hdr *hdr = (void *)skb->data;
		bool flushed = false;

		skb_freed++;

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		/* inform mac80211 about what happened with the frame */
		switch (status & TX_STATUS_MSK) {
		case TX_STATUS_SUCCESS:
		case TX_STATUS_DIRECT_DONE:
			info->flags |= IEEE80211_TX_STAT_ACK;
			break;
		case TX_STATUS_FAIL_FIFO_FLUSHED:
		case TX_STATUS_FAIL_DRAIN_FLOW:
			flushed = true;
			break;
		case TX_STATUS_FAIL_DEST_PS:
			/* the FW should have stopped the queue and not
			 * return this status
			 */
			WARN_ON(1);
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			break;
		default:
			break;
		}

		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
		    ieee80211_is_mgmt(hdr->frame_control))
			iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

		/*
		 * If we are freeing multiple frames, mark all the frames
		 * but the first one as acked, since they were acknowledged
		 * before
		 */
		if (skb_freed > 1)
			info->flags |= IEEE80211_TX_STAT_ACK;

		iwl_mvm_tx_status_check_trigger(mvm, status);

		info->status.rates[0].count = tx_resp->failure_frame + 1;
		iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
					    info);
		info->status.status_driver_data[1] =
			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);

		/* Single frame failure in an AMPDU queue => send BAR */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
		if (ieee80211_is_back_req(hdr->frame_control))
			seq_ctl = 0;
		else if (status != TX_STATUS_SUCCESS)
			seq_ctl = le16_to_cpu(hdr->seq_ctrl);

		if (unlikely(!seq_ctl)) {
			struct ieee80211_hdr *hdr = (void *)skb->data;

			/*
			 * If it is an NDP, we can't update next_reclaim since
			 * its sequence control is 0. Note that for that same
			 * reason, NDPs are never sent to A-MPDU'able queues
			 * so that we can never have more than one freed frame
			 * for a single Tx response (see WARN_ON below).
			 */
			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
				is_ndp = true;
		}

		/*
		 * TODO: this is not accurate if we are freeing more than one
		 * packet.
		 */
		info->status.tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
		lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
		info->status.status_driver_data[0] =
			RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);

		ieee80211_tx_status(mvm->hw, skb);
	}

	/* This is an aggregation queue or might become one, so we use
	 * the ssn since: ssn = wifi seq_num % 256.
	 * The seq_ctl is the sequence control of the packet to which
	 * this Tx response relates. But if there is a hole in the
	 * bitmap of the BA we received, this Tx response may allow to
	 * reclaim the hole and all the subsequent packets that were
	 * already acked. In that case, seq_ctl != ssn, and the next
	 * packet to be reclaimed will be ssn and not seq_ctl. In that
	 * case, several packets will be reclaimed even if
	 * frame_count = 1.
	 *
	 * The ssn is the index (% 256) of the latest packet that has
	 * been treated (acked / dropped) + 1.
	 */
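	/*
	 * Worked example (illustrative): say MPDUs 10..13 were sent, 10 was
	 * lost and 11..13 were BA-acked.  When 10 is retried alone and this
	 * Tx response reports it, seq_ctl points at 10 but the SCD SSN has
	 * already advanced past 13, so iwl_trans_reclaim() above freed
	 * frames 10..13 in one go even though frame_count = 1, and
	 * next_reclaimed becomes 14.
	 */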
	next_reclaimed = ssn;

	IWL_DEBUG_TX_REPLY(mvm,
			   "TXQ %d status %s (0x%08x)\n",
			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);

	IWL_DEBUG_TX_REPLY(mvm,
			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
			   le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
			   ssn, next_reclaimed, seq_ctl);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	/*
	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
	 * the firmware while we still have packets for it in the Tx queues.
	 */
	if (WARN_ON_ONCE(!sta))
		goto out;

	if (!IS_ERR(sta)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));

		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);

		if (sta->wme && tid != IWL_MGMT_TID) {
			struct iwl_mvm_tid_data *tid_data =
				&mvmsta->tid_data[tid];
			bool send_eosp_ndp = false;

			spin_lock_bh(&mvmsta->lock);

			if (!is_ndp) {
				tid_data->next_reclaimed = next_reclaimed;
				IWL_DEBUG_TX_REPLY(mvm,
						   "Next reclaimed packet:%d\n",
						   next_reclaimed);
			} else {
				IWL_DEBUG_TX_REPLY(mvm,
						   "NDP - don't update next_reclaimed\n");
			}

			iwl_mvm_check_ratid_empty(mvm, sta, tid);

			if (mvmsta->sleep_tx_count) {
				mvmsta->sleep_tx_count--;
				if (mvmsta->sleep_tx_count &&
				    !iwl_mvm_tid_queued(mvm, tid_data)) {
					/*
					 * The number of frames in the queue
					 * dropped to 0 even if we sent less
					 * frames than we thought we had on the
					 * Tx queue.
					 * This means we had holes in the BA
					 * window that we just filled, ask
					 * mac80211 to send EOSP since the
					 * firmware won't know how to do that.
					 * Send NDP and the firmware will send
					 * EOSP notification that will trigger
					 * a call to ieee80211_sta_eosp().
					 */
					send_eosp_ndp = true;
				}
			}

			spin_unlock_bh(&mvmsta->lock);
			if (send_eosp_ndp) {
				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					IEEE80211_FRAME_RELEASE_UAPSD,
					1, tid, false, false);
				mvmsta->sleep_tx_count = 0;
				ieee80211_send_eosp_nullfunc(sta, tid);
			}
		}

		if (mvmsta->next_status_eosp) {
			mvmsta->next_status_eosp = false;
			ieee80211_sta_eosp(sta);
		}
	}
out:
	rcu_read_unlock();
}
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
static const char *iwl_get_agg_tx_status(u16 status)
{
	switch (status & AGG_TX_STATE_STATUS_MSK) {
	AGG_TX_STATE_(TRANSMITTED);
	AGG_TX_STATE_(UNDERRUN);
	AGG_TX_STATE_(BT_PRIO);
	AGG_TX_STATE_(FEW_BYTES);
	AGG_TX_STATE_(ABORT);
	AGG_TX_STATE_(TX_ON_AIR_DROP);
	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
	AGG_TX_STATE_(LAST_SENT_BT_KILL);
	AGG_TX_STATE_(SCD_QUERY);
	AGG_TX_STATE_(TEST_BAD_CRC32);
	AGG_TX_STATE_(RESPONSE);
	AGG_TX_STATE_(DUMP_TX);
	AGG_TX_STATE_(DELAY_TX);
	}

	return "UNKNOWN";
}

static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct agg_tx_status *frame_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);
	int i;

	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		IWL_DEBUG_TX_REPLY(mvm,
				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
				   iwl_get_agg_tx_status(fstatus),
				   fstatus & AGG_TX_STATE_STATUS_MSK,
				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
					AGG_TX_STATE_TRY_CNT_POS,
				   le16_to_cpu(frame_status[i].sequence));
	}
}
#else
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	struct iwl_mvm_sta *mvmsta;
	int queue = SEQ_TO_QUEUE(sequence);
	struct ieee80211_sta *sta;

	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
		return;

	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (WARN_ON_ONCE(!sta || !sta->wme)) {
		rcu_read_unlock();
		return;
	}

	if (!WARN_ON_ONCE(!mvmsta)) {
		mvmsta->tid_data[tid].rate_n_flags =
			le32_to_cpu(tx_resp->initial_rate);
		mvmsta->tid_data[tid].tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		mvmsta->tid_data[tid].lq_color =
			TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));
	}

	rcu_read_unlock();
}
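/*
 * Note (added for clarity): a response with frame_count == 1 covers
 * non-aggregated frames as well as the single-frame-in-AMPDU case,
 * so iwl_mvm_rx_tx_cmd() below takes the aggregated path only for
 * multi-frame responses, where per-frame status is merely logged and
 * the actual reclaim happens on the BA notification.
 */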
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
			       int txq, int index,
			       struct ieee80211_tx_info *ba_info, u32 rate)
{
	struct sk_buff_head reclaimed_skbs;
	struct iwl_mvm_tid_data *tid_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct sk_buff *skb;
	int freed;

	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
		      tid > IWL_MAX_TID_COUNT,
		      "sta_id %d tid %d", sta_id, tid))
		return;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	/* Reclaiming frames for a station that has been deleted? */
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	tid_data = &mvmsta->tid_data[tid];

	if (tid_data->txq_id != txq) {
		IWL_ERR(mvm,
			"invalid BA notification: Q %d, tid %d\n",
			tid_data->txq_id, tid);
		rcu_read_unlock();
		return;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/*
	 * Release all TFDs before the SSN, i.e. all TFDs in front of the
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway).
	 */
	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);

	spin_lock_bh(&mvmsta->lock);

	tid_data->next_reclaimed = index;

	iwl_mvm_check_ratid_empty(mvm, sta, tid);

	freed = 0;

	/* pack the lq color from tid_data along with the reduced txp */
	ba_info->status.status_driver_data[0] =
		RS_DRV_DATA_PACK(tid_data->lq_color,
				 ba_info->status.status_driver_data[0]);
	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/*
		 * The packet was transmitted successfully: failures are
		 * reported as single frames, because before failing a frame
		 * the firmware transmits it without aggregation at least
		 * once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		/* this is the first skb we deliver in this batch;
		 * put the rate scaling data there
		 */
		if (freed == 1) {
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			memcpy(&info->status, &ba_info->status,
			       sizeof(ba_info->status));
			iwl_mvm_hwrate_to_tx_status(rate, info);
		}
	}

	spin_unlock_bh(&mvmsta->lock);

	/*
	 * We got a BA notif with 0 ACKed frames, or scd_ssn didn't progress,
	 * which is possible (e.g. the first MPDU in the aggregation wasn't
	 * ACKed). It's still important to update RS about sent vs. ACKed
	 * frames.
	 */
	if (skb_queue_empty(&reclaimed_skbs)) {
		struct ieee80211_chanctx_conf *chanctx_conf = NULL;

		if (mvmsta->vif)
			chanctx_conf =
				rcu_dereference(mvmsta->vif->chanctx_conf);

		if (WARN_ON_ONCE(!chanctx_conf))
			goto out;

		ba_info->band = chanctx_conf->def.chan->band;
		iwl_mvm_hwrate_to_tx_status(rate, ba_info);

		if (!iwl_mvm_has_tlc_offload(mvm)) {
			IWL_DEBUG_TX_REPLY(mvm,
					   "No reclaim. Update rs directly\n");
			iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
		}
	}

out:
	rcu_read_unlock();

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(mvm->hw, skb);
	}
}
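/*
 * Illustrative sketch, not part of the driver: packing two small values
 * into a single pointer-sized status_driver_data slot, which is what
 * RS_DRV_DATA_PACK() does above for the LQ color and the reduced Tx
 * power. The field layout here (color in bits 8..11, reduced txp in
 * bits 0..7) is hypothetical, not the driver's actual encoding.
 */
#if 0
static void *example_pack_lq_color(u8 lq_color, u8 reduced_txp)
{
	uintptr_t packed = ((uintptr_t)(lq_color & 0xf) << 8) | reduced_txp;

	/* status_driver_data[] stores void pointers, so carry the bits
	 * in one
	 */
	return (void *)packed;
}
#endif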
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	int sta_id, tid, txq, index;
	struct ieee80211_tx_info ba_info = {};
	struct iwl_mvm_ba_notif *ba_notif;
	struct iwl_mvm_tid_data *tid_data;
	struct iwl_mvm_sta *mvmsta;
	u32 rate;

	ba_info.flags = IEEE80211_TX_STAT_AMPDU;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		struct iwl_mvm_compressed_ba_notif *ba_res =
			(void *)pkt->data;
		u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
		int i;

		sta_id = ba_res->sta_id;
		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
		ba_info.status.tx_time =
			(u16)le32_to_cpu(ba_res->wireless_time);
		ba_info.status.status_driver_data[0] =
			(void *)(uintptr_t)ba_res->reduced_txp;

		if (!le16_to_cpu(ba_res->tfd_cnt))
			goto out;

		rcu_read_lock();

		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
		if (!mvmsta)
			goto out_unlock;

		/* Free per TID */
		for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
				&ba_res->tfd[i];

			tid = ba_tfd->tid;
			if (tid == IWL_MGMT_TID)
				tid = IWL_MAX_TID_COUNT;

			mvmsta->tid_data[tid].lq_color = lq_color;
			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
					   (int)(le16_to_cpu(ba_tfd->q_num)),
					   le16_to_cpu(ba_tfd->tfd_index),
					   &ba_info,
					   le32_to_cpu(ba_res->tx_rate));
		}

		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le32_to_cpu(ba_res->wireless_time));
out_unlock:
		rcu_read_unlock();
out:
		IWL_DEBUG_TX_REPLY(mvm,
				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
				   sta_id, le32_to_cpu(ba_res->flags),
				   le16_to_cpu(ba_res->txed),
				   le16_to_cpu(ba_res->done));
		return;
	}

	ba_notif = (void *)pkt->data;
	sta_id = ba_notif->sta_id;
	tid = ba_notif->tid;
	/* "flow" corresponds to Tx queue */
	txq = le16_to_cpu(ba_notif->scd_flow);
	/*
	 * "ssn" is the start of the block-ack Tx window; it corresponds to
	 * the index (in the Tx queue's circular buffer) of the first
	 * TFD/frame in the window.
	 */
	index = le16_to_cpu(ba_notif->scd_ssn);

	rcu_read_lock();
	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
	if (WARN_ON_ONCE(!mvmsta)) {
		rcu_read_unlock();
		return;
	}

	tid_data = &mvmsta->tid_data[tid];

	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
	ba_info.status.ampdu_len = ba_notif->txed;
	ba_info.status.tx_time = tid_data->tx_time;
	ba_info.status.status_driver_data[0] =
		(void *)(uintptr_t)ba_notif->reduced_txp;

	/* tid_data belongs to mvmsta, so read the rate before dropping RCU */
	rate = tid_data->rate_n_flags;

	rcu_read_unlock();

	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, rate);

	IWL_DEBUG_TX_REPLY(mvm,
			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
			   ba_notif->sta_addr, ba_notif->sta_id);

	IWL_DEBUG_TX_REPLY(mvm,
			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
			   le64_to_cpu(ba_notif->bitmap), txq, index,
			   ba_notif->txed, ba_notif->txed_2_done);

	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
			   ba_notif->reduced_txp);
}
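/*
 * Illustrative sketch, not part of the driver: the relation between
 * scd_ssn and the Tx queue's circular buffer described in the comment
 * above. Reclaiming "up to ssn" frees every entry in front of the BA
 * window, i.e. the distance from the current read pointer to the ssn
 * index, modulo the ring size. The ring size and names here are
 * hypothetical.
 */
#if 0
#define EXAMPLE_RING_SIZE 256 /* must be a power of two */

static int example_frames_reclaimed(int read_ptr, int ssn_index)
{
	/* number of entries between the read pointer and the BA window */
	return (ssn_index - read_ptr) & (EXAMPLE_RING_SIZE - 1);
}
#endif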
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 * (see the sketch at the end of this file)
 */
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
		.queues_ctl = cpu_to_le32(tfd_msk),
		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}

int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
			   u16 tids, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd flush_cmd = {
		.sta_id = cpu_to_le32(sta_id),
		.tid_mask = cpu_to_le16(tids),
	};

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}

int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
{
	struct iwl_mvm_int_sta *int_sta = sta;
	struct iwl_mvm_sta *mvm_sta = sta;

	BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
		     offsetof(struct iwl_mvm_sta, sta_id));

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
					      0xff | BIT(IWL_MGMT_TID), flags);

	if (internal)
		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
					     flags);

	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
}
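/*
 * Illustrative sketch, not part of the driver: the three-step race-free
 * drain sequence from the comment above iwl_mvm_flush_tx_path(), for the
 * non new-Tx-API flow. It assumes the driver's iwl_mvm_drain_sta() and
 * iwl_trans_wait_tx_queues_empty() helpers; the function name itself is
 * hypothetical, and error unwinding is elided for brevity.
 */
#if 0
static int example_drain_station(struct iwl_mvm *mvm,
				 struct iwl_mvm_sta *mvm_sta, u32 flags)
{
	int ret;

	/* 1) set the station as draining */
	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* 2) flush the Tx path */
	ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
	if (ret)
		return ret;

	/* 3) wait for the transport queues to become empty */
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
					     mvm_sta->tfd_queue_msk);

	/* stop draining regardless of the wait result */
	iwl_mvm_drain_sta(mvm, mvm_sta, false);

	return ret;
}
#endif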