// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/devcoredump.h>
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7921.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

#define HE_BITS(f)		cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
#define HE_PREP(f, m, v)	le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
						 IEEE80211_RADIOTAP_HE_##f)

static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7921_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7921_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}

bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7921_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr;
		u16 idx;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7921_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;
		addr = MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4;

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7921_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt7921_lmac_mapping(dev, i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}

	rcu_read_unlock();
}

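/* Translate the RU allocation index signalled in the P-RXV into the
 * radiotap HE RU size and offset fields. The case ranges below appear to
 * follow the 802.11ax RU numbering: 0..36 are 26-tone RUs, up to index 68
 * for the 2x996-tone allocation.
 */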
static void
mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
				 struct ieee80211_radiotap_he *he,
				 __le32 *rxv)
{
	u32 ru_h, ru_l;
	u8 ru, offs = 0;

	ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
	ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
	ru = (u8)(ru_l | ru_h << 4);

	status->bw = RATE_INFO_BW_HE_RU;

	switch (ru) {
	case 0 ... 36:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
		offs = ru;
		break;
	case 37 ... 52:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
		offs = ru - 37;
		break;
	case 53 ... 60:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
		offs = ru - 53;
		break;
	case 61 ... 64:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
		offs = ru - 61;
		break;
	case 65 ... 66:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
		offs = ru - 65;
		break;
	case 67:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
		break;
	case 68:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
		break;
	}

	he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
	he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
		     le16_encode_bits(offs,
				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
}

static void
mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
			      struct mt76_rx_status *status,
			      __le32 *rxv, u32 phy)
{
	/* TODO: struct ieee80211_radiotap_he_mu */
	static const struct ieee80211_radiotap_he known = {
		.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
			 HE_BITS(DATA1_DATA_DCM_KNOWN) |
			 HE_BITS(DATA1_STBC_KNOWN) |
			 HE_BITS(DATA1_CODING_KNOWN) |
			 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
			 HE_BITS(DATA1_DOPPLER_KNOWN) |
			 HE_BITS(DATA1_BSS_COLOR_KNOWN),
		.data2 = HE_BITS(DATA2_GI_KNOWN) |
			 HE_BITS(DATA2_TXBF_KNOWN) |
			 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
			 HE_BITS(DATA2_TXOP_KNOWN),
	};
	struct ieee80211_radiotap_he *he = NULL;
	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;

	he = skb_push(skb, sizeof(known));
	memcpy(he, &known, sizeof(known));

	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
		    le16_encode_bits(ltf_size,
				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);

	switch (phy) {
	case MT_PHY_TYPE_HE_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
		break;
	case MT_PHY_TYPE_HE_EXT_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		break;
	case MT_PHY_TYPE_HE_MU:
		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);

		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	case MT_PHY_TYPE_HE_TB:
		he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);

		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);

		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	default:
		break;
	}
}

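/* While scanning or during a remain-on-channel session, the channel in the
 * RX descriptor is authoritative; otherwise the frame must have been
 * received on the currently configured channel.
 */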
static void
mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
			    struct mt76_rx_status *status, u8 chfreq)
{
	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
		status->freq = mphy->chandef.chan->center_freq;
		status->band = mphy->chandef.chan->band;
		return;
	}

	status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}

static void
mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct sk_buff *skb = priv;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

	if (status->signal > 0)
		return;

	if (!ether_addr_equal(vif->addr, hdr->addr1))
		return;

	ewma_rssi_add(&mvif->rssi, -status->signal);
}

static void
mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

	if (!ieee80211_is_assoc_resp(hdr->frame_control) &&
	    !ieee80211_is_auth(hdr->frame_control))
		return;

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt7921_mac_rssi_iter, skb);
}

int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
{
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	bool hdr_trans, unicast, insert_ccmp_hdr = false;
	u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
	__le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7921_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	u32 mode = 0;
	int i, idx;

	memset(status, 0, sizeof(*status));

	if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
		return -EINVAL;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		struct mt7921_sta *msta;

		msta = container_of(status->wcid, struct mt7921_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	mt7921_get_status_freq_info(dev, mphy, status, chfreq);

	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 6;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u8 stbc, gi;
		u32 v0, v1;
		bool cck;

		rxv = rxd;
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v0 = le32_to_cpu(rxv[0]);
		v1 = le32_to_cpu(rxv[1]);

		if (v0 & MT_PRXV_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
		status->signal = -128;
		for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
			if (!(status->chains & BIT(i)) ||
			    status->chain_signal[i] >= 0)
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}

		if (status->signal == -128)
			status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		stbc = FIELD_GET(MT_PRXV_STBC, v0);
		gi = FIELD_GET(MT_PRXV_SGI, v0);
		cck = false;

		idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
		mode = FIELD_GET(MT_PRXV_TX_MODE, v0);

		switch (mode) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			fallthrough;
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss =
				FIELD_GET(MT_PRXV_NSTS, v0) + 1;
			status->encoding = RX_ENC_VHT;
			if (i > 9)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_HE_MU:
			status->flag |= RX_FLAG_RADIOTAP_HE_MU;
			fallthrough;
		case MT_PHY_TYPE_HE_SU:
		case MT_PHY_TYPE_HE_EXT_SU:
		case MT_PHY_TYPE_HE_TB:
			status->nss =
				FIELD_GET(MT_PRXV_NSTS, v0) + 1;
			status->encoding = RX_ENC_HE;
			status->flag |= RX_FLAG_RADIOTAP_HE;
			i &= GENMASK(3, 0);

			if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
				status->he_gi = gi;

			status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
			break;
		default:
			return -EINVAL;
		}

		status->rate_idx = i;

		switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
		case IEEE80211_STA_RX_BW_20:
			break;
		case IEEE80211_STA_RX_BW_40:
			if (mode & MT_PHY_TYPE_HE_EXT_SU &&
			    (idx & MT_PRXV_TX_ER_SU_106T)) {
				status->bw = RATE_INFO_BW_HE_RU;
				status->he_ru =
					NL80211_RATE_INFO_HE_RU_ALLOC_106;
			} else {
				status->bw = RATE_INFO_BW_40;
			}
			break;
		case IEEE80211_STA_RX_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case IEEE80211_STA_RX_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
		if (mode < MT_PHY_TYPE_HE_SU && gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 18;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
		if (!hdr_trans) {
			memmove(skb->data + 2, skb->data,
				ieee80211_get_hdrlen_from_skb(skb));
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag &= ~(RX_FLAG_RADIOTAP_HE |
				  RX_FLAG_RADIOTAP_HE_MU);
		status->flag |= RX_FLAG_8023;
	}

	mt7921_mac_assoc_rssi(dev, skb);

	if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
		mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
	status->qos_ctl = qos_ctl;

	return 0;
}

static void
mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] |= cpu_to_le32(val);
}

static void
mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

		txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
		tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
		u16 control = le16_to_cpu(bar->control);

		tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);
	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD2_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	if (!ieee80211_is_data(fc) || multicast)
		val |= MT_TXD2_FIX_RATE;

	txwi[2] |= cpu_to_le32(val);

	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] |= cpu_to_le32(val);
}

static void mt7921_update_txs(struct mt76_wcid *wcid, __le32 *txwi)
{
	struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
	u32 pid, frame_type;

	/* use le32_get_bits() rather than FIELD_GET() on the raw __le32 word
	 * so the lookup is also correct on big-endian hosts
	 */
	frame_type = le32_get_bits(txwi[2], MT_TXD2_FRAME_TYPE);
	if (!(frame_type & (IEEE80211_FTYPE_DATA >> 2)))
		return;

	if (time_is_after_eq_jiffies(msta->next_txs_ts))
		return;

	msta->next_txs_ts = jiffies + msecs_to_jiffies(250);
	pid = mt76_get_next_pkt_id(wcid);
	txwi[5] |= cpu_to_le32(MT_TXD5_TX_STATUS_MCU |
			       FIELD_PREP(MT_TXD5_PID, pid));
}

void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 tx_count = 15;
	u32 val;

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
			mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;
	txwi[5] = 0;
	txwi[6] = 0;
	txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;

	if (is_8023)
		mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7921_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
		u16 rate;

		/* hardware won't add HTC for mgmt/ctrl frame */
		txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			rate = MT7921_5G_RATE_DEFAULT;
		else
			rate = MT7921_2G_RATE_DEFAULT;

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_TX_RATE, rate);
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}

	mt7921_update_txs(wcid, txwi);
}

static void
mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
		    void *txp_ptr, u32 id)
{
	struct mt7921_hw_txp *txp = txp_ptr;
	struct mt7921_txp_ptr *ptr = &txp->ptr[0];
	int i, nbuf = tx_info->nbuf - 1;

	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
	tx_info->nbuf = 1;

	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);

	for (i = 0; i < nbuf; i++) {
		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
		u32 addr = tx_info->buf[i + 1].addr;

		if (i == nbuf - 1)
			len |= MT_TXD_LEN_LAST;

		if (i & 1) {
			ptr->buf1 = cpu_to_le32(addr);
			ptr->len1 = cpu_to_le16(len);
			ptr++;
		} else {
			ptr->buf0 = cpu_to_le32(addr);
			ptr->len0 = cpu_to_le16(len);
		}
	}
}

int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
	struct mt76_txwi_cache *t;
	struct mt7921_txp_common *txp;
	int id;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	cb->wcid = wcid->idx;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      false);

	txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
	memset(txp, 0, sizeof(struct mt7921_txp_common));
	mt7921_write_hw_txp(dev, tx_info, txp, id);

	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}

static void
mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	struct mt7921_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !sta->ht_cap.ht_supported)
		return;

	tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
	if (tid >= 6) /* skip VO queue */
		return;

	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7921_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
			  struct ieee80211_sta *sta, u8 stat,
			  struct list_head *free_list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_status status = {
		.sta = sta,
		.info = info,
		.skb = skb,
		.free_list = free_list,
	};
	struct ieee80211_hw *hw;

	if (sta) {
		struct mt7921_sta *msta;

		msta = (struct mt7921_sta *)sta->drv_priv;
		status.rate = &msta->stats.tx_rate;
	}

	hw = mt76_tx_status_get_hw(mdev, skb);

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		info->flags |= IEEE80211_TX_STAT_AMPDU;

	if (stat)
		ieee80211_tx_info_clear_status(info);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.tx_time = 0;
	ieee80211_tx_status_ext(hw, &status);
}

void mt7921_txp_skb_unmap(struct mt76_dev *dev,
			  struct mt76_txwi_cache *t)
{
	struct mt7921_txp_common *txp;
	int i;

	txp = mt7921_txwi_to_txp(dev, t);

	for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
		struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
		bool last;
		u16 len;

		len = le16_to_cpu(ptr->len0);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
				 DMA_TO_DEVICE);
		if (last)
			break;

		len = le16_to_cpu(ptr->len1);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
				 DMA_TO_DEVICE);
		if (last)
			break;
	}
}

void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *tmp;
	bool wake = false;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	/* TODO: MT_TX_FREE_LATENCY is the msdu time from when the TXD is
	 * queued into PLE to when the ack is received or the frame is
	 * dropped by hw (air + hw queue time). Should avoid accessing the
	 * WTBL to get Tx airtime, and use this field instead.
	 */
	count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(free->info[i]);
		u8 stat;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7921_sta *msta;
			struct mt7921_phy *phy;
			struct mt76_wcid *wcid;
			u16 idx;

			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7921_sta, wcid);
			phy = msta->vif->phy;
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->stats_list))
				list_add_tail(&msta->stats_list, &phy->stats_list);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7921_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
			void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);

			if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
				mt7921_tx_check_aggr(sta, txwi_ptr);

			if (sta && !info->tx_time_est) {
				struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
				int pending;

				pending = atomic_dec_return(&wcid->non_aql_packets);
				if (pending < 0)
					atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
			}

			mt7921_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	napi_consume_skb(skb, 1);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}

	mt7921_mac_sta_poll(dev);
	mt76_worker_schedule(&dev->mt76.tx_worker);
}

void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt7921_dev *dev;

	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	dev = container_of(mdev, struct mt7921_dev, mt76);

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7921_txp_common *txp;
		u16 token;

		txp = mt7921_txwi_to_txp(mdev, e->txwi);
		token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
		t = mt76_token_put(mdev, token);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);

		mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
					  NULL);
	}
}

void mt7921_mac_reset_counters(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
	}

	dev->mt76.phy.survey_time = ktime_get_boottime();
	memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR37(0));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
}

void mt7921_mac_set_timing(struct mt7921_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7921_dev *dev = phy->dev;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	mt76_set(dev, MT_ARB_SCR(0),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(0),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (phy->slottime < 20 || is_5ghz)
		val = MT7921_CFEND_RATE_DEFAULT;
	else
		val = MT7921_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(0),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

static u8
mt7921_phy_get_nf(struct mt7921_phy *phy, int idx)
{
	return 0;
}

static void
mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
				   MT_MIB_OBSSTIME_MASK);

	nf = mt7921_phy_get_nf(phy, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}

void mt7921_update_channel(struct mt76_phy *mphy)
{
	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);

	if (mt76_connac_pm_wake(mphy, &dev->pm))
		return;

	mt7921_phy_update_channel(mphy, 0);
	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);

	mt76_connac_power_save_sched(mphy, &dev->pm);
}

void mt7921_tx_token_put(struct mt7921_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7921_txp_skb_unmap(&dev->mt76, txwi);
		if (txwi->skb) {
			struct ieee80211_hw *hw;

			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
			ieee80211_free_txskb(hw, txwi->skb);
		}
		mt76_put_txwi(&dev->mt76, txwi);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

static void
mt7921_vif_connect_iter(void *priv, u8 *mac,
			struct ieee80211_vif *vif)
{
	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
	struct mt7921_dev *dev = mvif->phy->dev;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_disconnect(vif, true);

	mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
	mt7921_mcu_set_tx(dev, vif);
}

static int
mt7921_mac_reset(struct mt7921_dev *dev)
{
	int i, err;

	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);

	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	skb_queue_purge(&dev->mt76.mcu.res_q);

	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
	napi_disable(&dev->mt76.tx_napi);

	mt7921_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7921_wpdma_reset(dev, true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);

	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
		MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
		MT_INT_MCU_CMD);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);

	err = mt7921_run_firmware(dev);
	if (err)
		goto out;

	err = mt7921_mcu_set_eeprom(dev);
	if (err)
		goto out;

	err = mt7921_mac_init(dev);
	if (err)
		goto out;

	err = __mt7921_start(&dev->phy);
out:
	clear_bit(MT76_RESET, &dev->mphy.state);

	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	mt76_worker_enable(&dev->mt76.tx_worker);

	return err;
}

/* system error recovery */
void mt7921_mac_reset_work(struct work_struct *work)
{
	struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
					      reset_work);
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt76_connac_pm *pm = &dev->pm;
	int i;

	dev_err(dev->mt76.dev, "chip reset\n");
	dev->hw_full_reset = true;
	ieee80211_stop_queues(hw);

	cancel_delayed_work_sync(&dev->mphy.mac_work);
	cancel_delayed_work_sync(&pm->ps_work);
	cancel_work_sync(&pm->wake_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		__mt7921_mcu_drv_pmctrl(dev);

		if (!mt7921_mac_reset(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip reset failed\n");

	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		ieee80211_scan_completed(dev->mphy.hw, &info);
	}

	dev->hw_full_reset = false;
	ieee80211_wake_queues(hw);
	ieee80211_iterate_active_interfaces(hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7921_vif_connect_iter, NULL);
	mt76_connac_power_save_sched(&dev->mt76.phy, pm);
}

void mt7921_reset(struct mt76_dev *mdev)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);

	if (!dev->hw_init_done)
		return;

	if (dev->hw_full_reset)
		return;

	queue_work(dev->mt76.wq, &dev->reset_work);
}

static void
mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0 = 0, aggr1;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
					   MT_MIB_SDR3_FCS_ERR_MASK);
	mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
					    MT_MIB_ACK_FAIL_COUNT_MASK);
	mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
					   MT_MIB_BA_FAIL_COUNT_MASK);
	mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
				       MT_MIB_RTS_COUNT_MASK);
	mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
					       MT_MIB_RTS_FAIL_COUNT_MASK);

	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
		u32 val, val2;

		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));

		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr0++] += val >> 16;
		dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
		dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
	}
}

void mt7921_mac_work(struct work_struct *work)
{
	struct mt7921_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mt7921_mutex_acquire(phy->dev);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 2) {
		mphy->mac_work_count = 0;

		mt7921_mac_update_mib_stats(phy);
	}

	mt7921_mutex_release(phy->dev);
	ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
				     MT7921_WATCHDOG_TIME);
}

void mt7921_pm_wake_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	if (!mt7921_mcu_drv_pmctrl(dev)) {
		int i;

		mt76_for_each_q_rx(&dev->mt76, i)
			napi_schedule(&dev->mt76.napi[i]);
		mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
		mt7921_tx_cleanup(dev);
		if (test_bit(MT76_STATE_RUNNING, &mphy->state))
			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
						     MT7921_WATCHDOG_TIME);
	}

	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}

void mt7921_pm_power_save_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	unsigned long delta;
	struct mt76_phy *mphy;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						pm.ps_work.work);
	mphy = dev->phy.mt76;

	delta = dev->pm.idle_timeout;
	if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
		goto out;

	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	if (!mt7921_mcu_fw_pmctrl(dev)) {
		cancel_delayed_work_sync(&mphy->mac_work);
		return;
	}
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}

int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
				 struct ieee80211_vif *vif,
				 bool enable)
{
	struct mt7921_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	if (!dev->pm.enable)
		return -EOPNOTSUPP;

	err = mt7921_mcu_set_bss_pm(dev, vif, enable);
	if (err)
		return err;

	if (enable) {
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
		mt76_set(dev, MT_WF_RFCR(ext_phy),
			 MT_WF_RFCR_DROP_OTHER_BEACON);
	} else {
		vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
		mt76_clear(dev, MT_WF_RFCR(ext_phy),
			   MT_WF_RFCR_DROP_OTHER_BEACON);
	}

	return 0;
}

void mt7921_coredump_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	char *dump, *data;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						coredump.work.work);

	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);

	mt7921_reset(&dev->mt76);
}