// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
#include "../trace.h"
#include "../dma.h"
#include "mt7615_trace.h"
#include "mac.h"

/* Convert a hardware RCPI field from an RX vector word to dBm: (rcpi - 220) / 2 */
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)

/* DFS radar detector configuration for the ETSI regulatory domain.
 * NOTE(review): per-entry field semantics come from the
 * mt7615_dfs_radar_spec/pattern definitions elsewhere in the driver;
 * the values are vendor calibration constants - do not tweak.
 */
static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 },
		[6] =  { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 },
		[9] =  { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
		[10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
		[11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 },
		[12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 },
	},
};

/* DFS radar detector configuration for the FCC regulatory domain */
static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 },
		[1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
		[2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
		[3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
		[4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 },
	},
};

/* DFS radar detector configuration for the Japanese regulatory domain */
static const struct mt7615_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 },
		[1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
		[2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
		[3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
		[4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 },
		[13] = { 1, 0, 8, 32, 28, 0, 14, 3836, 3856, 1, 1 },
		[14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 },
	},
};

/* Look up the mt76_wcid for the WLAN index reported by an RX descriptor.
 * For non-unicast frames from a known station, return the per-vif wcid
 * instead so the frame is attributed to the owning interface.
 * Must be called under rcu_read_lock() (rcu_dereference below).
 */
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
					    u8 idx, bool unicast)
{
	struct mt7615_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7615_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

/* Reset software aggregation statistics and restart the survey timers.
 * The MIB register reads below discard their result - presumably these
 * counters are clear-on-read (matches the "reset airtime counters"
 * comment); TODO confirm against the MT7615 datasheet.
 */
void mt7615_mac_reset_counters(struct mt7615_dev *dev)
{
	int i;

	for (i = 0; i < 4; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
	dev->mt76.phy.survey_time = ktime_get_boottime();
	if (dev->mt76.phy2)
		dev->mt76.phy2->survey_time = ktime_get_boottime();

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR9(1));

	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR36(1));

	mt76_rr(dev, MT_MIB_SDR37(0));
	mt76_rr(dev, MT_MIB_SDR37(1));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

/* Program MAC timing (SIFS, slot time, CCA/PLCP timeouts) for @phy from
 * the current band and coverage class.  The larger coverage class of the
 * two PHYs wins, since the timing registers are shared.  TX/RX
 * arbitration for the PHY being updated is disabled around the register
 * writes and re-enabled at the end.
 */
void mt7615_mac_set_timing(struct mt7615_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
	int sifs, offset;

	/* 16us SIFS on 5 GHz, 10us on 2.4 GHz */
	if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ)
		sifs = 16;
	else
		sifs = 10;

	if (ext_phy) {
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX1_DISABLE |
			 MT_ARB_SCR_RX1_DISABLE);
	} else {
		struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);

		if (phy_ext)
			coverage_class = max_t(s16, phy_ext->coverage_class,
					       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
	}
	/* let in-flight arbitration settle before reprogramming timing */
	udelay(1);

	/* coverage class adds 3us of air propagation delay per class step */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);

	mt76_wr(dev, MT_TMAC_ICR(ext_phy),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* short slot implies OFDM CF-End rate, long slot implies 11b */
	if (phy->slottime < 20)
		val = MT7615_CFEND_RATE_DEFAULT;
	else
		val = MT7615_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	if (ext_phy)
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	else
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);

}

/* Parse the hardware RX descriptor in front of a received frame, fill
 * the mt76_rx_status in skb->cb (rate, signal, crypto flags, A-MPDU
 * reference, owning PHY) and strip the descriptor and padding from the
 * skb.  Returns 0 on success or -EINVAL if the descriptor is truncated
 * or inconsistent, in which case the caller drops the frame.
 */
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7615_phy *phy = &dev->phy;
	struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	/* kept little-endian: only compared for equality against the
	 * cached value below, never interpreted numerically
	 */
	__le32 rxd12 = rxd[12];
	bool unicast, remove_pad, insert_ccmp_hdr = false;
	int phy_idx;
	int i, idx;
	u8 chfreq;

	memset(status, 0, sizeof(*status));

	/* decide which PHY the frame belongs to by channel number;
	 * -1 means "ambiguous", resolved later from the RX vector RSSI
	 */
	chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	if (!phy2)
		phy_idx = 0;
	else if (phy2->chfreq == phy->chfreq)
		phy_idx = -1;
	else if (phy->chfreq == chfreq)
		phy_idx = 0;
	else if (phy2->chfreq == chfreq)
		phy_idx = 1;
	else
		phy_idx = -1;

	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);

	/* queue the station for airtime polling (mt7615_mac_sta_poll) */
	if (status->wcid) {
		struct mt7615_sta *msta;

		msta = container_of(status->wcid, struct mt7615_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* hardware decrypted the frame and found no CCMP/CLM mismatch */
	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* skip the 4-word base descriptor, then each optional group that
	 * the GROUP_* bits announce, bounds-checking against skb->len
	 */
	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			/* IV bytes are stored reversed in the descriptor */
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg5 = le32_to_cpu(rxd[5]);

		/*
		 * If both PHYs are on the same channel and we don't have a WCID,
		 * we need to figure out which PHY this packet was received on.
		 * On the primary PHY, the noise value for the chains belonging to the
		 * second PHY will be set to the noise value of the last packet from
		 * that PHY.
		 */
		if (phy_idx < 0) {
			int first_chain = ffs(phy2->chainmask) - 1;

			phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
		}
	}

	if (phy_idx == 1 && phy2) {
		mphy = dev->mt76.phy2;
		phy = phy2;
		status->ext_phy = true;
	}

	if (chfreq != phy->chfreq)
		return -EINVAL;

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
		      MT_RXD2_NORMAL_NON_AMPDU))) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;

		/* all subframes of an A-MPDU have the same timestamp */
		if (phy->rx_ampdu_ts != rxd12) {
			/* skip ref 0: it is reserved for "no A-MPDU" */
			if (!++phy->ampdu_ref)
				phy->ampdu_ref++;
		}
		phy->rx_ampdu_ts = rxd12;

		status->ampdu_ref = phy->ampdu_ref;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		/* decode the RX vector: rate, bandwidth and per-chain RSSI */
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg1 = le32_to_cpu(rxd[1]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
			status->encoding = RX_ENC_VHT;
			break;
		default:
			return -EINVAL;
		}
		status->rate_idx = i;

		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
		case MT_PHY_BW_20:
			break;
		case MT_PHY_BW_40:
			status->bw = RATE_INFO_BW_40;
			break;
		case MT_PHY_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case MT_PHY_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
		status->signal = status->chain_signal[0];

		/* report the strongest per-chain signal */
		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
			if (!(status->chains & BIT(i)))
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* drop the descriptor and the optional 2-byte alignment pad */
	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}

/* mt76 callback for station powersave transitions: the MT7615 firmware
 * handles PS buffering itself, so nothing to do here.
 */
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}

/* DMA TX completion hook: recover the skb behind a completed descriptor
 * and hand it back to mt76 for status reporting / freeing.
 */
void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			    struct mt76_queue_entry *e)
{
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7615_dev *dev;
		struct mt7615_txp_common *txp;
		u16 token;

		dev = container_of(mdev, struct mt7615_dev, mt76);
		txp = mt7615_txwi_to_txp(mdev, e->txwi);

		/* token id lives in a different TXP field per generation */
		if (is_mt7615(&dev->mt76))
			token = le16_to_cpu(txp->fw.token);
		else
			token = le16_to_cpu(txp->hw.msdu_id[0]) &
				~MT_MSDU_ID_VALID;

		spin_lock_bh(&dev->token_lock);
		t = idr_remove(&dev->token, token);
		spin_unlock_bh(&dev->token_lock);
		e->skb = t ?
			 t->skb : NULL;
	}

	if (e->skb)
		mt76_tx_complete_skb(mdev, e->skb);
}

/* Build the 16-bit hardware rate code (TX mode / rate index / NSS /
 * STBC) for an ieee80211_tx_rate and report the matching bandwidth
 * index via @bw (0 = 20MHz, 1 = 40MHz, 2 = 80MHz, 3 = 160MHz).
 */
static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
		       struct mt76_phy *mphy,
		       const struct ieee80211_tx_rate *rate,
		       bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval = 0;

	*bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = ieee80211_rate_get_vht_mcs(rate);
		nss = ieee80211_rate_get_vht_nss(rate);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*bw = 3;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);	/* 8 HT MCS per stream */
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		/* legacy rate: hw_value packs phy type (hi byte) and
		 * hardware rate index (lo byte)
		 */
		const struct ieee80211_rate *r;
		int band = mphy->chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	/* STBC doubles the spatial streams for single-stream rates */
	if (stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		    FIELD_PREP(MT_TX_RATE_MODE, phy) |
		    FIELD_PREP(MT_TX_RATE_NSS, nss - 1));

	return rateval;
}

/* Fill the 8-word hardware TX descriptor (TXWI) in front of @skb:
 * queue selection, header info, crypto flags, optional fixed rate,
 * sequence number and retry count.  @pid is the packet id used for TX
 * status matching; @beacon routes the frame to the beacon queue.
 * Always returns 0.
 */
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta, int pid,
			  struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
	int tx_count = 8;
	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	__le16 fc = hdr->frame_control;
	u16 seqno = 0;
	u32 val;

	if (vif) {
		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (sta) {
		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	/* pick the LMAC queue: per-AC WMM queue for data, dedicated
	 * beacon/ALTX queues for management
	 */
	if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
			skb_get_queue_mapping(skb);
		p_fmt = MT_TX_TYPE_CT;
	} else if (beacon) {
		if (ext_phy)
			q_idx = MT_LMAC_BCN1;
		else
			q_idx = MT_LMAC_BCN0;
		p_fmt = MT_TX_TYPE_FW;
	} else {
		if (ext_phy)
			q_idx = MT_LMAC_ALTX1;
		else
			q_idx = MT_LMAC_ALTX0;
		p_fmt = MT_TX_TYPE_CT;
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
	txwi[1] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);
	if (key) {
		/* BIP-protected multicast mgmt frames need the BIP flag
		 * instead of the generic protect-frame bit
		 */
		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
		    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
			val |= MT_TXD2_BIP;
			txwi[3] = 0;
		} else {
			txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
		}
	} else {
		txwi[3] = 0;
	}
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;
	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		/* mac80211 supplied an explicit rate: bypass the WTBL
		 * rate table and fix rate/bandwidth in the descriptor
		 */
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u8 bw;
		u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
						     &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (info->flags & IEEE80211_TX_CTL_LDPC)
			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);

		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
				     IEEE80211_TX_RC_VHT_MCS)))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	if (!ieee80211_is_beacon(fc)) {
		val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
		      FIELD_PREP(MT_TXD5_PID, pid);
		txwi[5] = cpu_to_le32(val);
	} else {
		txwi[5] = 0;
		/* use maximum tx count for beacons */
		tx_count = 0x1f;
	}

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
		val |= MT_TXD3_SN_VALID;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;

		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
		val |= MT_TXD3_SN_VALID;
	}
	val |= FIELD_PREP(MT_TXD3_SEQ, seqno);

	txwi[3] |= cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);

	txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
		  FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);

	return 0;
}

/* Unmap the DMA buffers referenced by a firmware-style (mt7615) TXP.
 * Buffer 0 is skipped: it is unmapped by the generic queue code.
 */
static void
mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
{
	int i;

	for (i = 1; i < txp->nbuf; i++)
		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}

/* Unmap the DMA buffers referenced by a hardware-style TXP; the length
 * fields carry an MSDU_LAST marker bit that terminates the walk.
 */
static void
mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
		struct mt7615_txp_ptr *ptr = &txp->ptr[i];
		bool last;
		u16 len;

		len = le16_to_cpu(ptr->len0);
		last = len & MT_TXD_LEN_MSDU_LAST;
		len &= ~MT_TXD_LEN_MSDU_LAST;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
				 DMA_TO_DEVICE);
		if (last)
			break;

		len = le16_to_cpu(ptr->len1);
		last = len & MT_TXD_LEN_MSDU_LAST;
		len &= ~MT_TXD_LEN_MSDU_LAST;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
				 DMA_TO_DEVICE);
		if (last)
			break;
	}
}

/* Dispatch TXP unmap to the firmware or hardware layout depending on
 * the chip generation.
 */
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
			  struct mt76_txwi_cache *t)
{
	struct mt7615_txp_common *txp;

	txp = mt7615_txwi_to_txp(dev, t);
	if (is_mt7615(dev))
		mt7615_txp_skb_unmap_fw(dev, &txp->fw);
	else
		mt7615_txp_skb_unmap_hw(dev, &txp->hw);
}

/* Register address of WTBL entry @wcid */
static u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid)
{
	return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE;
}

/* Trigger a WTBL update for entry @idx with extra @mask flags and wait
 * (up to 5ms) for the hardware to finish.  Returns true on success.
 */
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

/* Drain dev->sta_poll_list: read per-AC TX/RX airtime deltas from each
 * station's WTBL entry and report them to mac80211.
 */
void mt7615_mac_sta_poll(struct mt7615_dev *dev)
{
	/* representative TID for each AC when reporting airtime */
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	/* hardware queue order differs from mac80211 AC order */
	static const u8 hw_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	struct ieee80211_sta *sta;
	struct mt7615_sta *msta;
	u32 addr, tx_time[4], rx_time[4];
	int i;

	rcu_read_lock();

	while (true) {
		bool clear = false;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&dev->sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&dev->sta_poll_list,
					struct mt7615_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		/* airtime counters start at WTBL word 19; each AC has a
		 * TX and an RX counter (4 bytes apart)
		 */
		addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;

		for (i = 0; i < 4; i++, addr += 8) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* clear hardware counters before they overflow */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;
		}

		if (clear) {
			mt7615_mac_wtbl_update(dev, msta->wcid.idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < 4; i++) {
			u32 tx_cur = tx_time[i];
			u32 rx_cur = rx_time[hw_queue_map[i]];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}

	rcu_read_unlock();
}

/* Program the WTBL rate table for @sta from up to four fallback rates
 * plus an optional probe rate.  Two rate sets are kept and toggled via
 * bit 0 of rate_set_tsf so TX status can be matched against the set
 * that was active when the frame was queued.
 */
void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
			  struct ieee80211_tx_rate *probe_rate,
			  struct ieee80211_tx_rate *rates)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_tx_rate *ref;
	int wcid = sta->wcid.idx;
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid);
	bool stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev, bw_idx = 0;
	u16 val[4];
	u16 probe_val;
	u32 w5, w27;
	bool rateset;
	int i, k;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	/* pad the table by repeating the last valid rate */
	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	/* write into the rate set that is currently inactive */
	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (k = 0; k < i; k++) {
			if (rates[i].idx != rates[k].idx)
				continue;
			if ((rates[i].flags ^ rates[k].flags) &
			    (IEEE80211_TX_RC_40_MHZ_WIDTH |
			     IEEE80211_TX_RC_80_MHZ_WIDTH |
			     IEEE80211_TX_RC_160_MHZ_WIDTH))
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	/* bw_idx records the position of the first bandwidth change in
	 * the rate sequence; the hardware downgrades bandwidth from
	 * that index onwards (W5 CHANGE_BW_RATE below)
	 */
	val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
						   stbc, &bw);
		if (bw)
			bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		probe_val = val[0];
	}

	val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
	if (bw_prev) {
		bw_idx = 3;
		bw_prev = bw;
	}

	val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
	if (bw_prev) {
		bw_idx = 5;
		bw_prev = bw;
	}

	val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
	if (bw_prev)
		bw_idx = 7;

	w27 = mt76_rr(dev, addr + 27 * 4);
	w27 &= ~MT_WTBL_W27_CC_BW_SEL;
	w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw);

	w5 = mt76_rr(dev, addr + 5 * 4);
	w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
		MT_WTBL_W5_MPDU_OK_COUNT |
		MT_WTBL_W5_MPDU_FAIL_COUNT |
		MT_WTBL_W5_RATE_IDX);
	w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) |
	      FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w5);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	mt76_wr(dev, addr + 27 * 4, w27);

	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
	/* store the TSF of the switch, with bit 0 encoding which rate
	 * set is now active (see rateset above)
	 */
	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}

/* Map a mac80211 cipher suite to the MT7615 hardware cipher id;
 * MT_CIPHER_NONE means the suite is not hardware-accelerated.
 */
static enum mt7615_cipher_type
mt7615_mac_get_cipher(int cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		return MT_CIPHER_BIP_CMAC_128;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return MT_CIPHER_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return MT_CIPHER_GCMP;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return MT_CIPHER_GCMP_256;
	case WLAN_CIPHER_SUITE_SMS4:
		return MT_CIPHER_WAPI;
	default:
		return MT_CIPHER_NONE;
	}
}

static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid, 965 struct ieee80211_key_conf *key, 966 enum mt7615_cipher_type cipher, 967 enum set_key_cmd cmd) 968 { 969 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4; 970 u8 data[32] = {}; 971 972 if (key->keylen > sizeof(data)) 973 return -EINVAL; 974 975 mt76_rr_copy(dev, addr, data, sizeof(data)); 976 if (cmd == SET_KEY) { 977 if (cipher == MT_CIPHER_TKIP) { 978 /* Rx/Tx MIC keys are swapped */ 979 memcpy(data + 16, key->key + 24, 8); 980 memcpy(data + 24, key->key + 16, 8); 981 } 982 if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher) 983 memmove(data + 16, data, 16); 984 if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) 985 memcpy(data, key->key, key->keylen); 986 else if (cipher == MT_CIPHER_BIP_CMAC_128) 987 memcpy(data + 16, key->key, 16); 988 } else { 989 if (wcid->cipher & ~BIT(cipher)) { 990 if (cipher != MT_CIPHER_BIP_CMAC_128) 991 memmove(data, data + 16, 16); 992 memset(data + 16, 0, 16); 993 } else { 994 memset(data, 0, sizeof(data)); 995 } 996 } 997 mt76_wr_copy(dev, addr, data, sizeof(data)); 998 999 return 0; 1000 } 1001 1002 static int 1003 mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid, 1004 enum mt7615_cipher_type cipher, int keyidx, 1005 enum set_key_cmd cmd) 1006 { 1007 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1; 1008 1009 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 1010 return -ETIMEDOUT; 1011 1012 w0 = mt76_rr(dev, addr); 1013 w1 = mt76_rr(dev, addr + 4); 1014 if (cmd == SET_KEY) { 1015 w0 |= MT_WTBL_W0_RX_KEY_VALID | 1016 FIELD_PREP(MT_WTBL_W0_RX_IK_VALID, 1017 cipher == MT_CIPHER_BIP_CMAC_128); 1018 if (cipher != MT_CIPHER_BIP_CMAC_128 || 1019 !wcid->cipher) 1020 w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); 1021 } else { 1022 if (!(wcid->cipher & ~BIT(cipher))) 1023 w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | 1024 MT_WTBL_W0_KEY_IDX); 1025 if (cipher == MT_CIPHER_BIP_CMAC_128) 1026 w0 &= 
~MT_WTBL_W0_RX_IK_VALID; 1027 } 1028 mt76_wr(dev, MT_WTBL_RICR0, w0); 1029 mt76_wr(dev, MT_WTBL_RICR1, w1); 1030 1031 if (!mt7615_mac_wtbl_update(dev, wcid->idx, 1032 MT_WTBL_UPDATE_RXINFO_UPDATE)) 1033 return -ETIMEDOUT; 1034 1035 return 0; 1036 } 1037 1038 static void 1039 mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid, 1040 enum mt7615_cipher_type cipher, 1041 enum set_key_cmd cmd) 1042 { 1043 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx); 1044 1045 if (cmd == SET_KEY) { 1046 if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) 1047 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, 1048 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); 1049 } else { 1050 if (cipher != MT_CIPHER_BIP_CMAC_128 && 1051 wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128)) 1052 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, 1053 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, 1054 MT_CIPHER_BIP_CMAC_128)); 1055 else if (!(wcid->cipher & ~BIT(cipher))) 1056 mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); 1057 } 1058 } 1059 1060 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, 1061 struct mt76_wcid *wcid, 1062 struct ieee80211_key_conf *key, 1063 enum set_key_cmd cmd) 1064 { 1065 enum mt7615_cipher_type cipher; 1066 int err; 1067 1068 cipher = mt7615_mac_get_cipher(key->cipher); 1069 if (cipher == MT_CIPHER_NONE) 1070 return -EOPNOTSUPP; 1071 1072 spin_lock_bh(&dev->mt76.lock); 1073 1074 mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd); 1075 err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd); 1076 if (err < 0) 1077 goto out; 1078 1079 err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, 1080 cmd); 1081 if (err < 0) 1082 goto out; 1083 1084 if (cmd == SET_KEY) 1085 wcid->cipher |= BIT(cipher); 1086 else 1087 wcid->cipher &= ~BIT(cipher); 1088 1089 out: 1090 spin_unlock_bh(&dev->mt76.lock); 1091 1092 return err; 1093 } 1094 1095 static void 1096 mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info, 1097 void *txp_ptr, u32 id) 1098 { 
1099 struct mt7615_hw_txp *txp = txp_ptr; 1100 struct mt7615_txp_ptr *ptr = &txp->ptr[0]; 1101 int nbuf = tx_info->nbuf - 1; 1102 int i; 1103 1104 tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); 1105 tx_info->nbuf = 1; 1106 1107 txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID); 1108 1109 for (i = 0; i < nbuf; i++) { 1110 u32 addr = tx_info->buf[i + 1].addr; 1111 u16 len = tx_info->buf[i + 1].len; 1112 1113 if (i == nbuf - 1) 1114 len |= MT_TXD_LEN_MSDU_LAST | 1115 MT_TXD_LEN_AMSDU_LAST; 1116 1117 if (i & 1) { 1118 ptr->buf1 = cpu_to_le32(addr); 1119 ptr->len1 = cpu_to_le16(len); 1120 ptr++; 1121 } else { 1122 ptr->buf0 = cpu_to_le32(addr); 1123 ptr->len0 = cpu_to_le16(len); 1124 } 1125 } 1126 } 1127 1128 static void 1129 mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info, 1130 void *txp_ptr, u32 id) 1131 { 1132 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; 1133 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); 1134 struct ieee80211_key_conf *key = info->control.hw_key; 1135 struct ieee80211_vif *vif = info->control.vif; 1136 struct mt7615_fw_txp *txp = txp_ptr; 1137 int nbuf = tx_info->nbuf - 1; 1138 int i; 1139 1140 for (i = 0; i < nbuf; i++) { 1141 txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); 1142 txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len); 1143 } 1144 txp->nbuf = nbuf; 1145 1146 /* pass partial skb header to fw */ 1147 tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); 1148 tx_info->buf[1].len = MT_CT_PARSE_LEN; 1149 tx_info->nbuf = MT_CT_DMA_BUF_NUM; 1150 1151 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD); 1152 1153 if (!key) 1154 txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); 1155 1156 if (ieee80211_is_mgmt(hdr->frame_control)) 1157 txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); 1158 1159 if (vif) { 1160 struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 1161 1162 txp->bss_idx = mvif->idx; 1163 } 1164 1165 txp->token = cpu_to_le16(id); 1166 
/* tail of mt7615_write_fw_txp() — 0xff disables WDS wcid reporting */
	txp->rept_wds_wcid = 0xff;
}

/* mt76 .tx_prepare_skb hook: fill TXWI + TX path descriptor for one frame.
 *
 * Allocates a token (idr) that maps the hardware completion back to the
 * txwi cache entry, optionally arms a rate probe for rate-control probing
 * frames, and selects the FW vs HW txp format by chip generation.
 * Returns 0 on success or a negative errno if no token is available.
 */
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid, id;
	u8 *txwi = (u8 *)txwi_ptr;
	struct mt76_txwi_cache *t;
	void *txp;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		struct mt7615_phy *phy = &dev->phy;

		/* probe frames on the second PHY use its private state */
		if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
			phy = mdev->phy2->priv;

		spin_lock_bh(&dev->mt76.lock);
		mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
				     msta->rates);
		msta->rate_probe = true;
		spin_unlock_bh(&dev->mt76.lock);
	}

	/* txwi cache entry lives right behind the TXWI in the DMA buffer */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);
	if (id < 0)
		return id;

	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
			      pid, key, false);

	txp = txwi + MT_TXD_SIZE;
	memset(txp, 0, sizeof(struct mt7615_txp_common));
	if (is_mt7615(&dev->mt76))
		mt7615_write_fw_txp(dev, tx_info, txp, id);
	else
		mt7615_write_hw_txp(dev, tx_info, txp, id);

	/* skb ownership moved to the token; signal mt76 not to unmap it */
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}

/* Parse a TXS (TX status) event into @info rate/ack fields.
 * Returns false when the report must be discarded (RTS/queue timeout,
 * out-of-range rate). Body continues across the following chunks.
 */
static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
			    struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct
/* continuation of mt7615_fill_txs() — completes the `struct` declaration
 * cut at the previous chunk boundary
 */
ieee80211_supported_band *sband;
	struct mt7615_rate_set *rs;
	struct mt76_phy *mphy;
	int first_idx = 0, last_idx;
	int i, idx, count;
	bool fixed_rate, ack_timeout;
	bool probe, ampdu, cck = false;
	bool rs_idx;
	u32 rate_set_tsf;
	u32 final_rate, final_rate_flags, final_nss, txs;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[1]);
	ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);

	txs = le32_to_cpu(txs_data[3]);
	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	/* drop unusable reports */
	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count + 1) / MT7615_RATE_RETRY);

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

	/* pick the rate-set generation that was live when the frame was
	 * queued: TSF distance < 1s selects the matching set, parity of
	 * the stored TSF selects between the two slots
	 */
	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		/* probe completed: restore the regular rate table */
		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			struct mt7615_phy *phy = &dev->phy;

			if (sta->wcid.ext_phy && dev->mt76.phy2)
				phy = dev->mt76.phy2->priv;
/* continuation of mt7615_fill_txs(): finish the probe-restore branch,
 * then walk the retry chain and resolve the final rate per PHY mode
 */
			mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
			sta->rate_probe = false;
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		/* two retry steps per rate-set entry, hence idx / 2 */
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	/* distribute the hardware retry count over the rates actually used */
	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7615_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				/* report array full: fold the rest into
				 * the last slot
				 */
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	/* translate the raw hardware rate into mac80211 encoding */
	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		mphy = &dev->mphy;
		if (sta->wcid.ext_phy && dev->mt76.phy2)
			mphy = dev->mt76.phy2;

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else
			sband = &mphy->sband_2g.sband;
		final_rate &= MT_TX_RATE_IDX;
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= MT_TX_RATE_IDX;
		if (final_rate > 31)
			return false;
		break;
	case MT_PHY_TYPE_VHT:
		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);

		if ((final_rate & MT_TX_RATE_STBC) && final_nss)
			final_nss--;

		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
		break;
	default:
		return false;
/* tail of mt7615_fill_txs(): store the resolved final rate */
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

/* Match a TXS report against a pending status skb (by packet id) and
 * complete it. Returns true when an skb was found and completed.
 */
static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
				   struct mt7615_sta *sta, int pid,
				   __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* pids below MT_PACKET_ID_FIRST never have a tracked skb */
	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
			/* unusable report: clear status, mark no rate */
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

/* Handle one TXS event from the hardware: look up the reporting station,
 * queue it for polling, and deliver the status either via the tracked
 * skb or as a no-skb status report. Continues in the next chunk.
 */
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7615_sta *msta = NULL;
	struct mt76_wcid *wcid;
	struct mt76_phy *mphy = &dev->mt76.phy;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[0]);
	pid = FIELD_GET(MT_TXS0_PID, txs);
	txs = le32_to_cpu(txs_data[2]);
	wcidx = FIELD_GET(MT_TXS2_WCID, txs);

	/* no-ack frames carry no useful status */
	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7615_sta, wcid);
	sta = wcid_to_sta(wcid);

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	if
/* tail of mt7615_mac_add_txs(): the `if` condition was cut at the
 * previous chunk boundary
 */
	    (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	/* only real stations (not internal wcid entries) get a no-skb
	 * status report
	 */
	if (wcidx >= MT7615_WTBL_STA || !sta)
		goto out;

	if (wcid->ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	if (mt7615_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mphy->hw, sta, &info);

out:
	rcu_read_unlock();
}

/* Release one TX token: unmap the DMA buffers, complete the skb and
 * return the txwi cache entry to the pool.
 */
static void
mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;

	trace_mac_tx_free(dev, token);

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	/* token may already have been reclaimed (e.g. on reset) */
	if (!txwi)
		return;

	mt7615_txp_skb_unmap(mdev, txwi);
	if (txwi->skb) {
		mt76_tx_complete_skb(mdev, txwi->skb);
		txwi->skb = NULL;
	}

	mt76_put_txwi(mdev, txwi);
}

/* Process a TX-free event: release every token listed in the message.
 * mt7615 firmware reports 16-bit tokens, newer chips 32-bit ones.
 */
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
	u8 i, count;

	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
	if (is_mt7615(&dev->mt76)) {
		__le16 *token = &free->token[0];

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
	} else {
		__le32 *token = (__le32 *)&free->token[0];

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
	}

	dev_kfree_skb(skb);
}

/* Reset the smart-carrier-sense thresholds of @phy to their defaults
 * (0x13c/0x92 are the hardware encodings of -98/-110 dBm — see the
 * val computation in mt7615_mac_adjust_sensitivity()).
 * Tail continues in the next chunk.
 */
static void
mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
		 MT_WF_PHY_PD_OFDM_MASK(ext_phy),
		 MT_WF_PHY_PD_OFDM(ext_phy, 0x13c));
	mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
		 MT_WF_PHY_PD_CCK_MASK(ext_phy),
		 MT_WF_PHY_PD_CCK(ext_phy, 0x92));
/* tail of mt7615_mac_set_default_sensitivity(): mirror the defaults in
 * software state and restart the adjustment timer
 */
	phy->ofdm_sensitivity = -98;
	phy->cck_sensitivity = -110;
	phy->last_cca_adj = jiffies;
}

/* Enable/disable smart carrier sense (SCS) on both PHYs.
 * No-op if the state is unchanged or on mt7663 (unsupported there).
 */
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable)
{
	struct mt7615_phy *ext_phy;

	mutex_lock(&dev->mt76.mutex);

	if (dev->scs_en == enable)
		goto out;

	if (is_mt7663(&dev->mt76))
		goto out;

	if (enable) {
		mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(0),
			 MT_WF_PHY_PD_BLK(0));
		mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(1),
			 MT_WF_PHY_PD_BLK(1));
		if (is_mt7622(&dev->mt76)) {
			/* NOTE(review): magic MIB bits, mt7622-specific;
			 * meaning not documented here
			 */
			mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8);
			mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7);
		}
	} else {
		mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(0),
			   MT_WF_PHY_PD_BLK(0));
		mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(1),
			   MT_WF_PHY_PD_BLK(1));
	}

	/* start from known thresholds on every state change */
	mt7615_mac_set_default_sensitivity(&dev->phy);
	ext_phy = mt7615_ext_phy(dev);
	if (ext_phy)
		mt7615_mac_set_default_sensitivity(ext_phy);

	dev->scs_en = enable;

out:
	mutex_unlock(&dev->mt76.mutex);
}

/* Enable noise-floor measurement on the given PHY (not supported on
 * mt7663). The RXTD/PHYMUX bit meanings are hardware-specific.
 */
void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
{
	u32 rxtd;

	if (is_mt7663(&dev->mt76))
		return;

	if (ext_phy)
		rxtd = MT_WF_PHY_RXTD2(10);
	else
		rxtd = MT_WF_PHY_RXTD(12);

	mt76_set(dev, rxtd, BIT(18) | BIT(29));
	mt76_set(dev, MT_WF_PHY_R0_PHYMUX_5(ext_phy), 0x5 << 12);
}

/* Reset the CCA statistics counters read by mt7615_mac_scs_check(). */
void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);

	mt76_clear(dev, reg, GENMASK(22, 20));
	mt76_set(dev, reg, BIT(22) | BIT(20));
}

/* Adjust the OFDM or CCK energy-detect threshold based on false-CCA
 * counts and the RTS error rate. Body continues in the next chunk.
 */
static void
mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
			      u32 rts_err_rate, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	int false_cca = ofdm ?
phy->false_cca_ofdm : phy->false_cca_cck; 1589 bool ext_phy = phy != &dev->phy; 1590 u16 def_th = ofdm ? -98 : -110; 1591 bool update = false; 1592 s8 *sensitivity; 1593 int signal; 1594 1595 sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity; 1596 signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy); 1597 if (!signal) { 1598 mt7615_mac_set_default_sensitivity(phy); 1599 return; 1600 } 1601 1602 signal = min(signal, -72); 1603 if (false_cca > 500) { 1604 if (rts_err_rate > MT_FRAC(40, 100)) 1605 return; 1606 1607 /* decrease coverage */ 1608 if (*sensitivity == def_th && signal > -90) { 1609 *sensitivity = -90; 1610 update = true; 1611 } else if (*sensitivity + 2 < signal) { 1612 *sensitivity += 2; 1613 update = true; 1614 } 1615 } else if ((false_cca > 0 && false_cca < 50) || 1616 rts_err_rate > MT_FRAC(60, 100)) { 1617 /* increase coverage */ 1618 if (*sensitivity - 2 >= def_th) { 1619 *sensitivity -= 2; 1620 update = true; 1621 } 1622 } 1623 1624 if (*sensitivity > signal) { 1625 *sensitivity = signal; 1626 update = true; 1627 } 1628 1629 if (update) { 1630 u16 val; 1631 1632 if (ofdm) { 1633 val = *sensitivity * 2 + 512; 1634 mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy), 1635 MT_WF_PHY_PD_OFDM_MASK(ext_phy), 1636 MT_WF_PHY_PD_OFDM(ext_phy, val)); 1637 } else { 1638 val = *sensitivity + 256; 1639 mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy), 1640 MT_WF_PHY_PD_CCK_MASK(ext_phy), 1641 MT_WF_PHY_PD_CCK(ext_phy, val)); 1642 } 1643 phy->last_cca_adj = jiffies; 1644 } 1645 } 1646 1647 static void 1648 mt7615_mac_scs_check(struct mt7615_phy *phy) 1649 { 1650 struct mt7615_dev *dev = phy->dev; 1651 struct mib_stats *mib = &phy->mib; 1652 u32 val, rts_err_rate = 0; 1653 u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm; 1654 bool ext_phy = phy != &dev->phy; 1655 1656 if (!dev->scs_en) 1657 return; 1658 1659 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); 1660 pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val); 1661 pd_ofdm = 
/* tail of mt7615_mac_scs_check(): the pd_ofdm assignment was cut at the
 * previous chunk boundary
 */
		  FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);

	val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
	mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);

	/* false CCA = energy detected without a decodable preamble */
	phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	phy->false_cca_cck = pd_cck - mdrdy_cck;
	mt7615_mac_cca_stats_reset(phy);

	if (mib->rts_cnt + mib->rts_retries_cnt)
		rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
				       mib->rts_cnt + mib->rts_retries_cnt);

	/* cck */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
	/* ofdm */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);

	/* no adjustment for 10s: revert to the default thresholds */
	if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
		mt7615_mac_set_default_sensitivity(phy);
}

/* Read the noise-floor histogram registers and return the weighted
 * average noise power (positive magnitude), or 0 if no samples.
 */
static u8
mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	u32 reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);
	u32 val, sum = 0, n = 0;
	int i;

	/* consecutive registers hold per-bucket sample counts */
	for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
		val = mt76_rr(dev, reg);
		sum += val * nf_power[i];
		n += val;
	}

	if (!n)
		return 0;

	return sum / n;
}

/* Accumulate channel busy/tx/rx airtime and noise for survey reporting.
 * Body continues in the next chunk.
 */
static void
mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
	struct mt7615_phy *phy = mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	u32 obss_reg = idx ?
/* tail of mt7615_phy_update_channel(): obss_reg selection was cut at
 * the previous chunk boundary
 */
		       MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);

	/* exponential moving average of the noise floor, kept in
	 * fixed-point with 4 fractional bits
	 */
	nf = mt7615_phy_get_nf(dev, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}

/* mt76 .update_survey hook: refresh channel state for both PHYs and
 * reset the OBSS airtime counters.
 */
void mt7615_update_channel(struct mt76_dev *mdev)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);

	mt7615_phy_update_channel(&mdev->phy, 0);
	if (mdev->phy2)
		mt7615_phy_update_channel(mdev->phy2, 1);

	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

/* Snapshot MIB counters for @phy. The per-AC loop keeps the worst-case
 * ack-fail count and the RTS pair with the highest retry count.
 */
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	bool ext_phy = phy != &dev->phy;
	int i;

	memset(mib, 0, sizeof(*mib));

	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
					  MT_MIB_SDR3_FCS_ERR_MASK);

	for (i = 0; i < 4; i++) {
		u32 data, val, val2;

		val = mt76_get_field(dev, MT_MIB_MB_SDR1(ext_phy, i),
				     MT_MIB_ACK_FAIL_COUNT_MASK);
		if (val > mib->ack_fail_cnt)
			mib->ack_fail_cnt = val;

		val2 = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
		data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val2);
		if (data > mib->rts_retries_cnt) {
			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val2);
			mib->rts_retries_cnt = data;
		}
	}
}
/* Periodic MAC maintenance worker: survey update, MIB/SCS checks every
 * 5th run, aggregation statistics, pending TX status timeout check.
 */
void mt7615_mac_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	struct mt7615_phy *ext_phy;
	int i, idx;

	/* mac_work lives in the embedded mt76_dev; the cast relies on
	 * mt76 being the first member of mt7615_dev
	 */
	dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
						mac_work.work);

	mutex_lock(&dev->mt76.mutex);
	mt76_update_survey(&dev->mt76);
	if (++dev->mac_work_count == 5) {
		ext_phy = mt7615_ext_phy(dev);

		mt7615_mac_update_mib_stats(&dev->phy);
		mt7615_mac_scs_check(&dev->phy);
		if (ext_phy) {
			mt7615_mac_update_mib_stats(ext_phy);
			mt7615_mac_scs_check(ext_phy);
		}

		dev->mac_work_count = 0;
	}

	/* each register packs two 16-bit aggregation counters */
	for (i = 0, idx = 0; i < 4; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mt76.aggr_stats[idx++] += val & 0xffff;
		dev->mt76.aggr_stats[idx++] += val >> 16;
	}
	mutex_unlock(&dev->mt76.mutex);

	mt76_tx_status_check(&dev->mt76, NULL, false);
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT7615_WATCHDOG_TIME);
}

/* Wait until the MCU reports the requested reset @state bit(s).
 * Returns false (with a WARN) on timeout.
 */
static bool
mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->reset_state) & state),
				 MT7615_RESET_TIMEOUT);
	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

/* Interface iterator callback: re-install the beacon template of @vif
 * after a chip reset. @priv is the ieee80211_hw of the iterated PHY.
 */
static void
mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;
	struct mt7615_dev *dev = mt7615_hw_dev(hw);

	mt7615_mcu_add_beacon(dev, hw, vif, vif->bss_conf.enable_beacon);
}

/* Refresh beacon templates on all active interfaces of both PHYs.
 * Tail continues in the next chunk.
 */
static void
mt7615_update_beacons(struct mt7615_dev *dev)
{
	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7615_update_vif_beacon, dev->mt76.hw);

	if (!dev->mt76.phy2)
		return;

	ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
/* tail of mt7615_update_beacons(): remaining iterator arguments */
					    mt7615_update_vif_beacon, dev->mt76.phy2->hw);
}

/* Quiesce and restart the WPDMA engine: stop TX/RX DMA, drain all TX
 * queues, reset RX queues, then re-enable DMA.
 */
static void
mt7615_dma_reset(struct mt7615_dev *dev)
{
	int i;

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	/* give in-flight DMA a chance to settle before queue cleanup */
	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

/* Full chip reset handshake with the MCU, triggered when the firmware
 * requests MT_MCU_CMD_STOP_PDMA: stop all traffic and NAPI, reset the
 * DMA engine, then bring everything back up.
 * Tail continues in the next chunk.
 */
void mt7615_mac_reset_work(struct work_struct *work)
{
	struct mt7615_dev *dev;

	dev = container_of(work, struct mt7615_dev, reset_work);

	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (dev->mt76.phy2)
		ieee80211_stop_queues(dev->mt76.phy2->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mt76.mac_work);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (dev->mt76.phy2)
		mt76_txq_schedule_all(dev->mt76.phy2);

	tasklet_disable(&dev->mt76.tx_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	/* tell the MCU PDMA is stopped, wait for its reset, then ack */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);

	if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7615_dma_reset(dev);

		mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT);
		mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET,
/* tail of mt7615_mac_reset_work(): re-enable traffic and complete the
 * MCU handshake
 */
		  &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(mt76_hw(dev));
	if (dev->mt76.phy2)
		ieee80211_wake_queues(dev->mt76.phy2->hw);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mutex_unlock(&dev->mt76.mutex);

	/* beacon templates were lost across the reset */
	mt7615_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT7615_WATCHDOG_TIME);
}

/* Stop radar detection on every RX chain that was started (rdd_state
 * tracks chain 0/1 via BIT(0)/BIT(1)).
 */
static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
}

/* Start radar detection on @chain and switch it to detection mode.
 * Returns 0 or a negative MCU error.
 */
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
	int err;

	err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}

/* Start CAC and radar detection for @phy; wide channels (160/80+80)
 * additionally need chain 1. Tail continues in the next chunk.
 */
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* start CAC */
	err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7615_dfs_start_rdd(dev, ext_phy);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(ext_phy);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width ==
/* tail of mt7615_dfs_start_radar_detector(): wide channels also need
 * detection on chain 1
 */
	    NL80211_CHAN_WIDTH_80P80) {
		err = mt7615_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

/* Push the region-specific radar pulse/pattern thresholds (file-scope
 * fcc/etsi/jp tables) to the MCU. Returns 0 or a negative MCU error;
 * -EINVAL for an unknown DFS region.
 */
static int
mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
{
	const struct mt7615_dfs_radar_spec *radar_specs;
	struct mt7615_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		/* FCC additionally requires the type-5 long-pulse number */
		err = mt7615_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7615_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

/* (Re)configure the radar detector for the current channel/region state.
 * dfs_state == -1 marks "needs reconfiguration". Tail (the stop path)
 * continues in the next chunk.
 */
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	if (dev->mt76.region == NL80211_DFS_UNSET) {
		phy->dfs_state = -1;
		if (phy->rdd_state)
			goto stop;

		return 0;
	}

	if (test_bit(MT76_SCANNING, &phy->mt76->state))
		return 0;

	/* nothing to do if the channel DFS state is unchanged */
	if (phy->dfs_state == chandef->chan->dfs_state)
		return 0;

	err = mt7615_dfs_init_radar_specs(phy);
	if (err < 0) {
		phy->dfs_state = -1;
		goto stop;
	}

	phy->dfs_state = chandef->chan->dfs_state;

	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
			return mt7615_dfs_start_radar_detector(phy);

		/* channel already cleared by CAC: just end CAC */
		return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
					  MT_RX_SEL0, 0);
	}

/* tail of mt7615_dfs_init_radar_detector(): non-DFS path — return the
 * radio to normal RX operation and stop any running detectors
 */
stop:
	err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7615_dfs_stop_radar_detector(phy);
	return 0;
}