1 // SPDX-License-Identifier: ISC 2 /* Copyright (C) 2019 MediaTek Inc. 3 * 4 * Author: Ryder Lee <ryder.lee@mediatek.com> 5 * Roy Luo <royluo@google.com> 6 * Felix Fietkau <nbd@nbd.name> 7 * Lorenzo Bianconi <lorenzo@kernel.org> 8 */ 9 10 #include <linux/etherdevice.h> 11 #include <linux/timekeeping.h> 12 #include "mt7615.h" 13 #include "../trace.h" 14 #include "../dma.h" 15 #include "mt7615_trace.h" 16 #include "mac.h" 17 18 #define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) 19 20 static const struct mt7615_dfs_radar_spec etsi_radar_specs = { 21 .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 }, 22 .radar_pattern = { 23 [5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 }, 24 [6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 }, 25 [7] = { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 }, 26 [8] = { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 }, 27 [9] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 }, 28 [10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 }, 29 [11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 }, 30 [12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 }, 31 }, 32 }; 33 34 static const struct mt7615_dfs_radar_spec fcc_radar_specs = { 35 .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 }, 36 .radar_pattern = { 37 [0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 }, 38 [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 }, 39 [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 }, 40 [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 }, 41 [4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 }, 42 }, 43 }; 44 45 static const struct mt7615_dfs_radar_spec jp_radar_specs = { 46 .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 }, 47 .radar_pattern = { 48 [0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 }, 49 [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 }, 50 [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 }, 51 [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 }, 52 [4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 }, 53 [13] = { 1, 0, 8, 32, 28, 0, 
14, 3836, 3856, 1, 1 }, 54 [14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 }, 55 }, 56 }; 57 58 static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev, 59 u8 idx, bool unicast) 60 { 61 struct mt7615_sta *sta; 62 struct mt76_wcid *wcid; 63 64 if (idx >= MT7615_WTBL_SIZE) 65 return NULL; 66 67 wcid = rcu_dereference(dev->mt76.wcid[idx]); 68 if (unicast || !wcid) 69 return wcid; 70 71 if (!wcid->sta) 72 return NULL; 73 74 sta = container_of(wcid, struct mt7615_sta, wcid); 75 if (!sta->vif) 76 return NULL; 77 78 return &sta->vif->sta.wcid; 79 } 80 81 void mt7615_mac_reset_counters(struct mt7615_dev *dev) 82 { 83 int i; 84 85 for (i = 0; i < 4; i++) { 86 mt76_rr(dev, MT_TX_AGG_CNT(0, i)); 87 mt76_rr(dev, MT_TX_AGG_CNT(1, i)); 88 } 89 90 memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats)); 91 dev->mt76.phy.survey_time = ktime_get_boottime(); 92 if (dev->mt76.phy2) 93 dev->mt76.phy2->survey_time = ktime_get_boottime(); 94 95 /* reset airtime counters */ 96 mt76_rr(dev, MT_MIB_SDR9(0)); 97 mt76_rr(dev, MT_MIB_SDR9(1)); 98 99 mt76_rr(dev, MT_MIB_SDR36(0)); 100 mt76_rr(dev, MT_MIB_SDR36(1)); 101 102 mt76_rr(dev, MT_MIB_SDR37(0)); 103 mt76_rr(dev, MT_MIB_SDR37(1)); 104 105 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 106 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 107 } 108 109 void mt7615_mac_set_timing(struct mt7615_phy *phy) 110 { 111 s16 coverage_class = phy->coverage_class; 112 struct mt7615_dev *dev = phy->dev; 113 bool ext_phy = phy != &dev->phy; 114 u32 val, reg_offset; 115 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | 116 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); 117 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | 118 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); 119 int sifs, offset; 120 bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ; 121 122 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) 123 return; 124 125 if (is_5ghz) 126 sifs = 16; 127 else 128 sifs = 10; 129 130 if (ext_phy) { 131 
coverage_class = max_t(s16, dev->phy.coverage_class, 132 coverage_class); 133 mt76_set(dev, MT_ARB_SCR, 134 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE); 135 } else { 136 struct mt7615_phy *phy_ext = mt7615_ext_phy(dev); 137 138 if (phy_ext) 139 coverage_class = max_t(s16, phy_ext->coverage_class, 140 coverage_class); 141 mt76_set(dev, MT_ARB_SCR, 142 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE); 143 } 144 udelay(1); 145 146 offset = 3 * coverage_class; 147 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | 148 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); 149 mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset); 150 mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset); 151 152 mt76_wr(dev, MT_TMAC_ICR(ext_phy), 153 FIELD_PREP(MT_IFS_EIFS, 360) | 154 FIELD_PREP(MT_IFS_RIFS, 2) | 155 FIELD_PREP(MT_IFS_SIFS, sifs) | 156 FIELD_PREP(MT_IFS_SLOT, phy->slottime)); 157 158 if (phy->slottime < 20 || is_5ghz) 159 val = MT7615_CFEND_RATE_DEFAULT; 160 else 161 val = MT7615_CFEND_RATE_11B; 162 163 mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val); 164 if (ext_phy) 165 mt76_clear(dev, MT_ARB_SCR, 166 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE); 167 else 168 mt76_clear(dev, MT_ARB_SCR, 169 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE); 170 171 } 172 173 static void 174 mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy, 175 struct mt76_rx_status *status, u8 chfreq) 176 { 177 if (!test_bit(MT76_HW_SCANNING, &mphy->state) && 178 !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) && 179 !test_bit(MT76_STATE_ROC, &mphy->state)) { 180 status->freq = mphy->chandef.chan->center_freq; 181 status->band = mphy->chandef.chan->band; 182 return; 183 } 184 185 status->band = chfreq <= 14 ? 
/* Parse the RX descriptor (RXD words plus optional descriptor groups) of a
 * received frame and fill in the mt76_rx_status stored in skb->cb.
 *
 * Returns 0 on success, -EINVAL when the descriptor is malformed or
 * truncated, or when the frame does not match any active PHY's channel.
 * On success the descriptor words are stripped from the skb so that
 * skb->data points at the 802.11 header.
 */
static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7615_phy *phy = &dev->phy;
	struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	/* A-MPDU timestamp, kept in LE form: only compared for equality */
	__le32 rxd12 = rxd[12];
	bool unicast, remove_pad, insert_ccmp_hdr = false;
	int phy_idx;
	int i, idx;
	u8 chfreq;

	memset(status, 0, sizeof(*status));

	/* Map the RXD channel number to a PHY: 0 = primary, 1 = secondary,
	 * -1 = ambiguous (both PHYs on the same channel, resolved later via
	 * per-chain noise in the group-3 descriptor).
	 */
	chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	if (!phy2)
		phy_idx = 0;
	else if (phy2->chfreq == phy->chfreq)
		phy_idx = -1;
	else if (phy->chfreq == chfreq)
		phy_idx = 0;
	else if (phy2->chfreq == chfreq)
		phy_idx = 1;
	else
		phy_idx = -1;

	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);

	/* queue the station for airtime polling (mt7615_mac_sta_poll) */
	if (status->wcid) {
		struct mt7615_sta *msta;

		msta = container_of(status->wcid, struct mt7615_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* hardware decrypted the frame and found no CCMP/TKIP MIC errors */
	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* skip the four mandatory RXD words, then each optional group that
	 * the hardware appended, validating against the buffer length
	 */
	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			/* IV bytes are stored in reverse order */
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			/* mac80211 needs the CCMP header back for defrag */
			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg5 = le32_to_cpu(rxd[5]);

		/*
		 * If both PHYs are on the same channel and we don't have a WCID,
		 * we need to figure out which PHY this packet was received on.
		 * On the primary PHY, the noise value for the chains belonging to the
		 * second PHY will be set to the noise value of the last packet from
		 * that PHY.
		 */
		if (phy_idx < 0) {
			int first_chain = ffs(phy2->chainmask) - 1;

			phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
		}
	}

	if (phy_idx == 1 && phy2) {
		mphy = dev->mt76.phy2;
		phy = phy2;
		status->ext_phy = true;
	}

	if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
		return -EINVAL;

	mt7615_get_status_freq_info(dev, mphy, status, chfreq);
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
		      MT_RXD2_NORMAL_NON_AMPDU))) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;

		/* all subframes of an A-MPDU have the same timestamp */
		if (phy->rx_ampdu_ts != rxd12) {
			/* skip ampdu_ref == 0: it means "no reference" */
			if (!++phy->ampdu_ref)
				phy->ampdu_ref++;
		}
		phy->rx_ampdu_ts = rxd12;

		status->ampdu_ref = phy->ampdu_ref;
	}

	/* group 3 also carries the RX vector: rate, bandwidth and RSSI */
	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg1 = le32_to_cpu(rxd[1]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
			status->encoding = RX_ENC_VHT;
			break;
		default:
			return -EINVAL;
		}
		status->rate_idx = i;

		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
		case MT_PHY_BW_20:
			break;
		case MT_PHY_BW_40:
			status->bw = RATE_INFO_BW_40;
			break;
		case MT_PHY_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case MT_PHY_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;

		/* per-chain RSSI; report the strongest chain as the signal */
		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
		status->signal = status->chain_signal[0];

		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
			if (!(status->chains & BIT(i)))
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* drop the descriptor (and hardware header padding, if any) */
	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	/* QoS data from a known station: record reorder/aggregation info */
	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}
IEEE80211_TX_RC_VHT_MCS) { 446 rate_idx = ieee80211_rate_get_vht_mcs(rate); 447 nss = ieee80211_rate_get_vht_nss(rate); 448 phy = MT_PHY_TYPE_VHT; 449 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 450 *bw = 1; 451 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) 452 *bw = 2; 453 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) 454 *bw = 3; 455 } else if (rate->flags & IEEE80211_TX_RC_MCS) { 456 rate_idx = rate->idx; 457 nss = 1 + (rate->idx >> 3); 458 phy = MT_PHY_TYPE_HT; 459 if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) 460 phy = MT_PHY_TYPE_HT_GF; 461 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 462 *bw = 1; 463 } else { 464 const struct ieee80211_rate *r; 465 int band = mphy->chandef.chan->band; 466 u16 val; 467 468 nss = 1; 469 r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx]; 470 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 471 val = r->hw_value_short; 472 else 473 val = r->hw_value; 474 475 phy = val >> 8; 476 rate_idx = val & 0xff; 477 } 478 479 if (stbc && nss == 1) { 480 nss++; 481 rateval |= MT_TX_RATE_STBC; 482 } 483 484 rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | 485 FIELD_PREP(MT_TX_RATE_MODE, phy) | 486 FIELD_PREP(MT_TX_RATE_NSS, nss - 1)); 487 488 return rateval; 489 } 490 491 int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, 492 struct sk_buff *skb, struct mt76_wcid *wcid, 493 struct ieee80211_sta *sta, int pid, 494 struct ieee80211_key_conf *key, bool beacon) 495 { 496 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 497 struct ieee80211_tx_rate *rate = &info->control.rates[0]; 498 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 499 bool multicast = is_multicast_ether_addr(hdr->addr1); 500 struct ieee80211_vif *vif = info->control.vif; 501 struct mt76_phy *mphy = &dev->mphy; 502 bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; 503 bool is_usb = mt76_is_usb(&dev->mt76); 504 int tx_count = 8; 505 u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 506 __le16 
fc = hdr->frame_control; 507 u32 val, sz_txd = is_usb ? MT_USB_TXD_SIZE : MT_TXD_SIZE; 508 u16 seqno = 0; 509 510 if (vif) { 511 struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 512 513 omac_idx = mvif->omac_idx; 514 wmm_idx = mvif->wmm_idx; 515 } 516 517 if (sta) { 518 struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; 519 520 tx_count = msta->rate_count; 521 } 522 523 if (ext_phy && dev->mt76.phy2) 524 mphy = dev->mt76.phy2; 525 526 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; 527 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; 528 529 if (beacon) { 530 p_fmt = MT_TX_TYPE_FW; 531 q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0; 532 } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { 533 p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; 534 q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; 535 } else { 536 p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; 537 q_idx = wmm_idx * MT7615_MAX_WMM_SETS + 538 mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); 539 } 540 541 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | 542 FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) | 543 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); 544 txwi[0] = cpu_to_le32(val); 545 546 val = MT_TXD1_LONG_FORMAT | 547 FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | 548 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | 549 FIELD_PREP(MT_TXD1_HDR_INFO, 550 ieee80211_get_hdrlen_from_skb(skb) / 2) | 551 FIELD_PREP(MT_TXD1_TID, 552 skb->priority & IEEE80211_QOS_CTL_TID_MASK) | 553 FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) | 554 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); 555 txwi[1] = cpu_to_le32(val); 556 557 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | 558 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | 559 FIELD_PREP(MT_TXD2_MULTICAST, multicast); 560 if (key) { 561 if (multicast && ieee80211_is_robust_mgmt_frame(skb) && 562 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { 563 val |= MT_TXD2_BIP; 564 txwi[3] = 0; 565 } else { 566 txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME); 567 
} 568 } else { 569 txwi[3] = 0; 570 } 571 txwi[2] = cpu_to_le32(val); 572 573 if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) 574 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); 575 576 txwi[4] = 0; 577 txwi[6] = 0; 578 579 if (rate->idx >= 0 && rate->count && 580 !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { 581 bool stbc = info->flags & IEEE80211_TX_CTL_STBC; 582 u8 bw; 583 u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc, 584 &bw); 585 586 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); 587 588 val = MT_TXD6_FIXED_BW | 589 FIELD_PREP(MT_TXD6_BW, bw) | 590 FIELD_PREP(MT_TXD6_TX_RATE, rateval); 591 txwi[6] |= cpu_to_le32(val); 592 593 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) 594 txwi[6] |= cpu_to_le32(MT_TXD6_SGI); 595 596 if (info->flags & IEEE80211_TX_CTL_LDPC) 597 txwi[6] |= cpu_to_le32(MT_TXD6_LDPC); 598 599 if (!(rate->flags & (IEEE80211_TX_RC_MCS | 600 IEEE80211_TX_RC_VHT_MCS))) 601 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); 602 603 tx_count = rate->count; 604 } 605 606 if (!ieee80211_is_beacon(fc)) { 607 struct ieee80211_hw *hw = mt76_hw(dev); 608 609 val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid); 610 if (!ieee80211_hw_check(hw, SUPPORTS_PS)) 611 val |= MT_TXD5_SW_POWER_MGMT; 612 txwi[5] = cpu_to_le32(val); 613 } else { 614 txwi[5] = 0; 615 /* use maximum tx count for beacons */ 616 tx_count = 0x1f; 617 } 618 619 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); 620 if (ieee80211_is_data_qos(hdr->frame_control)) { 621 seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 622 val |= MT_TXD3_SN_VALID; 623 } else if (ieee80211_is_back_req(hdr->frame_control)) { 624 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 625 626 seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num)); 627 val |= MT_TXD3_SN_VALID; 628 } 629 val |= FIELD_PREP(MT_TXD3_SEQ, seqno); 630 631 txwi[3] |= cpu_to_le32(val); 632 633 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 634 txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); 635 636 txwi[7] = 
FIELD_PREP(MT_TXD7_TYPE, fc_type) | 637 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | 638 FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); 639 if (is_usb) 640 txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | 641 FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); 642 643 return 0; 644 } 645 EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi); 646 647 static void 648 mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp) 649 { 650 int i; 651 652 for (i = 1; i < txp->nbuf; i++) 653 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), 654 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); 655 } 656 657 static void 658 mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp) 659 { 660 u32 last_mask; 661 int i; 662 663 last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST; 664 665 for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) { 666 struct mt7615_txp_ptr *ptr = &txp->ptr[i]; 667 bool last; 668 u16 len; 669 670 len = le16_to_cpu(ptr->len0); 671 last = len & last_mask; 672 len &= MT_TXD_LEN_MASK; 673 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len, 674 DMA_TO_DEVICE); 675 if (last) 676 break; 677 678 len = le16_to_cpu(ptr->len1); 679 last = len & last_mask; 680 len &= MT_TXD_LEN_MASK; 681 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len, 682 DMA_TO_DEVICE); 683 if (last) 684 break; 685 } 686 } 687 688 void mt7615_txp_skb_unmap(struct mt76_dev *dev, 689 struct mt76_txwi_cache *t) 690 { 691 struct mt7615_txp_common *txp; 692 693 txp = mt7615_txwi_to_txp(dev, t); 694 if (is_mt7615(dev)) 695 mt7615_txp_skb_unmap_fw(dev, &txp->fw); 696 else 697 mt7615_txp_skb_unmap_hw(dev, &txp->hw); 698 } 699 EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap); 700 701 bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask) 702 { 703 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, 704 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 705 706 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 707 0, 5000); 708 } 709 710 void mt7615_mac_sta_poll(struct mt7615_dev *dev) 
/* Build a mt7615_rate_desc from the rate table provided for @sta.
 *
 * Only the first sta->n_rates entries of @rates are valid; the remaining
 * slots are padded with the last valid rate. The normalized set is stored
 * in the currently inactive half of sta->rateset[] (double buffering,
 * selected by bit 0 of sta->rate_set_tsf) so that tx-status processing can
 * map a reported rate index back to the set active when the frame was sent.
 *
 * rd->bw_idx records the first retry slot at which the bandwidth changes
 * (consumed as the WTBL "change BW rate" field in mt7615_mac_set_rates).
 */
static void
mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta,
			    struct ieee80211_tx_rate *probe_rate,
			    struct ieee80211_tx_rate *rates,
			    struct mt7615_rate_desc *rd)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_tx_rate *ref;
	bool rateset, stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev;
	int i, j;

	/* pad unused slots with the last valid rate */
	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	/* write into the rate set not currently referenced by hardware */
	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		/* de-duplicate against all earlier entries with the same
		 * bandwidth by stepping the MCS index down (floor at 0)
		 */
		for (j = 0; j < i; j++) {
			if (rates[i].idx != rates[j].idx)
				continue;
			if ((rates[i].flags ^ rates[j].flags) &
			    (IEEE80211_TX_RC_40_MHZ_WIDTH |
			     IEEE80211_TX_RC_80_MHZ_WIDTH |
			     IEEE80211_TX_RC_160_MHZ_WIDTH))
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
						       stbc, &bw);
		if (bw)
			rd->bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		rd->probe_val = rd->val[0];
	}

	/* track the first hardware rate slot (1/3/5/7) whose bandwidth
	 * differs from the previous one; each rd->val[] occupies two slots
	 */
	rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 3;
		bw_prev = bw;
	}

	rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 5;
		bw_prev = bw;
	}

	rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
	if (bw_prev)
		rd->bw_idx = 7;

	rd->rateset = rateset;
	rd->bw = bw;
}
897 struct ieee80211_tx_rate *probe_rate, 898 struct ieee80211_tx_rate *rates) 899 { 900 int wcid = sta->wcid.idx, n_rates = sta->n_rates; 901 struct mt7615_dev *dev = phy->dev; 902 struct mt7615_rate_desc rd; 903 u32 w5, w27, addr; 904 905 if (mt76_is_usb(&dev->mt76)) { 906 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); 907 return; 908 } 909 910 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 911 return; 912 913 memset(&rd, 0, sizeof(struct mt7615_rate_desc)); 914 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd); 915 916 addr = mt7615_mac_wtbl_addr(dev, wcid); 917 w27 = mt76_rr(dev, addr + 27 * 4); 918 w27 &= ~MT_WTBL_W27_CC_BW_SEL; 919 w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw); 920 921 w5 = mt76_rr(dev, addr + 5 * 4); 922 w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | 923 MT_WTBL_W5_MPDU_OK_COUNT | 924 MT_WTBL_W5_MPDU_FAIL_COUNT | 925 MT_WTBL_W5_RATE_IDX); 926 w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) | 927 FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, 928 rd.bw_idx ? 
rd.bw_idx - 1 : 7); 929 930 mt76_wr(dev, MT_WTBL_RIUCR0, w5); 931 932 mt76_wr(dev, MT_WTBL_RIUCR1, 933 FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) | 934 FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) | 935 FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1])); 936 937 mt76_wr(dev, MT_WTBL_RIUCR2, 938 FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) | 939 FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) | 940 FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) | 941 FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2])); 942 943 mt76_wr(dev, MT_WTBL_RIUCR3, 944 FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) | 945 FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) | 946 FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3])); 947 948 mt76_wr(dev, MT_WTBL_UPDATE, 949 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) | 950 MT_WTBL_UPDATE_RATE_UPDATE | 951 MT_WTBL_UPDATE_TX_COUNT_CLEAR); 952 953 mt76_wr(dev, addr + 27 * 4, w27); 954 955 mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ 956 sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0); 957 sta->rate_set_tsf |= rd.rateset; 958 959 if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) 960 mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); 961 962 sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates; 963 sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; 964 } 965 EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); 966 967 int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, 968 struct mt76_wcid *wcid, 969 u8 *key, u8 keylen, 970 enum mt7615_cipher_type cipher, 971 enum set_key_cmd cmd) 972 { 973 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4; 974 u8 data[32] = {}; 975 976 if (keylen > sizeof(data)) 977 return -EINVAL; 978 979 mt76_rr_copy(dev, addr, data, sizeof(data)); 980 if (cmd == SET_KEY) { 981 if (cipher == MT_CIPHER_TKIP) { 982 /* Rx/Tx MIC keys are swapped */ 983 memcpy(data + 16, key + 24, 8); 984 memcpy(data + 24, key + 16, 8); 985 } 986 if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher) 987 memmove(data + 16, data, 16); 988 if 
(cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) 989 memcpy(data, key, keylen); 990 else if (cipher == MT_CIPHER_BIP_CMAC_128) 991 memcpy(data + 16, key, 16); 992 } else { 993 if (wcid->cipher & ~BIT(cipher)) { 994 if (cipher != MT_CIPHER_BIP_CMAC_128) 995 memmove(data, data + 16, 16); 996 memset(data + 16, 0, 16); 997 } else { 998 memset(data, 0, sizeof(data)); 999 } 1000 } 1001 mt76_wr_copy(dev, addr, data, sizeof(data)); 1002 1003 return 0; 1004 } 1005 EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_key); 1006 1007 int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, 1008 struct mt76_wcid *wcid, 1009 enum mt7615_cipher_type cipher, 1010 int keyidx, enum set_key_cmd cmd) 1011 { 1012 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1; 1013 1014 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 1015 return -ETIMEDOUT; 1016 1017 w0 = mt76_rr(dev, addr); 1018 w1 = mt76_rr(dev, addr + 4); 1019 if (cmd == SET_KEY) { 1020 w0 |= MT_WTBL_W0_RX_KEY_VALID | 1021 FIELD_PREP(MT_WTBL_W0_RX_IK_VALID, 1022 cipher == MT_CIPHER_BIP_CMAC_128); 1023 if (cipher != MT_CIPHER_BIP_CMAC_128 || 1024 !wcid->cipher) 1025 w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx); 1026 } else { 1027 if (!(wcid->cipher & ~BIT(cipher))) 1028 w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | 1029 MT_WTBL_W0_KEY_IDX); 1030 if (cipher == MT_CIPHER_BIP_CMAC_128) 1031 w0 &= ~MT_WTBL_W0_RX_IK_VALID; 1032 } 1033 mt76_wr(dev, MT_WTBL_RICR0, w0); 1034 mt76_wr(dev, MT_WTBL_RICR1, w1); 1035 1036 if (!mt7615_mac_wtbl_update(dev, wcid->idx, 1037 MT_WTBL_UPDATE_RXINFO_UPDATE)) 1038 return -ETIMEDOUT; 1039 1040 return 0; 1041 } 1042 EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_pk); 1043 1044 void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, 1045 struct mt76_wcid *wcid, 1046 enum mt7615_cipher_type cipher, 1047 enum set_key_cmd cmd) 1048 { 1049 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx); 1050 1051 if (cmd == SET_KEY) { 1052 if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher) 1053 mt76_rmw(dev, addr + 2 * 4, 
MT_WTBL_W2_KEY_TYPE, 1054 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher)); 1055 } else { 1056 if (cipher != MT_CIPHER_BIP_CMAC_128 && 1057 wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128)) 1058 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE, 1059 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, 1060 MT_CIPHER_BIP_CMAC_128)); 1061 else if (!(wcid->cipher & ~BIT(cipher))) 1062 mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE); 1063 } 1064 } 1065 EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_cipher); 1066 1067 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, 1068 struct mt76_wcid *wcid, 1069 struct ieee80211_key_conf *key, 1070 enum set_key_cmd cmd) 1071 { 1072 enum mt7615_cipher_type cipher; 1073 int err; 1074 1075 cipher = mt7615_mac_get_cipher(key->cipher); 1076 if (cipher == MT_CIPHER_NONE) 1077 return -EOPNOTSUPP; 1078 1079 spin_lock_bh(&dev->mt76.lock); 1080 1081 mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd); 1082 err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen, 1083 cipher, cmd); 1084 if (err < 0) 1085 goto out; 1086 1087 err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, 1088 cmd); 1089 if (err < 0) 1090 goto out; 1091 1092 if (cmd == SET_KEY) 1093 wcid->cipher |= BIT(cipher); 1094 else 1095 wcid->cipher &= ~BIT(cipher); 1096 1097 out: 1098 spin_unlock_bh(&dev->mt76.lock); 1099 1100 return err; 1101 } 1102 1103 static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, 1104 struct ieee80211_tx_info *info, __le32 *txs_data) 1105 { 1106 struct ieee80211_supported_band *sband; 1107 struct mt7615_rate_set *rs; 1108 struct mt76_phy *mphy; 1109 int first_idx = 0, last_idx; 1110 int i, idx, count; 1111 bool fixed_rate, ack_timeout; 1112 bool probe, ampdu, cck = false; 1113 bool rs_idx; 1114 u32 rate_set_tsf; 1115 u32 final_rate, final_rate_flags, final_nss, txs; 1116 1117 fixed_rate = info->status.rates[0].count; 1118 probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); 1119 1120 txs = le32_to_cpu(txs_data[1]); 1121 ampdu = 
		!fixed_rate && (txs & MT_TXS1_AMPDU);

	txs = le32_to_cpu(txs_data[3]);
	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	/* drop reports for frames that never made it on air */
	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	/* each rate-set entry is retried MT7615_RATE_RETRY times; recover
	 * the index the hardware started from
	 */
	first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

	/* pick the rate set that was active when the frame was sent by
	 * comparing the TXS timestamp against the rate-set switch TSF
	 */
	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			struct mt7615_phy *phy = &dev->phy;

			if (sta->wcid.ext_phy && dev->mt76.phy2)
				phy = dev->mt76.phy2->priv;

			/* probe attempt reported: restore the normal rates */
			mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
			sta->rate_probe = false;
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	/* distribute the hardware retry count over the attempted rates,
	 * merging consecutive identical entries
	 */
	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7615_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	/* translate the hardware rate word of the final attempt into
	 * mac80211 rate index + flags
	 */
	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		mphy = &dev->mphy;
		if (sta->wcid.ext_phy && dev->mt76.phy2)
			mphy = dev->mt76.phy2;

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else
			sband = &mphy->sband_2g.sband;
		final_rate &= MT_TX_RATE_IDX;
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= MT_TX_RATE_IDX;
		if (final_rate > 31)
			return false;
		break;
	case MT_PHY_TYPE_VHT:
		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);

		if ((final_rate & MT_TX_RATE_STBC) && final_nss)
			final_nss--;

		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
		/* mac80211 VHT encoding: MCS in low nibble, NSS above it */
		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

/* Match a TXS report against a pending status skb (by packet id) and
 * complete it towards mac80211.
 *
 * Returns true if an skb was found and completed.
 */
static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
				   struct mt7615_sta *sta, int pid,
				   __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);
	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
			/* unusable report: clear status, mark no rate info */
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

/* Handle one 7-dword TXS entry: locate the wcid, queue the station for
 * polling, and report status either via the pending skb or via
 * ieee80211_tx_status_noskb().
 */
static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7615_sta *msta = NULL;
	struct mt76_wcid *wcid;
	struct mt76_phy *mphy = &dev->mt76.phy;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[0]);
	pid = FIELD_GET(MT_TXS0_PID, txs);
	txs = le32_to_cpu(txs_data[2]);
	wcidx = FIELD_GET(MT_TXS2_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	/* bound the hardware-provided index before the wcid lookup */
	if (wcidx >= MT7615_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7615_sta, wcid);
	sta = wcid_to_sta(wcid);

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7615_WTBL_STA || !sta)
		goto out;

	if (wcid->ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	if (mt7615_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mphy->hw, sta, &info);

out:
	rcu_read_unlock();
}

/* Release one transmitted frame identified by its DMA token: unmap the
 * TXP buffers, complete the skb and return the txwi to the pool.
 */
static void
mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;

	trace_mac_tx_free(dev, token);

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	if (!txwi)
		return;

	mt7615_txp_skb_unmap(mdev, txwi);
	if (txwi->skb) {
		mt76_tx_complete_skb(mdev, txwi->skb);
		txwi->skb = NULL;
	}

	mt76_put_txwi(mdev, txwi);
}

/* Process a TXRX_NOTIFY (tx-free) event: release every token listed in
 * the message, then consume the event skb.
 *
 * Token width differs by chip generation: 16 bit on mt7615, 32 bit
 * otherwise.
 *
 * NOTE(review): count comes straight from the hardware message and is
 * not checked against the actual token array length in the skb — verify
 * whether a bounds check against skb->len is needed here.
 */
static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
	u8 i, count;

	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
	if (is_mt7615(&dev->mt76)) {
		__le16 *token = &free->token[0];

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
	} else {
		__le32 *token = (__le32 *)&free->token[0];

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
	}

	dev_kfree_skb(skb);
}

/* Dispatch a received DMA buffer by packet type: TXS reports, tx-free
 * notifications, MCU events, or normal RX frames. Ownership of the skb
 * is either passed on (mt76_rx, tx_free, mcu_rx_event) or freed here.
 */
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
	flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
	/* RX_EVENT with flag 1 carries a normal frame from the MCU path */
	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXS:
		/* a buffer may carry several 7-dword TXS entries */
		for (rxd++; rxd + 7 <= end; rxd += 7)
			mt7615_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_TXRX_NOTIFY:
		mt7615_mac_tx_free(dev, skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7615_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7615_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		/* fall through */
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb);

/* Write an OFDM or CCK packet-detection threshold to the PHY, using the
 * mt7663-specific register layout when applicable.
 */
static void
mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	if (is_mt7663(&dev->mt76)) {
		if (ofdm)
			mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy),
				 MT_WF_PHY_PD_OFDM_MASK(0),
				 MT_WF_PHY_PD_OFDM(0, val));
		else
			mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy),
				 MT_WF_PHY_PD_CCK_MASK(ext_phy),
				 MT_WF_PHY_PD_CCK(ext_phy, val));
		return;
	}

	if (ofdm)
		mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
			 MT_WF_PHY_PD_OFDM_MASK(ext_phy),
			 MT_WF_PHY_PD_OFDM(ext_phy, val));
	else
		mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
			 MT_WF_PHY_PD_CCK_MASK(ext_phy),
			 MT_WF_PHY_PD_CCK(ext_phy, val));
}

/* Restore the default PD thresholds and reset the software sensitivity
 * tracking state used by the SCS algorithm.
 */
static void
mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
{
	/* ofdm */
	mt7615_mac_set_sensitivity(phy, 0x13c, true);
	/* cck */
	mt7615_mac_set_sensitivity(phy, 0x92, false);

	phy->ofdm_sensitivity = -98;
	phy->cck_sensitivity = -110;
	phy->last_cca_adj = jiffies;
}

/* Enable or disable smart carrier sense (SCS) for a phy; resets the
 * sensitivity state either way. No-op if the state already matches.
 */
void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg, mask;

	mutex_lock(&dev->mt76.mutex);

	if (phy->scs_en == enable)
		goto out;

	if (is_mt7663(&dev->mt76)) {
		reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(0);
	} else {
		reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(ext_phy);
	}

	if (enable) {
		mt76_set(dev, reg, mask);
		if (is_mt7622(&dev->mt76)) {
			mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8);
			mt76_set(dev,
MT_MIB_M0_MISC_CR(0), 0x7); 1483 } 1484 } else { 1485 mt76_clear(dev, reg, mask); 1486 } 1487 1488 mt7615_mac_set_default_sensitivity(phy); 1489 phy->scs_en = enable; 1490 1491 out: 1492 mutex_unlock(&dev->mt76.mutex); 1493 } 1494 1495 void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy) 1496 { 1497 u32 rxtd, reg; 1498 1499 if (is_mt7663(&dev->mt76)) 1500 reg = MT7663_WF_PHY_R0_PHYMUX_5; 1501 else 1502 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); 1503 1504 if (ext_phy) 1505 rxtd = MT_WF_PHY_RXTD2(10); 1506 else 1507 rxtd = MT_WF_PHY_RXTD(12); 1508 1509 mt76_set(dev, rxtd, BIT(18) | BIT(29)); 1510 mt76_set(dev, reg, 0x5 << 12); 1511 } 1512 1513 void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy) 1514 { 1515 struct mt7615_dev *dev = phy->dev; 1516 bool ext_phy = phy != &dev->phy; 1517 u32 reg; 1518 1519 if (is_mt7663(&dev->mt76)) 1520 reg = MT7663_WF_PHY_R0_PHYMUX_5; 1521 else 1522 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy); 1523 1524 /* reset PD and MDRDY counters */ 1525 mt76_clear(dev, reg, GENMASK(22, 20)); 1526 mt76_set(dev, reg, BIT(22) | BIT(20)); 1527 } 1528 1529 static void 1530 mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy, 1531 u32 rts_err_rate, bool ofdm) 1532 { 1533 struct mt7615_dev *dev = phy->dev; 1534 int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck; 1535 bool ext_phy = phy != &dev->phy; 1536 u16 def_th = ofdm ? -98 : -110; 1537 bool update = false; 1538 s8 *sensitivity; 1539 int signal; 1540 1541 sensitivity = ofdm ? 
&phy->ofdm_sensitivity : &phy->cck_sensitivity; 1542 signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy); 1543 if (!signal) { 1544 mt7615_mac_set_default_sensitivity(phy); 1545 return; 1546 } 1547 1548 signal = min(signal, -72); 1549 if (false_cca > 500) { 1550 if (rts_err_rate > MT_FRAC(40, 100)) 1551 return; 1552 1553 /* decrease coverage */ 1554 if (*sensitivity == def_th && signal > -90) { 1555 *sensitivity = -90; 1556 update = true; 1557 } else if (*sensitivity + 2 < signal) { 1558 *sensitivity += 2; 1559 update = true; 1560 } 1561 } else if ((false_cca > 0 && false_cca < 50) || 1562 rts_err_rate > MT_FRAC(60, 100)) { 1563 /* increase coverage */ 1564 if (*sensitivity - 2 >= def_th) { 1565 *sensitivity -= 2; 1566 update = true; 1567 } 1568 } 1569 1570 if (*sensitivity > signal) { 1571 *sensitivity = signal; 1572 update = true; 1573 } 1574 1575 if (update) { 1576 u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256; 1577 1578 mt7615_mac_set_sensitivity(phy, val, ofdm); 1579 phy->last_cca_adj = jiffies; 1580 } 1581 } 1582 1583 static void 1584 mt7615_mac_scs_check(struct mt7615_phy *phy) 1585 { 1586 struct mt7615_dev *dev = phy->dev; 1587 struct mib_stats *mib = &phy->mib; 1588 u32 val, rts_err_rate = 0; 1589 u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm; 1590 bool ext_phy = phy != &dev->phy; 1591 1592 if (!phy->scs_en) 1593 return; 1594 1595 if (is_mt7663(&dev->mt76)) 1596 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); 1597 else 1598 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy)); 1599 pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val); 1600 pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val); 1601 1602 if (is_mt7663(&dev->mt76)) 1603 val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); 1604 else 1605 val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy)); 1606 mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val); 1607 mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val); 1608 1609 phy->false_cca_ofdm = pd_ofdm - 
			      mdrdy_ofdm;
	phy->false_cca_cck = pd_cck - mdrdy_cck;
	mt7615_mac_cca_stats_reset(phy);

	if (mib->rts_cnt + mib->rts_retries_cnt)
		rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
				       mib->rts_cnt + mib->rts_retries_cnt);

	/* cck */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
	/* ofdm */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);

	/* revert to defaults if nothing was adjusted for 10 seconds */
	if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
		mt7615_mac_set_default_sensitivity(phy);
}

/* Read the per-bin noise histogram from the RXTD registers and return
 * the weighted average noise power (positive magnitude), or 0 when no
 * samples were collected.
 */
static u8
mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	u32 reg, val, sum = 0, n = 0;
	int i;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_RXTD(20);
	else
		reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);

	for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
		val = mt76_rr(dev, reg);
		sum += val * nf_power[i];
		n += val;
	}

	if (!n)
		return 0;

	return sum / n;
}

/* Accumulate channel-time survey counters (busy/tx/rx/obss) and update
 * the exponentially averaged noise estimate for one phy.
 */
static void
mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
	struct mt7615_phy *phy = mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);

	/* phy->noise keeps a fixed-point (<<4) running average */
	nf = mt7615_phy_get_nf(dev, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}

/* Survey update entry point for both phys; clears the hardware RX-time
 * counters afterwards so the next read starts from zero.
 */
void mt7615_update_channel(struct mt76_dev *mdev)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);

	mt7615_phy_update_channel(&mdev->phy, 0);
	if (mdev->phy2)
		mt7615_phy_update_channel(mdev->phy2, 1);

	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);

/* Refresh the software MIB snapshot for one phy: FCS errors, A-MPDU
 * PER, per-AC ACK-fail / BA-miss / RTS counters and the TX aggregation
 * size histogram.
 */
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	bool ext_phy = phy != &dev->phy;
	int i, aggr;
	u32 val, val2;

	memset(mib, 0, sizeof(*mib));

	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
					  MT_MIB_SDR3_FCS_ERR_MASK);

	val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
			     MT_MIB_AMPDU_MPDU_COUNT);
	if (val) {
		val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy),
				      MT_MIB_AMPDU_ACK_COUNT);
		/* A-MPDU PER in tenths of a percent */
		mib->aggr_per = 1000 * (val - val2) / val;
	}

	/* second half of the aggr_stats array belongs to the ext phy */
	aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
	for (i = 0; i < 4; i++) {
		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));

		val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
		if (val2 > mib->ack_fail_cnt)
			mib->ack_fail_cnt = val2;

		val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		if (val2 > mib->ba_miss_cnt)
			mib->ba_miss_cnt = val2;

		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
		val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
		if (val2 > mib->rts_retries_cnt) {
			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
			mib->rts_retries_cnt = val2;
		}

		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));

		dev->mt76.aggr_stats[aggr++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr++] += val >> 16;
	}
}

/* Per-phy periodic worker: update the survey every run and the MIB /
 * SCS state every fifth run, then check for stale tx-status skbs and
 * re-arm itself.
 */
void mt7615_mac_work(struct work_struct *work)
{
	struct mt7615_phy *phy;
	struct mt76_dev *mdev;

	phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
						mac_work.work);
	mdev = &phy->dev->mt76;

	mutex_lock(&mdev->mutex);

	mt76_update_survey(mdev);
	if (++phy->mac_work_count == 5) {
		phy->mac_work_count = 0;

		mt7615_mac_update_mib_stats(phy);
		mt7615_mac_scs_check(phy);
	}

	mutex_unlock(&mdev->mutex);

	mt76_tx_status_check(mdev, NULL, false);
	ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
				     MT7615_WATCHDOG_TIME);
}

/* Wait (with timeout) for the MCU to signal the requested reset state;
 * warns loudly on timeout since recovery cannot proceed.
 */
static bool
mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->reset_state) & state),
				 MT7615_RESET_TIMEOUT);
	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

/* Interface iterator callback: re-install the beacon template for one
 * vif after a chip reset.
 */
static void
mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;
	struct mt7615_dev *dev = mt7615_hw_dev(hw);

	mt7615_mcu_add_beacon(dev, hw, vif, vif->bss_conf.enable_beacon);
}

/* Re-install beacon templates on all active interfaces of both phys. */
static void
mt7615_update_beacons(struct mt7615_dev *dev)
{
	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					   IEEE80211_IFACE_ITER_RESUME_ALL,
					   mt7615_update_vif_beacon, dev->mt76.hw);

	if (!dev->mt76.phy2)
		return;

	ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
					   IEEE80211_IFACE_ITER_RESUME_ALL,
					   mt7615_update_vif_beacon, dev->mt76.phy2->hw);
}

/* Stop WPDMA, drain all TX queues, reset every RX queue and re-enable
 * DMA. Caller is responsible for quiescing NAPI/tasklets first.
 */
void mt7615_dma_reset(struct mt7615_dev *dev)
{
	int i;

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, i, true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		mt76_queue_rx_reset(dev, i);
	}

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}
EXPORT_SYMBOL_GPL(mt7615_dma_reset);

/* Full MCU-initiated chip recovery: stop both phys, quiesce workers,
 * NAPI and the TX tasklet, reset DMA in lock-step with the MCU state
 * machine, then bring everything back up and restore beacons.
 *
 * Ordering here is deliberate and mirrors the teardown path in reverse;
 * the MT_MCU_INT_EVENT writes acknowledge each MCU handshake step.
 */
void mt7615_mac_reset_work(struct work_struct *work)
{
	struct mt7615_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7615_dev *dev;

	dev = container_of(work, struct mt7615_dev, reset_work);
	ext_phy = dev->mt76.phy2;
	phy2 = ext_phy ? ext_phy->priv : NULL;

	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->phy.mac_work);
	del_timer_sync(&dev->phy.roc_timer);
	cancel_work_sync(&dev->phy.roc_work);
	if (phy2) {
		cancel_delayed_work_sync(&phy2->mac_work);
		del_timer_sync(&phy2->roc_timer);
		cancel_work_sync(&phy2->roc_work);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	tasklet_disable(&dev->mt76.tx_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);

	if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7615_dma_reset(dev);

		mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT);
		mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mutex_unlock(&dev->mt76.mutex);

	mt7615_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
				     MT7615_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work,
					     MT7615_WATCHDOG_TIME);

}

/* Stop radar detection on whichever RDD chains this phy had started. */
static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
}

/* Start one RDD chain and switch it into detection mode. */
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
	int err;

	err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}

/* Begin CAC and radar detection for this phy; wide channels
 * (160 / 80+80 MHz) additionally need the second RDD chain.
 *
 * Returns 0 on success or a negative MCU error.
 */
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* start CAC */
	err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7615_dfs_start_rdd(dev, ext_phy);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(ext_phy);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7615_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

/* Push the region-specific radar pulse/pattern thresholds to the MCU. */
static int
mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
{
	const struct mt7615_dfs_radar_spec *radar_specs;
	struct mt7615_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err =
		      mt7615_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7615_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

/* Reconcile the driver's DFS state with the current channel: program
 * radar specs and start/stop the detector as the channel's DFS state
 * requires. No-op on mt7663 (no DFS offload) and while scanning.
 *
 * Returns 0 on success or a negative MCU error.
 */
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	if (is_mt7663(&dev->mt76))
		return 0;

	if (dev->mt76.region == NL80211_DFS_UNSET) {
		phy->dfs_state = -1;
		if (phy->rdd_state)
			goto stop;

		return 0;
	}

	if (test_bit(MT76_SCANNING, &phy->mt76->state))
		return 0;

	/* nothing to do if the channel's DFS state did not change */
	if (phy->dfs_state == chandef->chan->dfs_state)
		return 0;

	err = mt7615_dfs_init_radar_specs(phy);
	if (err < 0) {
		phy->dfs_state = -1;
		goto stop;
	}

	phy->dfs_state = chandef->chan->dfs_state;

	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
			return mt7615_dfs_start_radar_detector(phy);

		/* channel already cleared: end CAC */
		return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
					  MT_RX_SEL0, 0);
	}

stop:
	err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7615_dfs_stop_radar_detector(phy);
	return 0;
}