// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *	   Roy Luo <royluo@google.com>
 *	   Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
#include "../trace.h"
#include "../dma.h"
#include "mt7615_trace.h"
#include "mac.h"

#define to_rssi(field, rxv)	((FIELD_GET(field, rxv) - 220) / 2)

static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0, 17,  990, 5010, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0, 27,  615, 5010, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0, 27,  240,  445, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0, 42,  240,  510, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
		[10] = { 1, 1,  0,  0,  0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
		[11] = { 1, 1,  0,  0,  0, 0, 14,  823, 2510, 0, 0, 18, 32, 28 },
		[12] = { 1, 1,  0,  0,  0, 0, 14,  823, 2510, 0, 0, 27, 32, 24 },
	},
};

static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  9,  32, 28, 0, 13, 508, 3076, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 17, 140,  240, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 22, 190,  510, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 32, 190,  510, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 13, 323,  343, 1, 32 },
	},
};

static const struct mt7615_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8, 32, 28, 0, 13,  508, 3076, 1,  1 },
		[1] =  { 1, 0, 12, 32, 28, 0, 17,  140,  240, 1,  1 },
		[2] =  { 1, 0,  8, 32, 28, 0, 22,  190,  510, 1,  1 },
		[3] =  { 1, 0,  6, 32, 28, 0, 32,  190,  510, 1,  1 },
		[4] =  { 1, 0,  9, 32, 28, 0, 13,  323,  343, 1, 32 },
		[13] = { 1, 0,  8, 32, 28, 0, 14, 3836, 3856, 1,  1 },
		[14] = { 1, 0,  8, 32, 28, 0, 14, 3990, 4010, 1,  1 },
	},
};

static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
					    u8 idx, bool unicast)
{
	struct mt7615_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= MT7615_WTBL_SIZE)
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7615_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

void mt7615_mac_reset_counters(struct mt7615_dev *dev)
{
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT(1, i));
	}

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
	dev->mt76.phy.survey_time = ktime_get_boottime();
	if (dev->mt76.phy2)
		dev->mt76.phy2->survey_time = ktime_get_boottime();

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR9(1));

	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR36(1));

	mt76_rr(dev, MT_MIB_SDR37(0));
	mt76_rr(dev, MT_MIB_SDR37(1));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

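/*
 * Program PHY timing parameters. SIFS is 16us on 5 GHz and 10us on
 * 2.4 GHz, and every coverage class unit adds 3us of CCA/slot time.
 * The timing registers are shared between both PHYs, so the larger of
 * the two configured coverage classes is applied.
 */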
void mt7615_mac_set_timing(struct mt7615_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	if (ext_phy) {
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	} else {
		struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);

		if (phy_ext)
			coverage_class = max_t(s16, phy_ext->coverage_class,
					       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
	}
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);

	mt76_wr(dev, MT_TMAC_ICR(ext_phy),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (phy->slottime < 20 || is_5ghz)
		val = MT7615_CFEND_RATE_DEFAULT;
	else
		val = MT7615_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	if (ext_phy)
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	else
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
}

static void
mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
			    struct mt76_rx_status *status, u8 chfreq)
{
	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
		status->freq = mphy->chandef.chan->center_freq;
		status->band = mphy->chandef.chan->band;
		return;
	}

	status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}

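/*
 * Record per-frame RX vector statistics (frequency offset, per-chain
 * RCPI, in-band/wide-band RSSI) for testmode; this is compiled out
 * unless CONFIG_NL80211_TESTMODE is enabled.
 */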
static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv)
{
#ifdef CONFIG_NL80211_TESTMODE
	u32 rxv1 = le32_to_cpu(rxv[0]);
	u32 rxv3 = le32_to_cpu(rxv[2]);
	u32 rxv4 = le32_to_cpu(rxv[3]);
	u32 rxv5 = le32_to_cpu(rxv[4]);
	u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1);
	u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1);
	s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5);
	u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000;

	if (!mode) {
		/* CCK */
		foe &= ~BIT(11);
		foe *= 1000;
		foe >>= 11;
	} else {
		if (foe > 2048)
			foe -= 4096;

		foe = (foe * foe_const) >> 15;
	}

	dev->test.last_freq_offset = foe;
	dev->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
	dev->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
	dev->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
	dev->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
	dev->test.last_ib_rssi = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
	dev->test.last_wb_rssi = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
#endif
}

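/*
 * Parse an RX descriptor into the mac80211 rx status. The base
 * descriptor is four dwords; optional groups 4, 1, 2 and 3 follow it
 * in that order. A negative return value tells the caller to drop the
 * frame.
 */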
static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7615_phy *phy = &dev->phy;
	struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	__le32 rxd12 = rxd[12];
	bool unicast, remove_pad, insert_ccmp_hdr = false;
	int phy_idx;
	int i, idx;
	u8 chfreq;

	memset(status, 0, sizeof(*status));

	chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	if (!phy2)
		phy_idx = 0;
	else if (phy2->chfreq == phy->chfreq)
		phy_idx = -1;
	else if (phy->chfreq == chfreq)
		phy_idx = 0;
	else if (phy2->chfreq == chfreq)
		phy_idx = 1;
	else
		phy_idx = -1;

	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		struct mt7615_sta *msta;

		msta = container_of(status->wcid, struct mt7615_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg5 = le32_to_cpu(rxd[5]);

		/*
		 * If both PHYs are on the same channel and we don't have a WCID,
		 * we need to figure out which PHY this packet was received on.
		 * On the primary PHY, the noise value for the chains belonging to the
		 * second PHY will be set to the noise value of the last packet from
		 * that PHY.
		 */
		if (phy_idx < 0) {
			int first_chain = ffs(phy2->chainmask) - 1;

			phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
		}
	}

	if (phy_idx == 1 && phy2) {
		mphy = dev->mt76.phy2;
		phy = phy2;
		status->ext_phy = true;
	}

	if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
		return -EINVAL;

	mt7615_get_status_freq_info(dev, mphy, status, chfreq);
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
		      MT_RXD2_NORMAL_NON_AMPDU))) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;

		/* all subframes of an A-MPDU have the same timestamp */
		if (phy->rx_ampdu_ts != rxd12) {
			if (!++phy->ampdu_ref)
				phy->ampdu_ref++;
		}
		phy->rx_ampdu_ts = rxd12;

		status->ampdu_ref = phy->ampdu_ref;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg1 = le32_to_cpu(rxd[1]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
			status->encoding = RX_ENC_VHT;
			break;
		default:
			return -EINVAL;
		}
		status->rate_idx = i;

		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
		case MT_PHY_BW_20:
			break;
		case MT_PHY_BW_40:
			status->bw = RATE_INFO_BW_40;
			break;
		case MT_PHY_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case MT_PHY_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
		status->signal = status->chain_signal[0];

		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
			if (!(status->chains & BIT(i)))
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}

		mt7615_mac_fill_tm_rx(dev, rxd);

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}

void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
EXPORT_SYMBOL_GPL(mt7615_sta_ps);

static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
		       struct mt76_phy *mphy,
		       const struct ieee80211_tx_rate *rate,
		       bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval = 0;

	*bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = ieee80211_rate_get_vht_mcs(rate);
		nss = ieee80211_rate_get_vht_nss(rate);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*bw = 3;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = mphy->chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	if (stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		    FIELD_PREP(MT_TX_RATE_MODE, phy) |
		    FIELD_PREP(MT_TX_RATE_NSS, nss - 1));

	return rateval;
}

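/*
 * Build the hardware TX descriptor (TXWI) for a frame. Note that the
 * descriptor size (MT_TXD_SIZE vs MT_USB_TXD_SIZE) and the packet
 * format (MT_TX_TYPE_CT vs MT_TX_TYPE_SF) differ between the MMIO and
 * USB variants.
 */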
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta, int pid,
			  struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	struct ieee80211_vif *vif = info->control.vif;
	bool is_mmio = mt76_is_mmio(&dev->mt76);
	u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE;
	struct mt76_phy *mphy = &dev->mphy;
	__le16 fc = hdr->frame_control;
	int tx_count = 8;
	u16 seqno = 0;

	if (vif) {
		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (sta) {
		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
	} else {
		p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
			mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
	txwi[1] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);
	if (key) {
		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
		    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
			val |= MT_TXD2_BIP;
			txwi[3] = 0;
		} else {
			txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
		}
	} else {
		txwi[3] = 0;
	}
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;
	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u8 bw;
		u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
						     &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (info->flags & IEEE80211_TX_CTL_LDPC)
			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);

		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
				     IEEE80211_TX_RC_VHT_MCS)))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	if (!ieee80211_is_beacon(fc)) {
		struct ieee80211_hw *hw = mt76_hw(dev);

		val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid);
		if (!ieee80211_hw_check(hw, SUPPORTS_PS))
			val |= MT_TXD5_SW_POWER_MGMT;
		txwi[5] = cpu_to_le32(val);
	} else {
		txwi[5] = 0;
		/* use maximum tx count for beacons */
		tx_count = 0x1f;
	}

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val |= MT_TXD3_SN_VALID |
		       FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
	}

	txwi[3] |= cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);

	txwi[7] = cpu_to_le32(FIELD_PREP(MT_TXD7_TYPE, fc_type) |
			      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
			      FIELD_PREP(MT_TXD7_SPE_IDX, 0x18));
	if (!is_mmio)
		txwi[8] = cpu_to_le32(FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
				      FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype));

	return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);

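/*
 * Unmap the DMA buffers referenced by a TX packet descriptor. The
 * firmware TXP format (MT7615) carries a flat buffer array whose first
 * entry is intentionally skipped here; the hardware TXP format uses
 * buf0/buf1 pairs terminated by a last-segment flag in the length
 * field.
 */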
static void
mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
{
	int i;

	for (i = 1; i < txp->nbuf; i++)
		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}

static void
mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
{
	u32 last_mask;
	int i;

	last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST;

	for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
		struct mt7615_txp_ptr *ptr = &txp->ptr[i];
		bool last;
		u16 len;

		len = le16_to_cpu(ptr->len0);
		last = len & last_mask;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
				 DMA_TO_DEVICE);
		if (last)
			break;

		len = le16_to_cpu(ptr->len1);
		last = len & last_mask;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
				 DMA_TO_DEVICE);
		if (last)
			break;
	}
}

void mt7615_txp_skb_unmap(struct mt76_dev *dev,
			  struct mt76_txwi_cache *t)
{
	struct mt7615_txp_common *txp;

	txp = mt7615_txwi_to_txp(dev, t);
	if (is_mt7615(dev))
		mt7615_txp_skb_unmap_fw(dev, &txp->fw);
	else
		mt7615_txp_skb_unmap_hw(dev, &txp->hw);
}
EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap);

bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

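/*
 * Poll the per-station WTBL airtime counters (four ACs of TX time
 * starting at word 19, with the RX counterpart right after each) and
 * report the deltas to mac80211. Once a counter reaches bit 30, the
 * hardware counters are cleared via ADM_COUNT_CLEAR so they never wrap.
 */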
void mt7615_mac_sta_poll(struct mt7615_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	static const u8 hw_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	struct ieee80211_sta *sta;
	struct mt7615_sta *msta;
	u32 addr, tx_time[4], rx_time[4];
	struct list_head sta_poll_list;
	int i;

	INIT_LIST_HEAD(&sta_poll_list);
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	while (!list_empty(&sta_poll_list)) {
		bool clear = false;

		msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
					poll_list);
		list_del_init(&msta->poll_list);

		addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;

		for (i = 0; i < 4; i++, addr += 8) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;
		}

		if (clear) {
			mt7615_mac_wtbl_update(dev, msta->wcid.idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < 4; i++) {
			u32 tx_cur = tx_time[i];
			u32 rx_cur = rx_time[hw_queue_map[i]];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll);

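/*
 * Translate a mac80211 rate table into a hardware rate descriptor.
 * Rates land in one of two rate-set slots, chosen by flipping bit 0 of
 * rate_set_tsf, so that mt7615_fill_txs() can later match a TX status
 * report against the set that was active when the frame was queued.
 */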
static void
mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta,
			    struct ieee80211_tx_rate *probe_rate,
			    struct ieee80211_tx_rate *rates,
			    struct mt7615_rate_desc *rd)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_tx_rate *ref;
	bool rateset, stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev;
	int i, j;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (j = 0; j < i; j++) {
			if (rates[i].idx != rates[j].idx)
				continue;
			if ((rates[i].flags ^ rates[j].flags) &
			    (IEEE80211_TX_RC_40_MHZ_WIDTH |
			     IEEE80211_TX_RC_80_MHZ_WIDTH |
			     IEEE80211_TX_RC_160_MHZ_WIDTH))
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
						       stbc, &bw);
		if (bw)
			rd->bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		rd->probe_val = rd->val[0];
	}

	rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 3;
		bw_prev = bw;
	}

	rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 5;
		bw_prev = bw;
	}

	rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
	if (bw_prev)
		rd->bw_idx = 7;

	rd->rateset = rateset;
	rd->bw = bw;
}

static int
mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
			     struct ieee80211_tx_rate *probe_rate,
			     struct ieee80211_tx_rate *rates)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_wtbl_desc *wd;

	if (work_pending(&dev->wtbl_work))
		return -EBUSY;

	wd = kzalloc(sizeof(*wd), GFP_ATOMIC);
	if (!wd)
		return -ENOMEM;

	wd->type = MT7615_WTBL_RATE_DESC;
	wd->sta = sta;

	mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
				    &wd->rate);
	list_add_tail(&wd->node, &dev->wd_head);
	queue_work(dev->mt76.wq, &dev->wtbl_work);

	return 0;
}

u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
{
	u32 addr, val, val2;
	u8 offset;

	addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4;

	offset = tid * 12;
	addr += 4 * (offset / 32);
	offset %= 32;

	val = mt76_rr(dev, addr);
	val >>= offset;

	if (offset > 20) {
		addr += 4;
		val2 = mt76_rr(dev, addr);
		val |= val2 << (32 - offset);
	}

	return val & GENMASK(11, 0);
}

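/*
 * Apply a new rate table to the WTBL. On MMIO the rate registers are
 * written directly below; non-MMIO buses defer the update to the
 * wtbl_work queue instead.
 */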
void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
			  struct ieee80211_tx_rate *probe_rate,
			  struct ieee80211_tx_rate *rates)
{
	int wcid = sta->wcid.idx, n_rates = sta->n_rates;
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_rate_desc rd;
	u32 w5, w27, addr;

	if (!mt76_is_mmio(&dev->mt76)) {
		mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
		return;
	}

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	memset(&rd, 0, sizeof(struct mt7615_rate_desc));
	mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd);

	addr = mt7615_mac_wtbl_addr(dev, wcid);
	w27 = mt76_rr(dev, addr + 27 * 4);
	w27 &= ~MT_WTBL_W27_CC_BW_SEL;
	w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw);

	w5 = mt76_rr(dev, addr + 5 * 4);
	w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
		MT_WTBL_W5_MPDU_OK_COUNT |
		MT_WTBL_W5_MPDU_FAIL_COUNT |
		MT_WTBL_W5_RATE_IDX);
	w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) |
	      FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
			 rd.bw_idx ? rd.bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w5);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3]));

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	mt76_wr(dev, addr + 27 * 4, w27);

	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
	sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
	sta->rate_set_tsf |= rd.rateset;

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
	sta->rate_probe = !!probe_rate;
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);

int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
			       struct mt76_wcid *wcid,
			       u8 *key, u8 keylen,
			       enum mt7615_cipher_type cipher,
			       enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
	u8 data[32] = {};

	if (keylen > sizeof(data))
		return -EINVAL;

	mt76_rr_copy(dev, addr, data, sizeof(data));
	if (cmd == SET_KEY) {
		if (cipher == MT_CIPHER_TKIP) {
			/* Rx/Tx MIC keys are swapped */
			memcpy(data + 16, key + 24, 8);
			memcpy(data + 24, key + 16, 8);
		}
		if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
			memmove(data + 16, data, 16);
		if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
			memcpy(data, key, keylen);
		else if (cipher == MT_CIPHER_BIP_CMAC_128)
			memcpy(data + 16, key, 16);
	} else {
		if (wcid->cipher & ~BIT(cipher)) {
			if (cipher != MT_CIPHER_BIP_CMAC_128)
				memmove(data, data + 16, 16);
			memset(data + 16, 0, 16);
		} else {
			memset(data, 0, sizeof(data));
		}
	}
	mt76_wr_copy(dev, addr, data, sizeof(data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_key);

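/*
 * Update the RX key valid bits and key index in WTBL words 0/1 through
 * the RICR0/RICR1 registers, then trigger an RXINFO update via
 * MT_WTBL_UPDATE.
 */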
int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
			      struct mt76_wcid *wcid,
			      enum mt7615_cipher_type cipher,
			      int keyidx, enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return -ETIMEDOUT;

	w0 = mt76_rr(dev, addr);
	w1 = mt76_rr(dev, addr + 4);
	if (cmd == SET_KEY) {
		w0 |= MT_WTBL_W0_RX_KEY_VALID |
		      FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
				 cipher == MT_CIPHER_BIP_CMAC_128);
		if (cipher != MT_CIPHER_BIP_CMAC_128 ||
		    !wcid->cipher)
			w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
	} else {
		if (!(wcid->cipher & ~BIT(cipher)))
			w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
				MT_WTBL_W0_KEY_IDX);
		if (cipher == MT_CIPHER_BIP_CMAC_128)
			w0 &= ~MT_WTBL_W0_RX_IK_VALID;
	}
	mt76_wr(dev, MT_WTBL_RICR0, w0);
	mt76_wr(dev, MT_WTBL_RICR1, w1);

	if (!mt7615_mac_wtbl_update(dev, wcid->idx,
				    MT_WTBL_UPDATE_RXINFO_UPDATE))
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_pk);

void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
				   struct mt76_wcid *wcid,
				   enum mt7615_cipher_type cipher,
				   enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);

	if (cmd == SET_KEY) {
		if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
	} else {
		if (cipher != MT_CIPHER_BIP_CMAC_128 &&
		    wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
			mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
				 FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
					    MT_CIPHER_BIP_CMAC_128));
		else if (!(wcid->cipher & ~BIT(cipher)))
			mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
	}
}
EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_cipher);

int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
			    struct mt76_wcid *wcid,
			    struct ieee80211_key_conf *key,
			    enum set_key_cmd cmd)
{
	enum mt7615_cipher_type cipher;
	int err;

	cipher = mt7615_mac_get_cipher(key->cipher);
	if (cipher == MT_CIPHER_NONE)
		return -EOPNOTSUPP;

	spin_lock_bh(&dev->mt76.lock);

	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
	err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
					 cipher, cmd);
	if (err < 0)
		goto out;

	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
					cmd);
	if (err < 0)
		goto out;

	if (cmd == SET_KEY)
		wcid->cipher |= BIT(cipher);
	else
		wcid->cipher &= ~BIT(cipher);

out:
	spin_unlock_bh(&dev->mt76.lock);

	return err;
}

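/*
 * Reconstruct the rate table used for a completed frame. The timestamp
 * in TXS word 4, compared against rate_set_tsf, selects which of the
 * two stored rate sets was live; retries are then distributed over the
 * entries at MT7615_RATE_RETRY attempts per rate.
 */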
static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
			    struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7615_rate_set *rs;
	struct mt76_phy *mphy;
	int first_idx = 0, last_idx;
	int i, idx, count;
	bool fixed_rate, ack_timeout;
	bool probe, ampdu, cck = false;
	bool rs_idx;
	u32 rate_set_tsf;
	u32 final_rate, final_rate_flags, final_nss, txs;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[1]);
	ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);

	txs = le32_to_cpu(txs_data[3]);
	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			struct mt7615_phy *phy = &dev->phy;

			if (sta->wcid.ext_phy && dev->mt76.phy2)
				phy = dev->mt76.phy2->priv;

			mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7615_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		mphy = &dev->mphy;
		if (sta->wcid.ext_phy && dev->mt76.phy2)
			mphy = dev->mt76.phy2;

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else
			sband = &mphy->sband_2g.sband;
		final_rate &= MT_TX_RATE_IDX;
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= MT_TX_RATE_IDX;
		if (final_rate > 31)
			return false;
		break;
	case MT_PHY_TYPE_VHT:
		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);

		if ((final_rate & MT_TX_RATE_STBC) && final_nss)
			final_nss--;

		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
				   struct mt7615_sta *sta, int pid,
				   __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

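/*
 * Handle a single TX status unit: first try to complete a pending
 * status skb; if none matches, fall back to
 * ieee80211_tx_status_noskb() so rate control still gets feedback.
 */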
static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7615_sta *msta = NULL;
	struct mt76_wcid *wcid;
	struct mt76_phy *mphy = &dev->mt76.phy;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[0]);
	pid = FIELD_GET(MT_TXS0_PID, txs);
	txs = le32_to_cpu(txs_data[2]);
	wcidx = FIELD_GET(MT_TXS2_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= MT7615_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7615_sta, wcid);
	sta = wcid_to_sta(wcid);

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7615_WTBL_STA || !sta)
		goto out;

	if (wcid->ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	if (mt7615_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mphy->hw, sta, &info);

out:
	rcu_read_unlock();
}

static void
mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;

	trace_mac_tx_free(dev, token);

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	if (!txwi)
		return;

	mt7615_txp_skb_unmap(mdev, txwi);
	if (txwi->skb) {
		mt76_tx_complete_skb(mdev, txwi->skb);
		txwi->skb = NULL;
	}

	mt76_put_txwi(mdev, txwi);
}

static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
	u8 i, count;

	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
	if (is_mt7615(&dev->mt76)) {
		__le16 *token = &free->token[0];

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
	} else {
		__le32 *token = (__le32 *)&free->token[0];

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
	}

	dev_kfree_skb(skb);

	rcu_read_lock();
	mt7615_mac_sta_poll(dev);
	rcu_read_unlock();

	tasklet_schedule(&dev->mt76.tx_tasklet);
}

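/*
 * Demultiplex an RX buffer by MT_RXD0_PKT_TYPE. TXS reports are packed
 * as 7-dword units, and an RX_EVENT with flag 0x1 is actually a normal
 * frame delivered through the MCU path.
 */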
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
	flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXS:
		for (rxd++; rxd + 7 <= end; rxd += 7)
			mt7615_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_TXRX_NOTIFY:
		mt7615_mac_tx_free(dev, skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7615_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7615_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		/* fall through */
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb);

static void
mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	if (is_mt7663(&dev->mt76)) {
		if (ofdm)
			mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy),
				 MT_WF_PHY_PD_OFDM_MASK(0),
				 MT_WF_PHY_PD_OFDM(0, val));
		else
			mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy),
				 MT_WF_PHY_PD_CCK_MASK(ext_phy),
				 MT_WF_PHY_PD_CCK(ext_phy, val));
		return;
	}

	if (ofdm)
		mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
			 MT_WF_PHY_PD_OFDM_MASK(ext_phy),
			 MT_WF_PHY_PD_OFDM(ext_phy, val));
	else
		mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
			 MT_WF_PHY_PD_CCK_MASK(ext_phy),
			 MT_WF_PHY_PD_CCK(ext_phy, val));
}

static void
mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
{
	/* ofdm */
	mt7615_mac_set_sensitivity(phy, 0x13c, true);
	/* cck */
	mt7615_mac_set_sensitivity(phy, 0x92, false);

	phy->ofdm_sensitivity = -98;
	phy->cck_sensitivity = -110;
	phy->last_cca_adj = jiffies;
}

void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg, mask;

	mt7615_mutex_acquire(dev);

	if (phy->scs_en == enable)
		goto out;

	if (is_mt7663(&dev->mt76)) {
		reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(0);
	} else {
		reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(ext_phy);
	}

	if (enable) {
		mt76_set(dev, reg, mask);
		if (is_mt7622(&dev->mt76)) {
			mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8);
			mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7);
		}
	} else {
		mt76_clear(dev, reg, mask);
	}

	mt7615_mac_set_default_sensitivity(phy);
	phy->scs_en = enable;

out:
	mt7615_mutex_release(dev);
}

void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
{
	u32 rxtd, reg;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_R0_PHYMUX_5;
	else
		reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);

	if (ext_phy)
		rxtd = MT_WF_PHY_RXTD2(10);
	else
		rxtd = MT_WF_PHY_RXTD(12);

	mt76_set(dev, rxtd, BIT(18) | BIT(29));
	mt76_set(dev, reg, 0x5 << 12);
}

void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_R0_PHYMUX_5;
	else
		reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);

	/* reset PD and MDRDY counters */
	mt76_clear(dev, reg, GENMASK(22, 20));
	mt76_set(dev, reg, BIT(22) | BIT(20));
}

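/*
 * False-CCA driven sensitivity tuning: more than 500 false CCA events
 * shrink coverage in 2 dB steps (skipped when the RTS error rate
 * exceeds 40%), while fewer than 50 events or an RTS error rate above
 * 60% grow it back toward the default threshold.
 */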
static void
mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
			      u32 rts_err_rate, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
	bool ext_phy = phy != &dev->phy;
	u16 def_th = ofdm ? -98 : -110;
	bool update = false;
	s8 *sensitivity;
	int signal;

	sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity;
	signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy);
	if (!signal) {
		mt7615_mac_set_default_sensitivity(phy);
		return;
	}

	signal = min(signal, -72);
	if (false_cca > 500) {
		if (rts_err_rate > MT_FRAC(40, 100))
			return;

		/* decrease coverage */
		if (*sensitivity == def_th && signal > -90) {
			*sensitivity = -90;
			update = true;
		} else if (*sensitivity + 2 < signal) {
			*sensitivity += 2;
			update = true;
		}
	} else if ((false_cca > 0 && false_cca < 50) ||
		   rts_err_rate > MT_FRAC(60, 100)) {
		/* increase coverage */
		if (*sensitivity - 2 >= def_th) {
			*sensitivity -= 2;
			update = true;
		}
	}

	if (*sensitivity > signal) {
		*sensitivity = signal;
		update = true;
	}

	if (update) {
		u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256;

		mt7615_mac_set_sensitivity(phy, val, ofdm);
		phy->last_cca_adj = jiffies;
	}
}

static void
mt7615_mac_scs_check(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	u32 val, rts_err_rate = 0;
	u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
	bool ext_phy = phy != &dev->phy;

	if (!phy->scs_en)
		return;

	if (is_mt7663(&dev->mt76))
		val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
	else
		val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
	pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);

	if (is_mt7663(&dev->mt76))
		val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
	else
		val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
	mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);

	phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	phy->false_cca_cck = pd_cck - mdrdy_cck;
	mt7615_mac_cca_stats_reset(phy);

	if (mib->rts_cnt + mib->rts_retries_cnt)
		rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
				       mib->rts_cnt + mib->rts_retries_cnt);

	/* cck */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
	/* ofdm */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);

	if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
		mt7615_mac_set_default_sensitivity(phy);
}

static u8
mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	u32 reg, val, sum = 0, n = 0;
	int i;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_RXTD(20);
	else
		reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);

	for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
		val = mt76_rr(dev, reg);
		sum += val * nf_power[i];
		n += val;
	}

	if (!n)
		return 0;

	return sum / n;
}

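/*
 * Accumulate channel busy/TX/RX/OBSS time from the MIB SDR counters
 * and track the noise floor as a fixed-point (<<4) exponential moving
 * average.
 */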
static void
mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
	struct mt7615_phy *phy = mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);

	nf = mt7615_phy_get_nf(dev, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}

static void __mt7615_update_channel(struct mt7615_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;

	mt7615_phy_update_channel(&mdev->phy, 0);
	if (mdev->phy2)
		mt7615_phy_update_channel(mdev->phy2, 1);

	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

void mt7615_update_channel(struct mt76_dev *mdev)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);

	if (mt7615_pm_wake(dev))
		return;

	__mt7615_update_channel(dev);
	mt7615_pm_power_save_sched(dev);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);

static void mt7615_update_survey(struct mt7615_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	ktime_t cur_time;

	__mt7615_update_channel(dev);
	cur_time = ktime_get_boottime();

	mt76_update_survey_active_time(&mdev->phy, cur_time);
	if (mdev->phy2)
		mt76_update_survey_active_time(mdev->phy2, cur_time);
}

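/*
 * Snapshot the MIB counters. The stats are rebuilt from scratch on
 * each pass, and aggregation counters for the second PHY occupy the
 * upper half of aggr_stats.
 */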
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	bool ext_phy = phy != &dev->phy;
	int i, aggr;
	u32 val, val2;

	memset(mib, 0, sizeof(*mib));

	mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
					  MT_MIB_SDR3_FCS_ERR_MASK);

	val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
			     MT_MIB_AMPDU_MPDU_COUNT);
	if (val) {
		val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy),
				      MT_MIB_AMPDU_ACK_COUNT);
		mib->aggr_per = 1000 * (val - val2) / val;
	}

	aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
	for (i = 0; i < 4; i++) {
		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));

		val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
		if (val2 > mib->ack_fail_cnt)
			mib->ack_fail_cnt = val2;

		val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		if (val2 > mib->ba_miss_cnt)
			mib->ba_miss_cnt = val2;

		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
		val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
		if (val2 > mib->rts_retries_cnt) {
			mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
			mib->rts_retries_cnt = val2;
		}

		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));

		dev->mt76.aggr_stats[aggr++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr++] += val >> 16;
	}
}

void mt7615_pm_wake_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	struct mt76_phy *mphy;
	int i;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	if (mt7615_driver_own(dev)) {
		dev_err(mphy->dev->dev, "failed to wake device\n");
		goto out;
	}

	spin_lock_bh(&dev->pm.txq_lock);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
		struct mt76_wcid *wcid = msta ? &msta->wcid : NULL;
		struct ieee80211_sta *sta = NULL;

		if (!dev->pm.tx_q[i].skb)
			continue;

		if (msta && wcid->sta)
			sta = container_of((void *)msta, struct ieee80211_sta,
					   drv_priv);

		mt76_tx(mphy, sta, wcid, dev->pm.tx_q[i].skb);
		dev->pm.tx_q[i].skb = NULL;
	}
	spin_unlock_bh(&dev->pm.txq_lock);

	tasklet_schedule(&dev->mt76.tx_tasklet);

out:
	ieee80211_wake_queues(mphy->hw);
	complete_all(&dev->pm.wake_cmpl);
}

int mt7615_pm_wake(struct mt7615_dev *dev)
{
	struct mt76_phy *mphy = dev->phy.mt76;

	if (!mt7615_firmware_offload(dev))
		return 0;

	if (!mt76_is_mmio(mphy->dev))
		return 0;

	if (!test_bit(MT76_STATE_PM, &mphy->state))
		return 0;

	if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
		return 0;

	if (queue_work(dev->mt76.wq, &dev->pm.wake_work))
		reinit_completion(&dev->pm.wake_cmpl);

	if (!wait_for_completion_timeout(&dev->pm.wake_cmpl, 3 * HZ)) {
		ieee80211_wake_queues(mphy->hw);
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt7615_pm_wake);

void mt7615_pm_power_save_sched(struct mt7615_dev *dev)
{
	struct mt76_phy *mphy = dev->phy.mt76;

	if (!mt7615_firmware_offload(dev))
		return;

	if (!mt76_is_mmio(mphy->dev))
		return;

	if (!dev->pm.enable || !test_bit(MT76_STATE_RUNNING, &mphy->state))
		return;

	dev->pm.last_activity = jiffies;

	if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
		return;

	if (!test_bit(MT76_STATE_PM, &mphy->state))
		queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work,
				   dev->pm.idle_timeout);
}
EXPORT_SYMBOL_GPL(mt7615_pm_power_save_sched);

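/*
 * Runtime PM: the power-save work keeps re-arming itself until
 * idle_timeout of inactivity has elapsed, then hands ownership back to
 * the firmware via mt7615_firmware_own().
 */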
void mt7615_pm_power_save_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	unsigned long delta;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.ps_work.work);

	delta = dev->pm.idle_timeout;
	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	if (!mt7615_firmware_own(dev))
		return;
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}

static void
mt7615_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt7615_phy *phy = priv;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	if (mt7615_mcu_set_bss_pm(dev, vif, dev->pm.enable))
		return;

	if (dev->pm.enable) {
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
		mt76_set(dev, MT_WF_RFCR(ext_phy),
			 MT_WF_RFCR_DROP_OTHER_BEACON);
	} else {
		vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
		mt76_clear(dev, MT_WF_RFCR(ext_phy),
			   MT_WF_RFCR_DROP_OTHER_BEACON);
	}
}

int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable)
{
	struct mt76_phy *mphy = dev->phy.mt76;

	if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
		return -EOPNOTSUPP;

	mt7615_mutex_acquire(dev);

	if (dev->pm.enable == enable)
		goto out;

	dev->pm.enable = enable;
	ieee80211_iterate_active_interfaces(mphy->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7615_pm_interface_iter, mphy->priv);
out:
	mt7615_mutex_release(dev);

	return 0;
}

void mt7615_mac_work(struct work_struct *work)
{
	struct mt7615_phy *phy;
	struct mt76_dev *mdev;

	phy = (struct mt7615_phy *)container_of(work, struct mt7615_phy,
						mac_work.work);
	mdev = &phy->dev->mt76;

	mt7615_mutex_acquire(phy->dev);

	mt7615_update_survey(phy->dev);
	if (++phy->mac_work_count == 5) {
		phy->mac_work_count = 0;

		mt7615_mac_update_mib_stats(phy);
		mt7615_mac_scs_check(phy);
	}

	mt7615_mutex_release(phy->dev);

	mt76_tx_status_check(mdev, NULL, false);
	ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
				     MT7615_WATCHDOG_TIME);
}

static bool
mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->reset_state) & state),
				 MT7615_RESET_TIMEOUT);
	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;
	struct mt7615_dev *dev = mt7615_hw_dev(hw);

	mt7615_mcu_add_beacon(dev, hw, vif, vif->bss_conf.enable_beacon);
}

static void
mt7615_update_beacons(struct mt7615_dev *dev)
{
	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7615_update_vif_beacon, dev->mt76.hw);

	if (!dev->mt76.phy2)
		return;

	ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7615_update_vif_beacon, dev->mt76.phy2->hw);
}

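/*
 * Quiesce and restart WPDMA, used by the MT_MCU_INT_EVENT reset
 * handshake in mt7615_mac_reset_work() below: TX/RX DMA is stopped,
 * all queues are cleaned up and reset, then DMA is re-enabled.
 */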
	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}
EXPORT_SYMBOL_GPL(mt7615_dma_reset);

void mt7615_mac_reset_work(struct work_struct *work)
{
	struct mt7615_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7615_dev *dev;

	dev = container_of(work, struct mt7615_dev, reset_work);
	ext_phy = dev->mt76.phy2;
	phy2 = ext_phy ? ext_phy->priv : NULL;

	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->phy.mac_work);
	del_timer_sync(&dev->phy.roc_timer);
	cancel_work_sync(&dev->phy.roc_work);
	if (phy2) {
		cancel_delayed_work_sync(&phy2->mac_work);
		del_timer_sync(&phy2->roc_timer);
		cancel_work_sync(&phy2->roc_work);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	tasklet_disable(&dev->mt76.tx_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mt7615_mutex_acquire(dev);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);

	if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7615_dma_reset(dev);

		mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT);
		mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt7615_update_beacons(dev);

	mt7615_mutex_release(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
				     MT7615_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work,
					     MT7615_WATCHDOG_TIME);
}

static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
}

static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
	int err;

	err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}
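
/* Start CAC and the RDD engine on the chain that belongs to this phy
 * (chain 0 for the primary phy, chain 1 for the ext phy); 160 MHz and
 * 80+80 channels additionally enable the second chain. phy->rdd_state
 * records which chains are active so they can be stopped later.
 */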
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* start CAC */
	err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7615_dfs_start_rdd(dev, ext_phy);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(ext_phy);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7615_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

static int
mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
{
	const struct mt7615_dfs_radar_spec *radar_specs;
	struct mt7615_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7615_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7615_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	if (is_mt7663(&dev->mt76))
		return 0;

	if (dev->mt76.region == NL80211_DFS_UNSET) {
		phy->dfs_state = -1;
		if (phy->rdd_state)
			goto stop;

		return 0;
	}

	if (test_bit(MT76_SCANNING, &phy->mt76->state))
		return 0;

	if (phy->dfs_state == chandef->chan->dfs_state)
		return 0;

	err = mt7615_dfs_init_radar_specs(phy);
	if (err < 0) {
		phy->dfs_state = -1;
		goto stop;
	}

	phy->dfs_state = chandef->chan->dfs_state;

	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
			return mt7615_dfs_start_radar_detector(phy);

		return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
					  MT_RX_SEL0, 0);
	}

stop:
	err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7615_dfs_stop_radar_detector(phy);
	return 0;
}