// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7915.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

#define to_rssi(field, rxv)	((FIELD_GET(field, rxv) - 220) / 2)

static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7915_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7915_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7915_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}

bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7915_sta *msta;
	struct rate_info *rate;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->sta_poll_lock);
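	/* splice the pending entries onto a local list under the lock,
	 * then process them without holding sta_poll_lock throughout
	 */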
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		u8 bw;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7915_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7915_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}

		/*
		 * We don't support reading GI info from txs packets;
		 * to keep tx status reporting and AQL accurate, make
		 * sure the rate flags match by polling GI from the
		 * per-sta counters directly.
		 */
		rate = &msta->wcid.rate;
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
		val = mt76_rr(dev, addr);

		switch (rate->bw) {
		case RATE_INFO_BW_160:
			bw = IEEE80211_STA_RX_BW_160;
			break;
		case RATE_INFO_BW_80:
			bw = IEEE80211_STA_RX_BW_80;
			break;
		case RATE_INFO_BW_40:
			bw = IEEE80211_STA_RX_BW_40;
			break;
		default:
			bw = IEEE80211_STA_RX_BW_20;
			break;
		}

		if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
			u8 offs = 24 + 2 * bw;

			rate->he_gi = (val & (0x3 << offs)) >> offs;
		} else if (rate->flags &
			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
			if (val & BIT(12 + bw))
				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
			else
				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
		}
	}

	rcu_read_unlock();
}

static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7915_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info;
	u8 mode = 0, qos_ctl = 0;
	struct mt7915_sta *msta = NULL;
	u32 csum_status = *(u32 *)skb->cb;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
		mphy = dev->mt76.phys[MT_BAND1];
		if (!mphy)
			return -EINVAL;

		phy = mphy->priv;
		status->phy_idx = 1;
	}

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7915_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 6;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v0, v1;
		int ret;

		rxv = rxd;
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v0 = le32_to_cpu(rxv[0]);
		v1 = le32_to_cpu(rxv[1]);

		if (v0 & MT_PRXV_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 18;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
			ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
							    sband, rxv, &mode);
			if (ret < 0)
				return ret;
		}
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		struct ieee80211_vif *vif;
		int err;

		if (!msta || !msta->vif)
			return -EINVAL;

		vif = container_of((void *)msta->vif, struct ieee80211_vif,
				   drv_priv);
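		/* hw rx header translation cannot be used for fragmented
		 * frames; undo it and restore the original 802.11 header
		 */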
		err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
		if (err)
			return err;

		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/*
			 * When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static void
mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt7915_phy *phy = &dev->phy;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv_hdr = rxd + 2;
	__le32 *rxv = rxd + 4;
	u32 rcpi, ib_rssi, wb_rssi, v20, v21;
	u8 band_idx;
	s32 foe;
	u8 snr;
	int i;

	band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
	if (band_idx && !phy->band_idx) {
		phy = mt7915_ext_phy(dev);
		if (!phy)
			goto out;
	}

	rcpi = le32_to_cpu(rxv[6]);
	ib_rssi = le32_to_cpu(rxv[7]);
	wb_rssi = le32_to_cpu(rxv[8]) >> 5;

	for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
		if (i == 3)
			wb_rssi = le32_to_cpu(rxv[9]);

		phy->test.last_rcpi[i] = rcpi & 0xff;
		phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
		phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
	}

	v20 = le32_to_cpu(rxv[20]);
	v21 = le32_to_cpu(rxv[21]);

	foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
	      (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);

	snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;

	phy->test.last_freq_offset = foe;
	phy->test.last_snr = snr;
out:
#endif
	dev_kfree_skb(skb);
}

static void
mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
			 struct sk_buff *skb)
{
#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data *td = &phy->mt76->test;
	const struct ieee80211_rate *r;
	u8 bw, mode, nss = td->tx_rate_nss;
	u8 rate_idx = td->tx_rate_idx;
	u16 rateval = 0;
	u32 val;
	bool cck = false;
	int band;

	if (skb != phy->mt76->test.tx_skb)
		return;

	switch (td->tx_rate_mode) {
	case MT76_TM_TX_MODE_HT:
		nss = 1 + (rate_idx >> 3);
		mode = MT_PHY_TYPE_HT;
		break;
	case MT76_TM_TX_MODE_VHT:
		mode = MT_PHY_TYPE_VHT;
		break;
	case MT76_TM_TX_MODE_HE_SU:
		mode = MT_PHY_TYPE_HE_SU;
		break;
	case MT76_TM_TX_MODE_HE_EXT_SU:
		mode = MT_PHY_TYPE_HE_EXT_SU;
		break;
	case MT76_TM_TX_MODE_HE_TB:
		mode = MT_PHY_TYPE_HE_TB;
		break;
	case MT76_TM_TX_MODE_HE_MU:
		mode = MT_PHY_TYPE_HE_MU;
		break;
	case MT76_TM_TX_MODE_CCK:
		cck = true;
		fallthrough;
	case MT76_TM_TX_MODE_OFDM:
		band = phy->mt76->chandef.chan->band;
		if (band == NL80211_BAND_2GHZ && !cck)
			rate_idx += 4;

		r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
		val = cck ? r->hw_value_short : r->hw_value;

		mode = val >> 8;
		rate_idx = val & 0xff;
		break;
	default:
		mode = MT_PHY_TYPE_OFDM;
		break;
	}

	switch (phy->mt76->chandef.width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		break;
	case NL80211_CHAN_WIDTH_80:
		bw = 2;
		break;
	case NL80211_CHAN_WIDTH_80P80:
	case NL80211_CHAN_WIDTH_160:
		bw = 3;
		break;
	default:
		bw = 0;
		break;
	}

	if (td->tx_rate_stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, mode) |
		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1);

	txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

	le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
	if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);

	val = MT_TXD6_FIXED_BW |
	      FIELD_PREP(MT_TXD6_BW, bw) |
	      FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
	      FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);

	/* for HE_SU/HE_EXT_SU PPDU
	 * - 1x, 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_MU PPDU
	 * - 2x, 4x LTF + 0.8us GI
	 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
	 * for HE_TB PPDU
	 * - 1x, 2x LTF + 1.6us GI
	 * - 4x LTF + 3.2us GI
	 */
	if (mode >= MT_PHY_TYPE_HE_SU)
		val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);

	if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
		val |= MT_TXD6_LDPC;

	txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
	txwi[6] |= cpu_to_le32(val);
	txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
					  phy->test.spe_idx));
#endif
}

void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
			   struct ieee80211_key_conf *key,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct mt76_phy *mphy = &dev->phy;

	if (phy_idx && dev->phys[MT_BAND1])
		mphy = dev->phys[MT_BAND1];

	mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);

	if (mt76_testmode_enabled(mphy))
		mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}

int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_fw_txp *txp;
	struct mt76_txwi_cache *t;
	int id, i, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;
	int pid;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7915_sta *msta;

		msta = (struct mt7915_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
			      qid, 0);

	txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		txp->bss_idx = mvif->mt76.idx;
	}

	txp->token = cpu_to_le16(id);
	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
	else
		txp->rept_wds_wcid = cpu_to_le16(0x3ff);
	tx_info->skb = DMA_DUMMY_DATA;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

static void
mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	struct mt7915_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
	if (tid >= 6) /* skip VO queue */
		return;

	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7915_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7915_sta *msta;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;
	} else {
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
		wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);

		if (wcid && wcid->sta) {
			msta = container_of(wcid, struct mt7915_sta, wcid);
			sta = container_of((void *)msta, struct ieee80211_sta,
					   drv_priv);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
		}
	}

	if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
		mt7915_tx_check_aggr(sta, txwi);

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

static void
mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (mphy_ext) {
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
	}
}

static void
mt7915_mac_tx_free_done(struct mt7915_dev *dev,
			struct list_head *free_list, bool wake)
{
	struct sk_buff *skb, *tmp;

	mt7915_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

static void
mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	void *end = data + len;
	bool v3, wake = false;
	u16 total, count = 0;
	u32 txd = le32_to_cpu(free->txd);
	__le32 *cur_info;

	mt7915_mac_tx_free_prepare(dev);

	total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
	v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
	if (WARN_ON_ONCE((void *)&tx_info[total >> v3] > end))
		return;

	for (cur_info = tx_info; count < total; cur_info++) {
		u32 msdu, info = le32_to_cpu(*cur_info);
		u8 i;

		/*
		 * 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7915_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7915_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
			continue;

		for (i = 0; i < 1 + v3; i++) {
			if (v3) {
				msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
				if (msdu == MT_TX_FREE_MSDU_ID_V3)
					continue;
			} else {
				msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
			}
			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7915_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}

static void
mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le16 *info = (__le16 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	void *end = data + len;
	LIST_HEAD(free_list);
	bool wake = false;
	u8 i, count;

	mt7915_mac_tx_free_prepare(dev);

	count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
	if (WARN_ON_ONCE((void *)&info[count] > end))
		return;

	for (i = 0; i < count; i++) {
		struct mt76_txwi_cache *txwi;
		u16 msdu = le16_to_cpu(info[i]);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7915_txwi_free(dev, txwi, NULL, &free_list);
	}

	mt7915_mac_tx_free_done(dev, &free_list, wake);
}

static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
{
	struct mt7915_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_WED)
		return;

	if (wcidx >= mt7915_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7915_sta, wcid);

	if (pid == MT_PACKET_ID_WED)
		mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
	else
		mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

out:
	rcu_read_unlock();
}

bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
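		/* TXS records follow a two-dword header, eight dwords each */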
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7915_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_TXRX_NOTIFY_V0:
		mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7915_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXRXV:
		mt7915_mac_fill_rx_vector(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7915_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7915_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);

	mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	int i;

	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
	}

	i = 0;
	phy->mt76->survey_time = ktime_get_boottime();
	if (phy->band_idx)
		i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;

	memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7915_mcu_get_chan_mib_info(phy, true);
}

void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int offset;
	bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (ext_phy)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       ext_phy->coverage_class);

	mt76_set(dev, MT_ARB_SCR(phy->band_idx),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
		FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, 10) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
		FIELD_PREP(MT_IFS_EIFS_CCK, 314));

	if (phy->slottime < 20 || a_band)
		val = MT7915_CFEND_RATE_DEFAULT;
	else
		val = MT7915_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
{
	u32 reg;

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
				      MT_WF_PHY_RXTD12_MT7916(ext_phy);
	mt76_set(dev, reg,
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHY_RXTD12_IRPI_SW_CLR);

	reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
				      MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
	mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
}

static u8
mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7915_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int nss, i;

	for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
		u32 reg = is_mt7915(&dev->mt76) ?
			MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
			MT_WF_IRPI_NSS_MT7916(idx, nss);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	if (!n)
		return 0;

	return sum / n;
}

void mt7915_update_channel(struct mt76_phy *mphy)
{
	struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7915_mcu_get_chan_mib_info(phy, false);

	nf = mt7915_phy_get_nf(phy, phy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->reset_state) & state),
				 MT7915_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
				      BSS_CHANGED_BEACON_ENABLED);
		break;
	default:
		break;
	}
}

static void
mt7915_update_beacons(struct mt7915_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, dev->mt76.hw);

	if (!mphy_ext)
		return;

	ieee80211_iterate_active_interfaces(mphy_ext->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7915_update_vif_beacon, mphy_ext->hw);
}

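/* stop both WFDMA engines, flush the pending TX/RX queues, then
 * re-enable DMA with the prefetch settings re-initialized
 */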
static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (is_mt7915(&dev->mt76))
		mt76_clear(dev, MT_WFDMA1_GLO_CFG,
			   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_RX_DMA_EN);
	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

		if (is_mt7915(&dev->mt76))
			mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_RX_DMA_EN);
	}

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (mphy_ext)
			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt76_tx_status_check(&dev->mt76, true);

	/* re-init prefetch settings after reset */
	mt7915_dma_prefetch(dev);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
	if (is_mt7915(&dev->mt76))
		mt76_set(dev, MT_WFDMA1_GLO_CFG,
			 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN);

		if (is_mt7915(&dev->mt76))
			mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
	}
}

void mt7915_tx_token_put(struct mt7915_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7915_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

/* system error recovery */
void mt7915_mac_reset_work(struct work_struct *work)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7915_dev *dev;
	int i;

	dev = container_of(work, struct mt7915_dev, reset_work);
	ext_phy = dev->mt76.phys[MT_BAND1];
	phy2 = ext_phy ? ext_phy->priv : NULL;

	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7915_dma_reset(dev);

		mt7915_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->irq_tasklet);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7915_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work,
					     MT7915_WATCHDOG_TIME);
}

void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0, aggr1, cnt;
	u32 val;

	cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
	mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
	mib->rx_vector_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
	mib->rx_delimiter_fail_cnt +=
		FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR10(phy->band_idx));
	mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
	mib->rx_len_mismatch_cnt +=
		FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
	mib->tx_stop_q_empty_cnt +=
		FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
	mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
	mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR16(phy->band_idx));
	mib->primary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR17(phy->band_idx));
	mib->secondary_cca_busy_time +=
		FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR18(phy->band_idx));
	mib->primary_energy_detect_time +=
		FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR19(phy->band_idx));
	mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR20(phy->band_idx));
	mib->ofdm_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR21(phy->band_idx));
	mib->green_mdrdy_time +=
		FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
	mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
	mib->tx_rwp_fail_cnt +=
		FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
	mib->tx_rwp_need_cnt +=
		FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
	mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
		FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
	mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
	if (is_mt7915(&dev->mt76)) {
		for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
			val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
			mib->ba_miss_cnt +=
				FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
			mib->ack_fail_cnt +=
				FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);

			val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
			mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
			mib->rts_retries_cnt +=
				FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);

			val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
			dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
			dev->mt76.aggr_stats[aggr0++] += val >> 16;

			val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
			dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
			dev->mt76.aggr_stats[aggr1++] += val >> 16;
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);

		cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
	} else {
		for (i = 0; i < 2; i++) {
			/* rts count */
			val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
			mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* rts retry count */
			val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
			mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ba miss count */
			val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
			mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);

			/* ack fail count */
			val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
			mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
			mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
		}

		for (i = 0; i < 8; i++) {
			val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
			dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
			dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
		}

		cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
		mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
		mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
		mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
		mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
		mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
		mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
		mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);

		cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
		mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
		mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
	}
}

static void mt7915_mac_severe_check(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 trb;

	if (!phy->omac_mask)
		return;

	/* In rare cases, the TRB pointers might get out of sync and cause
	 * RMAC to stop Rx, so check the status periodically to see whether
	 * the TRB hardware requires minimal recovery.
	 */
	trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));

	if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
	    (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
	     FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
	    trb == phy->trb_ts)
		mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
				   ext_phy);

	phy->trb_ts = trb;
}

void mt7915_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7915_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7915_mcu_add_smps(dev, vif, sta);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}

void mt7915_mac_work(struct work_struct *work)
{
	struct mt7915_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7915_mac_update_stats(phy);
		mt7915_mac_severe_check(phy);
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7915_WATCHDOG_TIME);
}

static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
					MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
					MT_RX_SEL0, 0);
}

static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
				      MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
				       MT_RX_SEL0, 1);
}

static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	int err;

	/* start CAC */
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
				      MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7915_dfs_start_rdd(dev, phy->band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(phy->band_idx);

	if (!is_mt7915(&dev->mt76))
		return 0;

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7915_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

static int
mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
{
	const struct mt7915_dfs_radar_spec *radar_specs;
	struct mt7915_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7915_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7915_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7915_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7915_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
				      phy->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
				      phy->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7915_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

static int
mt7915_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

static u64
mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
			      struct mt7915_twt_flow *flow)
{
	struct mt7915_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7915_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7915_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	/* the wake interval must be able to contain the wake duration */
	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

static bool
mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
		struct mt7915_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7915_dev *dev = mt7915_hw_dev(hw);
	struct mt7915_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7915_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
		/* ask the station to retry with the minimum duration */
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
		goto unlock;
	}

	/* pick the first free flow id and advertise it in the response */
	flowid = ffs(~msta->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	/* reject agreements duplicating an existing flow's parameters */
	if (mt7915_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

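	/*
	 * Populate the flow descriptor from the negotiated parameters.
	 * For REQUEST/SUGGEST the AP picks the target wake time: the
	 * flow is slotted into the global TWT schedule and the first
	 * wake time is aligned to the next interval boundary after the
	 * current TSF, i.e. curr_tsf + interval -
	 * ((curr_tsf - start_tsf) % interval).
	 */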
	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	/* the firmware accepted the agreement, commit the driver state */
	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	/*
	 * keep only the wake duration unit and rx-disabled control bits
	 * in the response
	 */
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}

void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
				  struct mt7915_sta *msta,
				  u8 flowid)
{
	struct mt7915_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	/*
	 * remove the agreement from the firmware table before releasing
	 * the driver state
	 */
	flow = &msta->twt.flow[flowid];
	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}