// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1,  1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1,  1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1,  1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1,  1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7996_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7996_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}
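
/* Walk the shared sta_poll_list and refresh per-station state from the
 * WTBL: per-AC airtime counters (DW20 onwards, reported to mac80211 as
 * TX/RX airtime deltas), the guard interval of the current TX rate, and
 * the RSSI of response frames (CTS/BA/ACK) feeding the average ACK signal.
 * Summary inferred from the code below.
 */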
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	struct rate_info *rate;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];
		u8 bw;

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7996_sta, wcid.poll_list);
		list_del_init(&msta->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = msta->wcid.idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* We don't support reading GI info from txs packets.
		 * For accurate tx status reporting and AQL improvement,
		 * we need to make sure the flags match, so the GI is
		 * polled from the per-sta counters directly.
		 */
		rate = &msta->wcid.rate;

		switch (rate->bw) {
		case RATE_INFO_BW_320:
			bw = IEEE80211_STA_RX_BW_320;
			break;
		case RATE_INFO_BW_160:
			bw = IEEE80211_STA_RX_BW_160;
			break;
		case RATE_INFO_BW_80:
			bw = IEEE80211_STA_RX_BW_80;
			break;
		case RATE_INFO_BW_40:
			bw = IEEE80211_STA_RX_BW_40;
			break;
		default:
			bw = IEEE80211_STA_RX_BW_20;
			break;
		}

		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
		val = mt76_rr(dev, addr);
		if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
			addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 5);
			val = mt76_rr(dev, addr);
			rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
		} else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
			u8 offs = 24 + 2 * bw;

			rate->he_gi = (val & (0x3 << offs)) >> offs;
		} else if (rate->flags &
			   (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
			if (val & BIT(12 + bw))
				rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
			else
				rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		msta->ack_signal =
			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);

		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
	}

	rcu_read_unlock();
}

void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
			      struct ieee80211_vif *vif, bool enable)
{
	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
	u32 addr;

	addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
	if (enable)
		mt76_set(dev, addr, BIT(5));
	else
		mt76_clear(dev, addr, BIT(5));
}

void mt7996_mac_set_fixed_rate_table(struct mt7996_dev *dev,
				     u8 tbl_idx, u16 rate_idx)
{
	u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx;

	mt76_wr(dev, MT_WTBL_ITDR0, rate_idx);
	/* use wtbl spe idx */
	mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL);
	mt76_wr(dev, MT_WTBL_ITCR, ctrl);
}
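
/* Reverse the RX header translation done by the hardware for the first
 * fragment: rebuild the 802.11 header (addr1/2/3, plus addr4 for 4-address
 * frames) from RXD words 8/10 and the translated ethernet header, restore
 * the LLC-SNAP/bridge-tunnel prefix, and re-insert HT control and QoS
 * control fields where present. Summary inferred from the code below.
 */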
/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}
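
/* Decode the P-RXV rate words into mac80211 RX status: rate index, NSS,
 * STBC/GI, PHY mode (CCK/OFDM/HT/VHT/HE/EHT) and bandwidth. Rate values
 * outside what the reported mode allows are rejected with -EINVAL.
 */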
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		struct mt7996_sta *msta;

		msta = container_of(status->wcid, struct mt7996_sta, wcid);
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&msta->wcid.poll_list))
			list_add_tail(&msta->wcid.poll_list,
				      &dev->mt76.sta_poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd1 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so the A-MSDU present bit needs to be
			 * cleared here to mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}
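
/* TXD helpers for the two frame formats: for hardware-encap (802.3) frames
 * only header format, TID and frame type/subtype are filled in; raw 802.11
 * frames additionally need header length, fixed-rate/BIP handling and
 * sequence-number control below.
 */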
static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}
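
/* Compose the generic part of the TXD: queue and packet format in DW0,
 * wlan/omac index in DW1, power-management/protection bits in DW3, packet
 * id for TX status reporting in DW5, and a fixed-rate override from the
 * per-vif rate tables for management, multicast and beacon frames.
 */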
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif *mvif;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
	      FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
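
/* DMA TX preparation: a token is allocated for the txwi so the TX-free
 * event can map completions back to their skbs, then the TXD and the
 * firmware-owned txp descriptor are filled with the scatter buffers. Note
 * that only MT_CT_PARSE_LEN bytes of the header are handed to the fw.
 */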
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->jiffies + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->jiffies = jiffies;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags =
		cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->fw.bss_idx = mvif->mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}
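
/* On TX completion of a QoS data frame to an HT/HE capable peer, start a
 * BA session on the corresponding TID unless one is already pending
 * (VO TIDs are intentionally skipped).
 */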
static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
	struct mt7996_sta *msta;
	u16 fc, tid;
	u32 val;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = le32_get_bits(txwi[1], MT_TXD1_TID);
	if (tid >= 6) /* skip VO queue */
		return;

	val = le32_to_cpu(txwi[2]);
	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, txwi);
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}
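
/* TXRX_NOTIFY (TX-free) event handler: entries either announce a new wcid
 * pair or carry up to two MSDU tokens each; every released token unmaps
 * and completes its skb. Only event format v4 and newer is expected here.
 */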
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&mdev->sta_poll_lock);
			if (list_empty(&msta->wcid.poll_list))
				list_add_tail(&msta->wcid.poll_list,
					      &mdev->sta_poll_list);
			spin_unlock_bh(&mdev->sta_poll_lock);
			continue;
		}

		if (info & MT_TXFREE_INFO_HEADER)
			continue;

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
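
/* Parse one TXS block for a pending skb: ACK state first, then the TX rate
 * (MCS/NSS/mode/bandwidth), which is folded into the per-wcid rate info
 * and the driver TX statistics.
 */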
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
		return;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_FIRST)
		return;

	if (wcidx >= mt7996_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	if (list_empty(&msta->wcid.poll_list))
		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

out:
	rcu_read_unlock();
}
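
/* RX dispatch for the offload path: TXRX_NOTIFY, TXS and fw-monitor
 * packets are consumed in place; the return value tells the caller whether
 * the buffer still needs normal RX processing.
 */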
bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
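
/* BIT(11) | BIT(9) below are not named in the register header; they
 * presumably re-arm the CCA statistics engine after STSCNT_EN has been
 * cleared (kept as-is, as an assumption).
 */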
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}
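
/* phy->noise is an exponential moving average of the noise floor kept in
 * 1/16 units fixed point: phy->noise += nf - (phy->noise >> 4) approximates
 * avg = 15/16 * avg + 1/16 * nf (scaled by 16), and state->noise reports
 * the averaged value as a negative dBm figure.
 */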
void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = (struct mt7996_phy *)mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
		break;
	default:
		break;
	}
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
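
/* Full-chip restart used by the recovery path: mask interrupts, drain and
 * disable all TX/RX (workers, NAPI, tokens), reset the DMA, then reload
 * firmware and EEPROM settings and bring every previously running band
 * back up.
 */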
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		set_bit(MT76_MCU_RESET, &phy2->mt76->state);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		set_bit(MT76_MCU_RESET, &phy3->mt76->state);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(dev, &dev->mphy.sband_2g.sband);
	mt7996_init_txpower(dev, &dev->mphy.sband_5g.sband);
	mt7996_init_txpower(dev, &dev->mphy.sband_6g.sband);
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2->mt76->hw);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3->mt76->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}
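
/* Chip full reset, used after a WM/WA firmware watchdog crash: stop
 * mac80211 queues and the per-band MAC work, retry mt7996_mac_restart()
 * up to ten times, then ask mac80211 to restart its hardware state.
 */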
static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}
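
/* L1 SER (system error recovery) entry point. A firmware WDT crash takes
 * the full-reset path above; otherwise DMA is stopped and re-initialised
 * in lockstep with the MCU via the MT_MCU_INT_EVENT handshakes below.
 */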
void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "\n%s L1 SER recovery start.",
		 wiphy_name(dev->mt76.hw->wiphy));
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
	mt7996_dma_start(dev, false);

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "\n%s L1 SER recovery completed.",
		 wiphy_name(dev->mt76.hw->wiphy));
}
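
/* The coredump buffer is filled with memory regions laid out back to back,
 * each preceded by a small header carrying its start address and length;
 * a region of zero length terminates the dump early but keeps its header.
 */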
void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}

void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);

		/* TODO: smps change */

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}
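
/* For illustration only: the producer side of the list drained above.
 * A rate-control update path could queue a station and kick rc_work
 * roughly like this; mt7996_sta_rc_queue is a hypothetical name, not
 * the driver's actual mac80211 callback. All fields and locks it
 * touches are the same ones used by mt7996_mac_sta_rc_work().
 */
#if 0
static void mt7996_sta_rc_queue(struct ieee80211_hw *hw,
				struct ieee80211_sta *sta, u32 changed)
{
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;

	/* record what changed and queue the station exactly once */
	spin_lock_bh(&dev->mt76.sta_poll_lock);
	msta->changed |= changed;
	if (list_empty(&msta->rc_list))
		list_add_tail(&msta->rc_list, &dev->sta_rc_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	ieee80211_queue_work(hw, &dev->rc_work);
}
#endif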

void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
				   MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
				   MT_RX_SEL0, 0);
}

static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
				 MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}

static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int err;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(band_idx);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7996_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}
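
/* DFS state transitions handled by the function below:
 *
 *   UNKNOWN/DISABLED -> CAC:  program the region-specific radar
 *                             thresholds, start RDD and the CAC period
 *   CAC -> ACTIVE:            CAC completed without a radar hit, tell
 *                             the MCU to end CAC (RDD_CAC_END)
 *   any -> DISABLED:          return the MCU to normal (non-DFS)
 *                             operation and stop the radar detector
 */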
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}
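
/* Worked example for the admission check above (values illustrative):
 * with mantissa = 512 and wake-interval exponent = 10, the wake
 * interval is 512 << 10 = 524288 us; a min_twt_dur of 255 gives a
 * duration of 255 << 8 = 65280 us (256us units). interval >= duration,
 * so such a request passes mt7996_mac_check_twt_req().
 */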
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	le16p_replace_bits(&twt_agrt->req_type, flowid,
			   IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
			   IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}

void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_sta *msta,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}
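
/* For illustration only: a caller tearing down every remaining flow of
 * a station, e.g. on station removal, would walk the flowid mask under
 * dev->mt76.mutex. mt7996_mac_twt_teardown_all is a hypothetical
 * helper sketched here, not part of this file.
 */
#if 0
static void mt7996_mac_twt_teardown_all(struct mt7996_dev *dev,
					struct mt7996_sta *msta)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	/* teardown_flow validates the flowid and clears the mask bit */
	for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
		if (msta->twt.flowid_mask & BIT(i))
			mt7996_mac_twt_teardown_flow(dev, msta, i);
}
#endif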