1 // SPDX-License-Identifier: ISC 2 /* Copyright (C) 2020 MediaTek Inc. */ 3 4 #include <linux/etherdevice.h> 5 #include <linux/timekeeping.h> 6 #include "mt7915.h" 7 #include "../dma.h" 8 #include "mac.h" 9 #include "mcu.h" 10 11 #define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2) 12 13 #define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f) 14 #define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\ 15 IEEE80211_RADIOTAP_HE_##f) 16 17 static const struct mt7915_dfs_radar_spec etsi_radar_specs = { 18 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 19 .radar_pattern = { 20 [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 }, 21 [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 }, 22 [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 }, 23 [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 }, 24 [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 }, 25 [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 }, 26 [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 }, 27 [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 }, 28 }, 29 }; 30 31 static const struct mt7915_dfs_radar_spec fcc_radar_specs = { 32 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 33 .radar_pattern = { 34 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 35 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 36 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 37 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 38 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 39 }, 40 }; 41 42 static const struct mt7915_dfs_radar_spec jp_radar_specs = { 43 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 }, 44 .radar_pattern = { 45 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 }, 46 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 }, 47 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 }, 48 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 }, 49 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 }, 50 [13] = { 1, 0, 
7, 32, 28, 0, 3836, 3856, 14, 1, 1 }, 51 [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 }, 52 [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 }, 53 }, 54 }; 55 56 static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev, 57 u16 idx, bool unicast) 58 { 59 struct mt7915_sta *sta; 60 struct mt76_wcid *wcid; 61 62 if (idx >= ARRAY_SIZE(dev->mt76.wcid)) 63 return NULL; 64 65 wcid = rcu_dereference(dev->mt76.wcid[idx]); 66 if (unicast || !wcid) 67 return wcid; 68 69 if (!wcid->sta) 70 return NULL; 71 72 sta = container_of(wcid, struct mt7915_sta, wcid); 73 if (!sta->vif) 74 return NULL; 75 76 return &sta->vif->sta.wcid; 77 } 78 79 void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) 80 { 81 } 82 83 bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask) 84 { 85 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, 86 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); 87 88 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 89 0, 5000); 90 } 91 92 u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw) 93 { 94 mt76_wr(dev, MT_WTBLON_TOP_WDUCR, 95 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); 96 97 return MT_WTBL_LMAC_OFFS(wcid, dw); 98 } 99 100 static void mt7915_mac_sta_poll(struct mt7915_dev *dev) 101 { 102 static const u8 ac_to_tid[] = { 103 [IEEE80211_AC_BE] = 0, 104 [IEEE80211_AC_BK] = 1, 105 [IEEE80211_AC_VI] = 4, 106 [IEEE80211_AC_VO] = 6 107 }; 108 struct ieee80211_sta *sta; 109 struct mt7915_sta *msta; 110 struct rate_info *rate; 111 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; 112 LIST_HEAD(sta_poll_list); 113 int i; 114 115 spin_lock_bh(&dev->sta_poll_lock); 116 list_splice_init(&dev->sta_poll_list, &sta_poll_list); 117 spin_unlock_bh(&dev->sta_poll_lock); 118 119 rcu_read_lock(); 120 121 while (true) { 122 bool clear = false; 123 u32 addr, val; 124 u16 idx; 125 u8 bw; 126 127 spin_lock_bh(&dev->sta_poll_lock); 128 if (list_empty(&sta_poll_list)) { 129 
spin_unlock_bh(&dev->sta_poll_lock); 130 break; 131 } 132 msta = list_first_entry(&sta_poll_list, 133 struct mt7915_sta, poll_list); 134 list_del_init(&msta->poll_list); 135 spin_unlock_bh(&dev->sta_poll_lock); 136 137 idx = msta->wcid.idx; 138 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20); 139 140 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 141 u32 tx_last = msta->airtime_ac[i]; 142 u32 rx_last = msta->airtime_ac[i + 4]; 143 144 msta->airtime_ac[i] = mt76_rr(dev, addr); 145 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 146 147 tx_time[i] = msta->airtime_ac[i] - tx_last; 148 rx_time[i] = msta->airtime_ac[i + 4] - rx_last; 149 150 if ((tx_last | rx_last) & BIT(30)) 151 clear = true; 152 153 addr += 8; 154 } 155 156 if (clear) { 157 mt7915_mac_wtbl_update(dev, idx, 158 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 159 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); 160 } 161 162 if (!msta->wcid.sta) 163 continue; 164 165 sta = container_of((void *)msta, struct ieee80211_sta, 166 drv_priv); 167 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 168 u8 q = mt7915_lmac_mapping(dev, i); 169 u32 tx_cur = tx_time[q]; 170 u32 rx_cur = rx_time[q]; 171 u8 tid = ac_to_tid[i]; 172 173 if (!tx_cur && !rx_cur) 174 continue; 175 176 ieee80211_sta_register_airtime(sta, tid, tx_cur, 177 rx_cur); 178 } 179 180 /* 181 * We don't support reading GI info from txs packets. 182 * For accurate tx status reporting and AQL improvement, 183 * we need to make sure that flags match so polling GI 184 * from per-sta counters directly. 
185 */ 186 rate = &msta->wcid.rate; 187 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7); 188 val = mt76_rr(dev, addr); 189 190 switch (rate->bw) { 191 case RATE_INFO_BW_160: 192 bw = IEEE80211_STA_RX_BW_160; 193 break; 194 case RATE_INFO_BW_80: 195 bw = IEEE80211_STA_RX_BW_80; 196 break; 197 case RATE_INFO_BW_40: 198 bw = IEEE80211_STA_RX_BW_40; 199 break; 200 default: 201 bw = IEEE80211_STA_RX_BW_20; 202 break; 203 } 204 205 if (rate->flags & RATE_INFO_FLAGS_HE_MCS) { 206 u8 offs = 24 + 2 * bw; 207 208 rate->he_gi = (val & (0x3 << offs)) >> offs; 209 } else if (rate->flags & 210 (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) { 211 if (val & BIT(12 + bw)) 212 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 213 else 214 rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI; 215 } 216 } 217 218 rcu_read_unlock(); 219 } 220 221 static void 222 mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, 223 struct ieee80211_radiotap_he *he, 224 __le32 *rxv) 225 { 226 u32 ru_h, ru_l; 227 u8 ru, offs = 0; 228 229 ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0])); 230 ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1])); 231 ru = (u8)(ru_l | ru_h << 4); 232 233 status->bw = RATE_INFO_BW_HE_RU; 234 235 switch (ru) { 236 case 0 ... 36: 237 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; 238 offs = ru; 239 break; 240 case 37 ... 52: 241 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; 242 offs = ru - 37; 243 break; 244 case 53 ... 60: 245 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; 246 offs = ru - 53; 247 break; 248 case 61 ... 64: 249 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; 250 offs = ru - 61; 251 break; 252 case 65 ... 
66: 253 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; 254 offs = ru - 65; 255 break; 256 case 67: 257 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; 258 break; 259 case 68: 260 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; 261 break; 262 } 263 264 he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN); 265 he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) | 266 le16_encode_bits(offs, 267 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); 268 } 269 270 static void 271 mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv) 272 { 273 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 274 static const struct ieee80211_radiotap_he_mu mu_known = { 275 .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) | 276 HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) | 277 HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) | 278 HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN), 279 .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), 280 }; 281 struct ieee80211_radiotap_he_mu *he_mu = NULL; 282 283 status->flag |= RX_FLAG_RADIOTAP_HE_MU; 284 285 he_mu = skb_push(skb, sizeof(mu_known)); 286 memcpy(he_mu, &mu_known, sizeof(mu_known)); 287 288 #define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f) 289 290 he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx); 291 if (status->he_dcm) 292 he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm); 293 294 he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) | 295 MU_PREP(FLAGS2_SIG_B_SYMS_USERS, 296 le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER)); 297 298 he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0); 299 300 if (status->bw >= RATE_INFO_BW_40) { 301 he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN); 302 he_mu->ru_ch2[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU1); 303 } 304 305 if (status->bw >= RATE_INFO_BW_80) { 306 he_mu->ru_ch1[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU2); 307 he_mu->ru_ch2[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU3); 308 } 309 } 310 311 static void 312 mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 
*rxv, u32 mode) 313 { 314 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 315 static const struct ieee80211_radiotap_he known = { 316 .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) | 317 HE_BITS(DATA1_DATA_DCM_KNOWN) | 318 HE_BITS(DATA1_STBC_KNOWN) | 319 HE_BITS(DATA1_CODING_KNOWN) | 320 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) | 321 HE_BITS(DATA1_DOPPLER_KNOWN) | 322 HE_BITS(DATA1_SPTL_REUSE_KNOWN) | 323 HE_BITS(DATA1_BSS_COLOR_KNOWN), 324 .data2 = HE_BITS(DATA2_GI_KNOWN) | 325 HE_BITS(DATA2_TXBF_KNOWN) | 326 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) | 327 HE_BITS(DATA2_TXOP_KNOWN), 328 }; 329 struct ieee80211_radiotap_he *he = NULL; 330 u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1; 331 332 status->flag |= RX_FLAG_RADIOTAP_HE; 333 334 he = skb_push(skb, sizeof(known)); 335 memcpy(he, &known, sizeof(known)); 336 337 he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) | 338 HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]); 339 he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); 340 he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) | 341 le16_encode_bits(ltf_size, 342 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); 343 if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF) 344 he->data5 |= HE_BITS(DATA5_TXBF); 345 he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) | 346 HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]); 347 348 switch (mode) { 349 case MT_PHY_TYPE_HE_SU: 350 he->data1 |= HE_BITS(DATA1_FORMAT_SU) | 351 HE_BITS(DATA1_UL_DL_KNOWN) | 352 HE_BITS(DATA1_BEAM_CHANGE_KNOWN); 353 354 he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) | 355 HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); 356 break; 357 case MT_PHY_TYPE_HE_EXT_SU: 358 he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) | 359 HE_BITS(DATA1_UL_DL_KNOWN); 360 361 he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); 362 break; 363 case MT_PHY_TYPE_HE_MU: 364 he->data1 |= HE_BITS(DATA1_FORMAT_MU) | 365 HE_BITS(DATA1_UL_DL_KNOWN); 366 367 he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); 368 he->data4 |= 
HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]); 369 370 mt7915_mac_decode_he_radiotap_ru(status, he, rxv); 371 mt7915_mac_decode_he_mu_radiotap(skb, rxv); 372 break; 373 case MT_PHY_TYPE_HE_TB: 374 he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | 375 HE_BITS(DATA1_SPTL_REUSE2_KNOWN) | 376 HE_BITS(DATA1_SPTL_REUSE3_KNOWN) | 377 HE_BITS(DATA1_SPTL_REUSE4_KNOWN); 378 379 he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) | 380 HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) | 381 HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]); 382 383 mt7915_mac_decode_he_radiotap_ru(status, he, rxv); 384 break; 385 default: 386 break; 387 } 388 } 389 390 /* The HW does not translate the mac header to 802.3 for mesh point */ 391 static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap) 392 { 393 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 394 struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid; 395 struct ieee80211_sta *sta; 396 struct ieee80211_vif *vif; 397 struct ieee80211_hdr hdr; 398 struct ethhdr eth_hdr; 399 __le32 *rxd = (__le32 *)skb->data; 400 __le32 qos_ctrl, ht_ctrl; 401 402 if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) != 403 MT_RXD3_NORMAL_U2M) 404 return -EINVAL; 405 406 if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4)) 407 return -EINVAL; 408 409 if (!msta || !msta->vif) 410 return -EINVAL; 411 412 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 413 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 414 415 /* store the info from RXD and ethhdr to avoid being overridden */ 416 memcpy(ð_hdr, skb->data + hdr_gap, sizeof(eth_hdr)); 417 hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]); 418 hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]); 419 qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]); 420 ht_ctrl = FIELD_GET(MT_RXD9_HT_CONTROL, rxd[9]); 421 422 hdr.duration_id = 0; 423 ether_addr_copy(hdr.addr1, vif->addr); 424 ether_addr_copy(hdr.addr2, sta->addr); 
425 switch (le16_to_cpu(hdr.frame_control) & 426 (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 427 case 0: 428 ether_addr_copy(hdr.addr3, vif->bss_conf.bssid); 429 break; 430 case IEEE80211_FCTL_FROMDS: 431 ether_addr_copy(hdr.addr3, eth_hdr.h_source); 432 break; 433 case IEEE80211_FCTL_TODS: 434 ether_addr_copy(hdr.addr3, eth_hdr.h_dest); 435 break; 436 case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS: 437 ether_addr_copy(hdr.addr3, eth_hdr.h_dest); 438 ether_addr_copy(hdr.addr4, eth_hdr.h_source); 439 break; 440 default: 441 break; 442 } 443 444 skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2); 445 if (eth_hdr.h_proto == htons(ETH_P_AARP) || 446 eth_hdr.h_proto == htons(ETH_P_IPX)) 447 ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header); 448 else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN)) 449 ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header); 450 else 451 skb_pull(skb, 2); 452 453 if (ieee80211_has_order(hdr.frame_control)) 454 memcpy(skb_push(skb, 2), &ht_ctrl, 2); 455 if (ieee80211_is_data_qos(hdr.frame_control)) 456 memcpy(skb_push(skb, 2), &qos_ctrl, 2); 457 if (ieee80211_has_a4(hdr.frame_control)) 458 memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr)); 459 else 460 memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6); 461 462 return 0; 463 } 464 465 static int 466 mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) 467 { 468 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; 469 struct mt76_phy *mphy = &dev->mt76.phy; 470 struct mt7915_phy *phy = &dev->phy; 471 struct ieee80211_supported_band *sband; 472 __le32 *rxd = (__le32 *)skb->data; 473 __le32 *rxv = NULL; 474 u32 mode = 0; 475 u32 rxd0 = le32_to_cpu(rxd[0]); 476 u32 rxd1 = le32_to_cpu(rxd[1]); 477 u32 rxd2 = le32_to_cpu(rxd[2]); 478 u32 rxd3 = le32_to_cpu(rxd[3]); 479 u32 rxd4 = le32_to_cpu(rxd[4]); 480 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; 481 bool unicast, insert_ccmp_hdr = false; 482 u8 remove_pad, 
amsdu_info; 483 bool hdr_trans; 484 u16 hdr_gap; 485 u16 seq_ctrl = 0; 486 u8 qos_ctl = 0; 487 __le16 fc = 0; 488 int i, idx; 489 490 memset(status, 0, sizeof(*status)); 491 492 if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) { 493 mphy = dev->mt76.phy2; 494 if (!mphy) 495 return -EINVAL; 496 497 phy = mphy->priv; 498 status->ext_phy = true; 499 } 500 501 if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 502 return -EINVAL; 503 504 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR) 505 return -EINVAL; 506 507 hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS; 508 if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM)) 509 return -EINVAL; 510 511 /* ICV error or CCMP/BIP/WPI MIC error */ 512 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR) 513 status->flag |= RX_FLAG_ONLY_MONITOR; 514 515 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M; 516 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1); 517 status->wcid = mt7915_rx_get_wcid(dev, idx, unicast); 518 519 if (status->wcid) { 520 struct mt7915_sta *msta; 521 522 msta = container_of(status->wcid, struct mt7915_sta, wcid); 523 spin_lock_bh(&dev->sta_poll_lock); 524 if (list_empty(&msta->poll_list)) 525 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 526 spin_unlock_bh(&dev->sta_poll_lock); 527 } 528 529 status->freq = mphy->chandef.chan->center_freq; 530 status->band = mphy->chandef.chan->band; 531 if (status->band == NL80211_BAND_5GHZ) 532 sband = &mphy->sband_5g.sband; 533 else 534 sband = &mphy->sband_2g.sband; 535 536 if (!sband->channels) 537 return -EINVAL; 538 539 if ((rxd0 & csum_mask) == csum_mask) 540 skb->ip_summed = CHECKSUM_UNNECESSARY; 541 542 if (rxd1 & MT_RXD1_NORMAL_FCS_ERR) 543 status->flag |= RX_FLAG_FAILED_FCS_CRC; 544 545 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR) 546 status->flag |= RX_FLAG_MMIC_ERROR; 547 548 if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 && 549 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) { 550 status->flag |= RX_FLAG_DECRYPTED; 551 status->flag |= RX_FLAG_IV_STRIPPED; 552 status->flag |= 
RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; 553 } 554 555 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2); 556 557 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) 558 return -EINVAL; 559 560 rxd += 6; 561 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) { 562 u32 v0 = le32_to_cpu(rxd[0]); 563 u32 v2 = le32_to_cpu(rxd[2]); 564 565 fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0)); 566 qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2); 567 seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2); 568 569 rxd += 4; 570 if ((u8 *)rxd - skb->data >= skb->len) 571 return -EINVAL; 572 } 573 574 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) { 575 u8 *data = (u8 *)rxd; 576 577 if (status->flag & RX_FLAG_DECRYPTED) { 578 switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) { 579 case MT_CIPHER_AES_CCMP: 580 case MT_CIPHER_CCMP_CCX: 581 case MT_CIPHER_CCMP_256: 582 insert_ccmp_hdr = 583 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); 584 fallthrough; 585 case MT_CIPHER_TKIP: 586 case MT_CIPHER_TKIP_NO_MIC: 587 case MT_CIPHER_GCMP: 588 case MT_CIPHER_GCMP_256: 589 status->iv[0] = data[5]; 590 status->iv[1] = data[4]; 591 status->iv[2] = data[3]; 592 status->iv[3] = data[2]; 593 status->iv[4] = data[1]; 594 status->iv[5] = data[0]; 595 break; 596 default: 597 break; 598 } 599 } 600 rxd += 4; 601 if ((u8 *)rxd - skb->data >= skb->len) 602 return -EINVAL; 603 } 604 605 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) { 606 status->timestamp = le32_to_cpu(rxd[0]); 607 status->flag |= RX_FLAG_MACTIME_START; 608 609 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) { 610 status->flag |= RX_FLAG_AMPDU_DETAILS; 611 612 /* all subframes of an A-MPDU have the same timestamp */ 613 if (phy->rx_ampdu_ts != status->timestamp) { 614 if (!++phy->ampdu_ref) 615 phy->ampdu_ref++; 616 } 617 phy->rx_ampdu_ts = status->timestamp; 618 619 status->ampdu_ref = phy->ampdu_ref; 620 } 621 622 rxd += 2; 623 if ((u8 *)rxd - skb->data >= skb->len) 624 return -EINVAL; 625 } 626 627 /* RXD Group 3 - P-RXV */ 628 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { 629 u32 v0, v1, v2; 630 631 rxv = 
rxd; 632 rxd += 2; 633 if ((u8 *)rxd - skb->data >= skb->len) 634 return -EINVAL; 635 636 v0 = le32_to_cpu(rxv[0]); 637 v1 = le32_to_cpu(rxv[1]); 638 v2 = le32_to_cpu(rxv[2]); 639 640 if (v0 & MT_PRXV_HT_AD_CODE) 641 status->enc_flags |= RX_ENC_FLAG_LDPC; 642 643 status->chains = mphy->antenna_mask; 644 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1); 645 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1); 646 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1); 647 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1); 648 status->signal = status->chain_signal[0]; 649 650 for (i = 1; i < hweight8(mphy->antenna_mask); i++) { 651 if (!(status->chains & BIT(i))) 652 continue; 653 654 status->signal = max(status->signal, 655 status->chain_signal[i]); 656 } 657 658 /* RXD Group 5 - C-RXV */ 659 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) { 660 u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2); 661 u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2); 662 bool cck = false; 663 664 rxd += 18; 665 if ((u8 *)rxd - skb->data >= skb->len) 666 return -EINVAL; 667 668 idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0); 669 mode = FIELD_GET(MT_CRXV_TX_MODE, v2); 670 671 switch (mode) { 672 case MT_PHY_TYPE_CCK: 673 cck = true; 674 fallthrough; 675 case MT_PHY_TYPE_OFDM: 676 i = mt76_get_rate(&dev->mt76, sband, i, cck); 677 break; 678 case MT_PHY_TYPE_HT_GF: 679 case MT_PHY_TYPE_HT: 680 status->encoding = RX_ENC_HT; 681 if (i > 31) 682 return -EINVAL; 683 break; 684 case MT_PHY_TYPE_VHT: 685 status->nss = 686 FIELD_GET(MT_PRXV_NSTS, v0) + 1; 687 status->encoding = RX_ENC_VHT; 688 if (i > 9) 689 return -EINVAL; 690 break; 691 case MT_PHY_TYPE_HE_MU: 692 case MT_PHY_TYPE_HE_SU: 693 case MT_PHY_TYPE_HE_EXT_SU: 694 case MT_PHY_TYPE_HE_TB: 695 status->nss = 696 FIELD_GET(MT_PRXV_NSTS, v0) + 1; 697 status->encoding = RX_ENC_HE; 698 i &= GENMASK(3, 0); 699 700 if (gi <= NL80211_RATE_INFO_HE_GI_3_2) 701 status->he_gi = gi; 702 703 status->he_dcm = !!(idx & MT_PRXV_TX_DCM); 704 break; 705 default: 706 return -EINVAL; 
707 } 708 status->rate_idx = i; 709 710 switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) { 711 case IEEE80211_STA_RX_BW_20: 712 break; 713 case IEEE80211_STA_RX_BW_40: 714 if (mode & MT_PHY_TYPE_HE_EXT_SU && 715 (idx & MT_PRXV_TX_ER_SU_106T)) { 716 status->bw = RATE_INFO_BW_HE_RU; 717 status->he_ru = 718 NL80211_RATE_INFO_HE_RU_ALLOC_106; 719 } else { 720 status->bw = RATE_INFO_BW_40; 721 } 722 break; 723 case IEEE80211_STA_RX_BW_80: 724 status->bw = RATE_INFO_BW_80; 725 break; 726 case IEEE80211_STA_RX_BW_160: 727 status->bw = RATE_INFO_BW_160; 728 break; 729 default: 730 return -EINVAL; 731 } 732 733 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; 734 if (mode < MT_PHY_TYPE_HE_SU && gi) 735 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 736 } 737 } 738 739 amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4); 740 status->amsdu = !!amsdu_info; 741 if (status->amsdu) { 742 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME; 743 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME; 744 } 745 746 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad; 747 if (hdr_trans && ieee80211_has_morefrags(fc)) { 748 if (mt7915_reverse_frag0_hdr_trans(skb, hdr_gap)) 749 return -EINVAL; 750 hdr_trans = false; 751 } else { 752 int pad_start = 0; 753 754 skb_pull(skb, hdr_gap); 755 if (!hdr_trans && status->amsdu) { 756 pad_start = ieee80211_get_hdrlen_from_skb(skb); 757 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) { 758 /* 759 * When header translation failure is indicated, 760 * the hardware will insert an extra 2-byte field 761 * containing the data length after the protocol 762 * type field. 
763 */ 764 pad_start = 12; 765 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) 766 pad_start += 4; 767 768 if (get_unaligned_be16(skb->data + pad_start) != 769 skb->len - pad_start - 2) 770 pad_start = 0; 771 } 772 773 if (pad_start) { 774 memmove(skb->data + 2, skb->data, pad_start); 775 skb_pull(skb, 2); 776 } 777 } 778 779 if (!hdr_trans) { 780 struct ieee80211_hdr *hdr; 781 782 if (insert_ccmp_hdr) { 783 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); 784 785 mt76_insert_ccmp_hdr(skb, key_id); 786 } 787 788 hdr = mt76_skb_get_hdr(skb); 789 fc = hdr->frame_control; 790 if (ieee80211_is_data_qos(fc)) { 791 seq_ctrl = le16_to_cpu(hdr->seq_ctrl); 792 qos_ctl = *ieee80211_get_qos_ctl(hdr); 793 } 794 } else { 795 status->flag |= RX_FLAG_8023; 796 } 797 798 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) 799 mt7915_mac_decode_he_radiotap(skb, rxv, mode); 800 801 if (!status->wcid || !ieee80211_is_data_qos(fc)) 802 return 0; 803 804 status->aggr = unicast && 805 !ieee80211_is_qos_nullfunc(fc); 806 status->qos_ctl = qos_ctl; 807 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl); 808 809 return 0; 810 } 811 812 static void 813 mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb) 814 { 815 #ifdef CONFIG_NL80211_TESTMODE 816 struct mt7915_phy *phy = &dev->phy; 817 __le32 *rxd = (__le32 *)skb->data; 818 __le32 *rxv_hdr = rxd + 2; 819 __le32 *rxv = rxd + 4; 820 u32 rcpi, ib_rssi, wb_rssi, v20, v21; 821 bool ext_phy; 822 s32 foe; 823 u8 snr; 824 int i; 825 826 ext_phy = FIELD_GET(MT_RXV_HDR_BAND_IDX, le32_to_cpu(rxv_hdr[1])); 827 if (ext_phy) 828 phy = mt7915_ext_phy(dev); 829 830 rcpi = le32_to_cpu(rxv[6]); 831 ib_rssi = le32_to_cpu(rxv[7]); 832 wb_rssi = le32_to_cpu(rxv[8]) >> 5; 833 834 for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) { 835 if (i == 3) 836 wb_rssi = le32_to_cpu(rxv[9]); 837 838 phy->test.last_rcpi[i] = rcpi & 0xff; 839 phy->test.last_ib_rssi[i] = ib_rssi & 0xff; 840 
phy->test.last_wb_rssi[i] = wb_rssi & 0xff; 841 } 842 843 v20 = le32_to_cpu(rxv[20]); 844 v21 = le32_to_cpu(rxv[21]); 845 846 foe = FIELD_GET(MT_CRXV_FOE_LO, v20) | 847 (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT); 848 849 snr = FIELD_GET(MT_CRXV_SNR, v20) - 16; 850 851 phy->test.last_freq_offset = foe; 852 phy->test.last_snr = snr; 853 #endif 854 855 dev_kfree_skb(skb); 856 } 857 858 static void 859 mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi, 860 struct sk_buff *skb) 861 { 862 #ifdef CONFIG_NL80211_TESTMODE 863 struct mt76_testmode_data *td = &phy->mt76->test; 864 const struct ieee80211_rate *r; 865 u8 bw, mode, nss = td->tx_rate_nss; 866 u8 rate_idx = td->tx_rate_idx; 867 u16 rateval = 0; 868 u32 val; 869 bool cck = false; 870 int band; 871 872 if (skb != phy->mt76->test.tx_skb) 873 return; 874 875 switch (td->tx_rate_mode) { 876 case MT76_TM_TX_MODE_HT: 877 nss = 1 + (rate_idx >> 3); 878 mode = MT_PHY_TYPE_HT; 879 break; 880 case MT76_TM_TX_MODE_VHT: 881 mode = MT_PHY_TYPE_VHT; 882 break; 883 case MT76_TM_TX_MODE_HE_SU: 884 mode = MT_PHY_TYPE_HE_SU; 885 break; 886 case MT76_TM_TX_MODE_HE_EXT_SU: 887 mode = MT_PHY_TYPE_HE_EXT_SU; 888 break; 889 case MT76_TM_TX_MODE_HE_TB: 890 mode = MT_PHY_TYPE_HE_TB; 891 break; 892 case MT76_TM_TX_MODE_HE_MU: 893 mode = MT_PHY_TYPE_HE_MU; 894 break; 895 case MT76_TM_TX_MODE_CCK: 896 cck = true; 897 fallthrough; 898 case MT76_TM_TX_MODE_OFDM: 899 band = phy->mt76->chandef.chan->band; 900 if (band == NL80211_BAND_2GHZ && !cck) 901 rate_idx += 4; 902 903 r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx]; 904 val = cck ? 
r->hw_value_short : r->hw_value; 905 906 mode = val >> 8; 907 rate_idx = val & 0xff; 908 break; 909 default: 910 mode = MT_PHY_TYPE_OFDM; 911 break; 912 } 913 914 switch (phy->mt76->chandef.width) { 915 case NL80211_CHAN_WIDTH_40: 916 bw = 1; 917 break; 918 case NL80211_CHAN_WIDTH_80: 919 bw = 2; 920 break; 921 case NL80211_CHAN_WIDTH_80P80: 922 case NL80211_CHAN_WIDTH_160: 923 bw = 3; 924 break; 925 default: 926 bw = 0; 927 break; 928 } 929 930 if (td->tx_rate_stbc && nss == 1) { 931 nss++; 932 rateval |= MT_TX_RATE_STBC; 933 } 934 935 rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | 936 FIELD_PREP(MT_TX_RATE_MODE, mode) | 937 FIELD_PREP(MT_TX_RATE_NSS, nss - 1); 938 939 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); 940 941 le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT); 942 if (td->tx_rate_mode < MT76_TM_TX_MODE_HT) 943 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); 944 945 val = MT_TXD6_FIXED_BW | 946 FIELD_PREP(MT_TXD6_BW, bw) | 947 FIELD_PREP(MT_TXD6_TX_RATE, rateval) | 948 FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi); 949 950 /* for HE_SU/HE_EXT_SU PPDU 951 * - 1x, 2x, 4x LTF + 0.8us GI 952 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI 953 * for HE_MU PPDU 954 * - 2x, 4x LTF + 0.8us GI 955 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI 956 * for HE_TB PPDU 957 * - 1x, 2x LTF + 1.6us GI 958 * - 4x LTF + 3.2us GI 959 */ 960 if (mode >= MT_PHY_TYPE_HE_SU) 961 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf); 962 963 if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU)) 964 val |= MT_TXD6_LDPC; 965 966 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID); 967 txwi[6] |= cpu_to_le32(val); 968 txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, 969 phy->test.spe_idx)); 970 #endif 971 } 972 973 static void 974 mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi, 975 struct sk_buff *skb, struct mt76_wcid *wcid) 976 { 977 978 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 979 u8 fc_type, fc_stype; 980 bool wmm = false; 981 u32 val; 982 983 if (wcid->sta) { 984 struct 
ieee80211_sta *sta; 985 986 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); 987 wmm = sta->wme; 988 } 989 990 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) | 991 FIELD_PREP(MT_TXD1_TID, tid); 992 993 if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN) 994 val |= MT_TXD1_ETH_802_3; 995 996 txwi[1] |= cpu_to_le32(val); 997 998 fc_type = IEEE80211_FTYPE_DATA >> 2; 999 fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0; 1000 1001 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | 1002 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); 1003 1004 txwi[2] |= cpu_to_le32(val); 1005 1006 val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | 1007 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype); 1008 txwi[7] |= cpu_to_le32(val); 1009 } 1010 1011 static void 1012 mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi, 1013 struct sk_buff *skb, struct ieee80211_key_conf *key, 1014 bool *mcast) 1015 { 1016 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1017 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 1018 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1019 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 1020 __le16 fc = hdr->frame_control; 1021 u8 fc_type, fc_stype; 1022 u32 val; 1023 1024 *mcast = is_multicast_ether_addr(hdr->addr1); 1025 1026 if (ieee80211_is_action(fc) && 1027 mgmt->u.action.category == WLAN_CATEGORY_BACK && 1028 mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) { 1029 u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); 1030 1031 txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA); 1032 tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK; 1033 } else if (ieee80211_is_back_req(hdr->frame_control)) { 1034 struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr; 1035 u16 control = le16_to_cpu(bar->control); 1036 1037 tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control); 1038 } 1039 1040 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | 1041 FIELD_PREP(MT_TXD1_HDR_INFO, 1042 
ieee80211_get_hdrlen_from_skb(skb) / 2) | 1043 FIELD_PREP(MT_TXD1_TID, tid); 1044 txwi[1] |= cpu_to_le32(val); 1045 1046 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; 1047 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; 1048 1049 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | 1050 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | 1051 FIELD_PREP(MT_TXD2_MULTICAST, *mcast); 1052 1053 if (key && *mcast && ieee80211_is_robust_mgmt_frame(skb) && 1054 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { 1055 val |= MT_TXD2_BIP; 1056 txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME); 1057 } 1058 1059 if (!ieee80211_is_data(fc) || *mcast || 1060 info->flags & IEEE80211_TX_CTL_USE_MINRATE) 1061 val |= MT_TXD2_FIX_RATE; 1062 1063 txwi[2] |= cpu_to_le32(val); 1064 1065 if (ieee80211_is_beacon(fc)) { 1066 txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT); 1067 txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT); 1068 } 1069 1070 if (info->flags & IEEE80211_TX_CTL_INJECTED) { 1071 u16 seqno = le16_to_cpu(hdr->seq_ctrl); 1072 1073 if (ieee80211_is_back_req(hdr->frame_control)) { 1074 struct ieee80211_bar *bar; 1075 1076 bar = (struct ieee80211_bar *)skb->data; 1077 seqno = le16_to_cpu(bar->start_seq_num); 1078 } 1079 1080 val = MT_TXD3_SN_VALID | 1081 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); 1082 txwi[3] |= cpu_to_le32(val); 1083 } 1084 1085 val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | 1086 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype); 1087 txwi[7] |= cpu_to_le32(val); 1088 } 1089 1090 static u16 1091 mt7915_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif, 1092 bool beacon, bool mcast) 1093 { 1094 u8 mode = 0, band = mphy->chandef.chan->band; 1095 int rateidx = 0, mcast_rate; 1096 1097 if (beacon) { 1098 struct cfg80211_bitrate_mask *mask; 1099 1100 mask = &vif->bss_conf.beacon_tx_rate; 1101 if (hweight16(mask->control[band].he_mcs[0]) == 1) { 1102 rateidx = ffs(mask->control[band].he_mcs[0]) - 1; 1103 mode = MT_PHY_TYPE_HE_SU; 1104 goto out; 1105 } else if 
(hweight16(mask->control[band].vht_mcs[0]) == 1) { 1106 rateidx = ffs(mask->control[band].vht_mcs[0]) - 1; 1107 mode = MT_PHY_TYPE_VHT; 1108 goto out; 1109 } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) { 1110 rateidx = ffs(mask->control[band].ht_mcs[0]) - 1; 1111 mode = MT_PHY_TYPE_HT; 1112 goto out; 1113 } else if (hweight32(mask->control[band].legacy) == 1) { 1114 rateidx = ffs(mask->control[band].legacy) - 1; 1115 goto legacy; 1116 } 1117 } 1118 1119 mcast_rate = vif->bss_conf.mcast_rate[band]; 1120 if (mcast && mcast_rate > 0) 1121 rateidx = mcast_rate - 1; 1122 else 1123 rateidx = ffs(vif->bss_conf.basic_rates) - 1; 1124 1125 legacy: 1126 rateidx = mt76_calculate_default_rate(mphy, rateidx); 1127 mode = rateidx >> 8; 1128 rateidx &= GENMASK(7, 0); 1129 1130 out: 1131 return FIELD_PREP(MT_TX_RATE_IDX, rateidx) | 1132 FIELD_PREP(MT_TX_RATE_MODE, mode); 1133 } 1134 1135 void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, 1136 struct sk_buff *skb, struct mt76_wcid *wcid, int pid, 1137 struct ieee80211_key_conf *key, bool beacon) 1138 { 1139 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1140 struct ieee80211_vif *vif = info->control.vif; 1141 struct mt76_phy *mphy = &dev->mphy; 1142 bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; 1143 u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 1144 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; 1145 bool mcast = false; 1146 u16 tx_count = 15; 1147 u32 val; 1148 1149 if (vif) { 1150 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1151 1152 omac_idx = mvif->mt76.omac_idx; 1153 wmm_idx = mvif->mt76.wmm_idx; 1154 } 1155 1156 if (ext_phy && dev->mt76.phy2) 1157 mphy = dev->mt76.phy2; 1158 1159 if (beacon) { 1160 p_fmt = MT_TX_TYPE_FW; 1161 q_idx = MT_LMAC_BCN0; 1162 } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { 1163 p_fmt = MT_TX_TYPE_CT; 1164 q_idx = MT_LMAC_ALTX0; 1165 } else { 1166 p_fmt = MT_TX_TYPE_CT; 1167 q_idx = wmm_idx * MT7915_MAX_WMM_SETS + 1168 
mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb)); 1169 } 1170 1171 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | 1172 FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) | 1173 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); 1174 txwi[0] = cpu_to_le32(val); 1175 1176 val = MT_TXD1_LONG_FORMAT | MT_TXD1_VTA | 1177 FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | 1178 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); 1179 1180 if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) 1181 val |= MT_TXD1_TGID; 1182 1183 txwi[1] = cpu_to_le32(val); 1184 1185 txwi[2] = 0; 1186 1187 val = MT_TXD3_SW_POWER_MGMT | 1188 FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); 1189 if (key) 1190 val |= MT_TXD3_PROTECT_FRAME; 1191 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 1192 val |= MT_TXD3_NO_ACK; 1193 1194 txwi[3] = cpu_to_le32(val); 1195 txwi[4] = 0; 1196 1197 val = FIELD_PREP(MT_TXD5_PID, pid); 1198 if (pid >= MT_PACKET_ID_FIRST) 1199 val |= MT_TXD5_TX_STATUS_HOST; 1200 txwi[5] = cpu_to_le32(val); 1201 1202 txwi[6] = 0; 1203 txwi[7] = wcid->amsdu ? 
cpu_to_le32(MT_TXD7_HW_AMSDU) : 0; 1204 1205 if (is_8023) 1206 mt7915_mac_write_txwi_8023(dev, txwi, skb, wcid); 1207 else 1208 mt7915_mac_write_txwi_80211(dev, txwi, skb, key, &mcast); 1209 1210 if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) { 1211 u16 rate = mt7915_mac_tx_rate_val(mphy, vif, beacon, mcast); 1212 1213 /* hardware won't add HTC for mgmt/ctrl frame */ 1214 txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD); 1215 1216 val = MT_TXD6_FIXED_BW | 1217 FIELD_PREP(MT_TXD6_TX_RATE, rate); 1218 txwi[6] |= cpu_to_le32(val); 1219 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); 1220 } 1221 1222 if (mt76_testmode_enabled(mphy)) 1223 mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb); 1224 } 1225 1226 int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 1227 enum mt76_txq_id qid, struct mt76_wcid *wcid, 1228 struct ieee80211_sta *sta, 1229 struct mt76_tx_info *tx_info) 1230 { 1231 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data; 1232 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 1233 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); 1234 struct ieee80211_key_conf *key = info->control.hw_key; 1235 struct ieee80211_vif *vif = info->control.vif; 1236 struct mt76_txwi_cache *t; 1237 struct mt7915_txp *txp; 1238 int id, i, nbuf = tx_info->nbuf - 1; 1239 u8 *txwi = (u8 *)txwi_ptr; 1240 int pid; 1241 1242 if (unlikely(tx_info->skb->len <= ETH_HLEN)) 1243 return -EINVAL; 1244 1245 if (!wcid) 1246 wcid = &dev->mt76.global_wcid; 1247 1248 if (sta) { 1249 struct mt7915_sta *msta; 1250 1251 msta = (struct mt7915_sta *)sta->drv_priv; 1252 1253 if (time_after(jiffies, msta->jiffies + HZ / 4)) { 1254 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; 1255 msta->jiffies = jiffies; 1256 } 1257 } 1258 1259 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); 1260 t->skb = tx_info->skb; 1261 1262 id = mt76_token_consume(mdev, &t); 1263 if (id < 0) 1264 return id; 1265 1266 pid = mt76_tx_status_skb_add(mdev, wcid, 
tx_info->skb); 1267 mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, 1268 false); 1269 1270 txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE); 1271 for (i = 0; i < nbuf; i++) { 1272 txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); 1273 txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len); 1274 } 1275 txp->nbuf = nbuf; 1276 1277 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST); 1278 1279 if (!key) 1280 txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); 1281 1282 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && 1283 ieee80211_is_mgmt(hdr->frame_control)) 1284 txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); 1285 1286 if (vif) { 1287 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1288 1289 txp->bss_idx = mvif->mt76.idx; 1290 } 1291 1292 txp->token = cpu_to_le16(id); 1293 if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) 1294 txp->rept_wds_wcid = cpu_to_le16(wcid->idx); 1295 else 1296 txp->rept_wds_wcid = cpu_to_le16(0x3ff); 1297 tx_info->skb = DMA_DUMMY_DATA; 1298 1299 /* pass partial skb header to fw */ 1300 tx_info->buf[1].len = MT_CT_PARSE_LEN; 1301 tx_info->buf[1].skip_unmap = true; 1302 tx_info->nbuf = MT_CT_DMA_BUF_NUM; 1303 1304 return 0; 1305 } 1306 1307 static void 1308 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) 1309 { 1310 struct mt7915_sta *msta; 1311 u16 fc, tid; 1312 u32 val; 1313 1314 if (!sta || !sta->ht_cap.ht_supported) 1315 return; 1316 1317 tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1])); 1318 if (tid >= 6) /* skip VO queue */ 1319 return; 1320 1321 val = le32_to_cpu(txwi[2]); 1322 fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | 1323 FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; 1324 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) 1325 return; 1326 1327 msta = (struct mt7915_sta *)sta->drv_priv; 1328 if (!test_and_set_bit(tid, &msta->ampdu_state)) 1329 ieee80211_start_tx_ba_session(sta, tid, 0); 1330 } 1331 1332 static void 1333 
mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t) 1334 { 1335 struct mt7915_txp *txp; 1336 int i; 1337 1338 txp = mt7915_txwi_to_txp(dev, t); 1339 for (i = 0; i < txp->nbuf; i++) 1340 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), 1341 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); 1342 } 1343 1344 static void 1345 mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t, 1346 struct ieee80211_sta *sta, struct list_head *free_list) 1347 { 1348 struct mt76_dev *mdev = &dev->mt76; 1349 struct mt76_wcid *wcid; 1350 __le32 *txwi; 1351 u16 wcid_idx; 1352 1353 mt7915_txp_skb_unmap(mdev, t); 1354 if (!t->skb) 1355 goto out; 1356 1357 txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); 1358 if (sta) { 1359 wcid = (struct mt76_wcid *)sta->drv_priv; 1360 wcid_idx = wcid->idx; 1361 1362 if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) 1363 mt7915_tx_check_aggr(sta, txwi); 1364 } else { 1365 wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1])); 1366 } 1367 1368 __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); 1369 1370 out: 1371 t->skb = NULL; 1372 mt76_put_txwi(mdev, t); 1373 } 1374 1375 static void 1376 mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) 1377 { 1378 struct mt7915_tx_free *free = (struct mt7915_tx_free *)data; 1379 struct mt76_dev *mdev = &dev->mt76; 1380 struct mt76_phy *mphy_ext = mdev->phy2; 1381 struct mt76_txwi_cache *txwi; 1382 struct ieee80211_sta *sta = NULL; 1383 LIST_HEAD(free_list); 1384 struct sk_buff *skb, *tmp; 1385 void *end = data + len; 1386 u8 i, count; 1387 bool wake = false; 1388 1389 /* clean DMA queues and unmap buffers first */ 1390 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); 1391 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); 1392 if (mphy_ext) { 1393 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false); 1394 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false); 1395 } 1396 1397 /* 1398 * TODO: MT_TX_FREE_LATENCY is msdu time 
from the TXD is queued into PLE, 1399 * to the time ack is received or dropped by hw (air + hw queue time). 1400 * Should avoid accessing WTBL to get Tx airtime, and use it instead. 1401 */ 1402 count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl)); 1403 if (WARN_ON_ONCE((void *)&free->info[count] > end)) 1404 return; 1405 1406 for (i = 0; i < count; i++) { 1407 u32 msdu, info = le32_to_cpu(free->info[i]); 1408 1409 /* 1410 * 1'b1: new wcid pair. 1411 * 1'b0: msdu_id with the same 'wcid pair' as above. 1412 */ 1413 if (info & MT_TX_FREE_PAIR) { 1414 struct mt7915_sta *msta; 1415 struct mt76_wcid *wcid; 1416 u16 idx; 1417 1418 count++; 1419 idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info); 1420 wcid = rcu_dereference(dev->mt76.wcid[idx]); 1421 sta = wcid_to_sta(wcid); 1422 if (!sta) 1423 continue; 1424 1425 msta = container_of(wcid, struct mt7915_sta, wcid); 1426 spin_lock_bh(&dev->sta_poll_lock); 1427 if (list_empty(&msta->poll_list)) 1428 list_add_tail(&msta->poll_list, &dev->sta_poll_list); 1429 spin_unlock_bh(&dev->sta_poll_lock); 1430 continue; 1431 } 1432 1433 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); 1434 txwi = mt76_token_release(mdev, msdu, &wake); 1435 if (!txwi) 1436 continue; 1437 1438 mt7915_txwi_free(dev, txwi, sta, &free_list); 1439 } 1440 1441 mt7915_mac_sta_poll(dev); 1442 1443 if (wake) 1444 mt76_set_tx_blocked(&dev->mt76, false); 1445 1446 mt76_worker_schedule(&dev->mt76.tx_worker); 1447 1448 list_for_each_entry_safe(skb, tmp, &free_list, list) { 1449 skb_list_del_init(skb); 1450 napi_consume_skb(skb, 1); 1451 } 1452 } 1453 1454 static bool 1455 mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid, 1456 __le32 *txs_data, struct mt76_sta_stats *stats) 1457 { 1458 struct ieee80211_supported_band *sband; 1459 struct mt76_dev *mdev = &dev->mt76; 1460 struct mt76_phy *mphy; 1461 struct ieee80211_tx_info *info; 1462 struct sk_buff_head list; 1463 struct rate_info rate = {}; 1464 struct sk_buff *skb; 1465 bool cck = false; 
	u32 txrate, txs, mode;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	/* no ACK error bits set means the frame was acknowledged */
	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	/* rate info is reported via wcid->rate below, not the legacy array */
	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	/* decode the PHY-mode-specific rate encoding into struct rate_info */
	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = &dev->mphy;
		if (wcid->ext_phy && dev->mt76.phy2)
			mphy = dev->mt76.phy2;

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else
			sband = &mphy->sband_2g.sband;

		/* hw rate index -> sband bitrate table index */
		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		/* HT reports per-stream MCS; fold nss into the 0-31 range */
		rate.mcs += (rate.nss - 1) * 8;
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		/* GI is not in the TXS report; reuse the last known value */
		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

/* Parse one raw TXS report: validate format, wcid and packet id, then hand
 * the report to mt7915_mac_add_txs_skb() and queue the station for polling.
 */
static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
{
	struct mt7915_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u32 txs;
	u8 pid;

	/* only TXS formats 0/1 are handled here */
	txs = le32_to_cpu(txs_data[0]);
	if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
		return;

	txs = le32_to_cpu(txs_data[2]);
	wcidx = FIELD_GET(MT_TXS2_WCID, txs);

	txs = le32_to_cpu(txs_data[3]);
	pid = FIELD_GET(MT_TXS3_PID, txs);

	/* pids below MT_PACKET_ID_FIRST carry no host status request */
	if (pid < MT_PACKET_ID_FIRST)
		return;

	if (wcidx >= MT7915_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7915_sta, wcid);

	mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

out:
	rcu_read_unlock();
}

/* Pre-screen an rx descriptor: consume tx-free and TXS events in place and
 * return false for them; return true when the packet needs full rx handling.
 */
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 1625 __le32 *rxd = (__le32 *)data; 1626 __le32 *end = (__le32 *)&rxd[len / 4]; 1627 enum rx_pkt_type type; 1628 1629 type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); 1630 switch (type) { 1631 case PKT_TYPE_TXRX_NOTIFY: 1632 mt7915_mac_tx_free(dev, data, len); 1633 return false; 1634 case PKT_TYPE_TXS: 1635 for (rxd += 2; rxd + 8 <= end; rxd += 8) 1636 mt7915_mac_add_txs(dev, rxd); 1637 return false; 1638 default: 1639 return true; 1640 } 1641 } 1642 1643 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 1644 struct sk_buff *skb) 1645 { 1646 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 1647 __le32 *rxd = (__le32 *)skb->data; 1648 __le32 *end = (__le32 *)&skb->data[skb->len]; 1649 enum rx_pkt_type type; 1650 1651 type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); 1652 1653 switch (type) { 1654 case PKT_TYPE_TXRX_NOTIFY: 1655 mt7915_mac_tx_free(dev, skb->data, skb->len); 1656 napi_consume_skb(skb, 1); 1657 break; 1658 case PKT_TYPE_RX_EVENT: 1659 mt7915_mcu_rx_event(dev, skb); 1660 break; 1661 case PKT_TYPE_TXRXV: 1662 mt7915_mac_fill_rx_vector(dev, skb); 1663 break; 1664 case PKT_TYPE_TXS: 1665 for (rxd += 2; rxd + 8 <= end; rxd += 8) 1666 mt7915_mac_add_txs(dev, rxd); 1667 dev_kfree_skb(skb); 1668 break; 1669 case PKT_TYPE_NORMAL: 1670 if (!mt7915_mac_fill_rx(dev, skb)) { 1671 mt76_rx(&dev->mt76, q, skb); 1672 return; 1673 } 1674 fallthrough; 1675 default: 1676 dev_kfree_skb(skb); 1677 break; 1678 } 1679 } 1680 1681 void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) 1682 { 1683 if (!e->txwi) { 1684 dev_kfree_skb_any(e->skb); 1685 return; 1686 } 1687 1688 /* error path */ 1689 if (e->skb == DMA_DUMMY_DATA) { 1690 struct mt76_txwi_cache *t; 1691 struct mt7915_txp *txp; 1692 1693 txp = mt7915_txwi_to_txp(mdev, e->txwi); 1694 t = mt76_token_put(mdev, le16_to_cpu(txp->token)); 1695 e->skb = t ? 
t->skb : NULL; 1696 } 1697 1698 if (e->skb) 1699 mt76_tx_complete_skb(mdev, e->wcid, e->skb); 1700 } 1701 1702 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy) 1703 { 1704 struct mt7915_dev *dev = phy->dev; 1705 bool ext_phy = phy != &dev->phy; 1706 u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy); 1707 1708 mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN); 1709 mt76_set(dev, reg, BIT(11) | BIT(9)); 1710 } 1711 1712 void mt7915_mac_reset_counters(struct mt7915_phy *phy) 1713 { 1714 struct mt7915_dev *dev = phy->dev; 1715 bool ext_phy = phy != &dev->phy; 1716 int i; 1717 1718 for (i = 0; i < 4; i++) { 1719 mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); 1720 mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i)); 1721 } 1722 1723 if (ext_phy) { 1724 dev->mt76.phy2->survey_time = ktime_get_boottime(); 1725 i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2; 1726 } else { 1727 dev->mt76.phy.survey_time = ktime_get_boottime(); 1728 i = 0; 1729 } 1730 memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2); 1731 1732 /* reset airtime counters */ 1733 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy), 1734 MT_WF_RMAC_MIB_RXTIME_CLR); 1735 1736 mt7915_mcu_get_chan_mib_info(phy, true); 1737 } 1738 1739 void mt7915_mac_set_timing(struct mt7915_phy *phy) 1740 { 1741 s16 coverage_class = phy->coverage_class; 1742 struct mt7915_dev *dev = phy->dev; 1743 bool ext_phy = phy != &dev->phy; 1744 u32 val, reg_offset; 1745 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | 1746 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); 1747 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | 1748 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28); 1749 int offset; 1750 bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ; 1751 1752 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) 1753 return; 1754 1755 if (ext_phy) { 1756 coverage_class = max_t(s16, dev->phy.coverage_class, 1757 coverage_class); 1758 } else { 1759 struct mt7915_phy *phy_ext = mt7915_ext_phy(dev); 1760 1761 if (phy_ext) 1762 coverage_class = max_t(s16, 
phy_ext->coverage_class, 1763 coverage_class); 1764 } 1765 mt76_set(dev, MT_ARB_SCR(ext_phy), 1766 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); 1767 udelay(1); 1768 1769 offset = 3 * coverage_class; 1770 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | 1771 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); 1772 1773 mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset); 1774 mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset); 1775 mt76_wr(dev, MT_TMAC_ICR0(ext_phy), 1776 FIELD_PREP(MT_IFS_EIFS_OFDM, is_5ghz ? 84 : 78) | 1777 FIELD_PREP(MT_IFS_RIFS, 2) | 1778 FIELD_PREP(MT_IFS_SIFS, 10) | 1779 FIELD_PREP(MT_IFS_SLOT, phy->slottime)); 1780 1781 mt76_wr(dev, MT_TMAC_ICR1(ext_phy), 1782 FIELD_PREP(MT_IFS_EIFS_CCK, 314)); 1783 1784 if (phy->slottime < 20 || is_5ghz) 1785 val = MT7915_CFEND_RATE_DEFAULT; 1786 else 1787 val = MT7915_CFEND_RATE_11B; 1788 1789 mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val); 1790 mt76_clear(dev, MT_ARB_SCR(ext_phy), 1791 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); 1792 } 1793 1794 void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy) 1795 { 1796 mt76_set(dev, MT_WF_PHY_RXTD12(ext_phy), 1797 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | 1798 MT_WF_PHY_RXTD12_IRPI_SW_CLR); 1799 1800 mt76_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy), 1801 FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); 1802 } 1803 1804 static u8 1805 mt7915_phy_get_nf(struct mt7915_phy *phy, int idx) 1806 { 1807 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; 1808 struct mt7915_dev *dev = phy->dev; 1809 u32 val, sum = 0, n = 0; 1810 int nss, i; 1811 1812 for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) { 1813 u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support)); 1814 1815 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { 1816 val = mt76_rr(dev, reg); 1817 sum += val * nf_power[i]; 1818 n += val; 1819 } 1820 } 1821 1822 if (!n) 1823 return 0; 1824 1825 return sum / n; 1826 } 1827 1828 void 
mt7915_update_channel(struct mt76_phy *mphy) 1829 { 1830 struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv; 1831 struct mt76_channel_state *state = mphy->chan_state; 1832 bool ext_phy = phy != &phy->dev->phy; 1833 int nf; 1834 1835 mt7915_mcu_get_chan_mib_info(phy, false); 1836 1837 nf = mt7915_phy_get_nf(phy, ext_phy); 1838 if (!phy->noise) 1839 phy->noise = nf << 4; 1840 else if (nf) 1841 phy->noise += nf - (phy->noise >> 4); 1842 1843 state->noise = -(phy->noise >> 4); 1844 } 1845 1846 static bool 1847 mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state) 1848 { 1849 bool ret; 1850 1851 ret = wait_event_timeout(dev->reset_wait, 1852 (READ_ONCE(dev->reset_state) & state), 1853 MT7915_RESET_TIMEOUT); 1854 1855 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state); 1856 return ret; 1857 } 1858 1859 static void 1860 mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) 1861 { 1862 struct ieee80211_hw *hw = priv; 1863 1864 switch (vif->type) { 1865 case NL80211_IFTYPE_MESH_POINT: 1866 case NL80211_IFTYPE_ADHOC: 1867 case NL80211_IFTYPE_AP: 1868 mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon); 1869 break; 1870 default: 1871 break; 1872 } 1873 } 1874 1875 static void 1876 mt7915_update_beacons(struct mt7915_dev *dev) 1877 { 1878 ieee80211_iterate_active_interfaces(dev->mt76.hw, 1879 IEEE80211_IFACE_ITER_RESUME_ALL, 1880 mt7915_update_vif_beacon, dev->mt76.hw); 1881 1882 if (!dev->mt76.phy2) 1883 return; 1884 1885 ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw, 1886 IEEE80211_IFACE_ITER_RESUME_ALL, 1887 mt7915_update_vif_beacon, dev->mt76.phy2->hw); 1888 } 1889 1890 static void 1891 mt7915_dma_reset(struct mt7915_dev *dev) 1892 { 1893 struct mt76_phy *mphy_ext = dev->mt76.phy2; 1894 u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE; 1895 int i; 1896 1897 mt76_clear(dev, MT_WFDMA0_GLO_CFG, 1898 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); 1899 mt76_clear(dev, MT_WFDMA1_GLO_CFG, 1900 
MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN); 1901 if (dev->hif2) { 1902 mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs, 1903 (MT_WFDMA0_GLO_CFG_TX_DMA_EN | 1904 MT_WFDMA0_GLO_CFG_RX_DMA_EN)); 1905 mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs, 1906 (MT_WFDMA1_GLO_CFG_TX_DMA_EN | 1907 MT_WFDMA1_GLO_CFG_RX_DMA_EN)); 1908 } 1909 1910 usleep_range(1000, 2000); 1911 1912 for (i = 0; i < __MT_TXQ_MAX; i++) { 1913 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); 1914 if (mphy_ext) 1915 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true); 1916 } 1917 1918 for (i = 0; i < __MT_MCUQ_MAX; i++) 1919 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); 1920 1921 mt76_for_each_q_rx(&dev->mt76, i) 1922 mt76_queue_rx_reset(dev, i); 1923 1924 mt76_tx_status_check(&dev->mt76, true); 1925 1926 /* re-init prefetch settings after reset */ 1927 mt7915_dma_prefetch(dev); 1928 1929 mt76_set(dev, MT_WFDMA0_GLO_CFG, 1930 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); 1931 mt76_set(dev, MT_WFDMA1_GLO_CFG, 1932 MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN | 1933 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO | 1934 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO); 1935 if (dev->hif2) { 1936 mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs, 1937 (MT_WFDMA0_GLO_CFG_TX_DMA_EN | 1938 MT_WFDMA0_GLO_CFG_RX_DMA_EN)); 1939 mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs, 1940 (MT_WFDMA1_GLO_CFG_TX_DMA_EN | 1941 MT_WFDMA1_GLO_CFG_RX_DMA_EN | 1942 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO | 1943 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO)); 1944 } 1945 } 1946 1947 void mt7915_tx_token_put(struct mt7915_dev *dev) 1948 { 1949 struct mt76_txwi_cache *txwi; 1950 int id; 1951 1952 spin_lock_bh(&dev->mt76.token_lock); 1953 idr_for_each_entry(&dev->mt76.token, txwi, id) { 1954 mt7915_txwi_free(dev, txwi, NULL, NULL); 1955 dev->mt76.token_count--; 1956 } 1957 spin_unlock_bh(&dev->mt76.token_lock); 1958 idr_destroy(&dev->mt76.token); 1959 } 1960 1961 /* system error recovery */ 1962 void mt7915_mac_reset_work(struct work_struct *work) 
1963 { 1964 struct mt7915_phy *phy2; 1965 struct mt76_phy *ext_phy; 1966 struct mt7915_dev *dev; 1967 1968 dev = container_of(work, struct mt7915_dev, reset_work); 1969 ext_phy = dev->mt76.phy2; 1970 phy2 = ext_phy ? ext_phy->priv : NULL; 1971 1972 if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA)) 1973 return; 1974 1975 ieee80211_stop_queues(mt76_hw(dev)); 1976 if (ext_phy) 1977 ieee80211_stop_queues(ext_phy->hw); 1978 1979 set_bit(MT76_RESET, &dev->mphy.state); 1980 set_bit(MT76_MCU_RESET, &dev->mphy.state); 1981 wake_up(&dev->mt76.mcu.wait); 1982 cancel_delayed_work_sync(&dev->mphy.mac_work); 1983 if (phy2) { 1984 set_bit(MT76_RESET, &phy2->mt76->state); 1985 cancel_delayed_work_sync(&phy2->mt76->mac_work); 1986 } 1987 mt76_worker_disable(&dev->mt76.tx_worker); 1988 napi_disable(&dev->mt76.napi[0]); 1989 napi_disable(&dev->mt76.napi[1]); 1990 napi_disable(&dev->mt76.napi[2]); 1991 napi_disable(&dev->mt76.tx_napi); 1992 1993 mutex_lock(&dev->mt76.mutex); 1994 1995 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); 1996 1997 if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { 1998 mt7915_dma_reset(dev); 1999 2000 mt7915_tx_token_put(dev); 2001 idr_init(&dev->mt76.token); 2002 2003 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); 2004 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); 2005 } 2006 2007 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 2008 clear_bit(MT76_RESET, &dev->mphy.state); 2009 if (phy2) 2010 clear_bit(MT76_RESET, &phy2->mt76->state); 2011 2012 local_bh_disable(); 2013 napi_enable(&dev->mt76.napi[0]); 2014 napi_schedule(&dev->mt76.napi[0]); 2015 2016 napi_enable(&dev->mt76.napi[1]); 2017 napi_schedule(&dev->mt76.napi[1]); 2018 2019 napi_enable(&dev->mt76.napi[2]); 2020 napi_schedule(&dev->mt76.napi[2]); 2021 local_bh_enable(); 2022 2023 tasklet_schedule(&dev->irq_tasklet); 2024 2025 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); 2026 mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); 2027 
2028 mt76_worker_enable(&dev->mt76.tx_worker); 2029 2030 napi_enable(&dev->mt76.tx_napi); 2031 napi_schedule(&dev->mt76.tx_napi); 2032 2033 ieee80211_wake_queues(mt76_hw(dev)); 2034 if (ext_phy) 2035 ieee80211_wake_queues(ext_phy->hw); 2036 2037 mutex_unlock(&dev->mt76.mutex); 2038 2039 mt7915_update_beacons(dev); 2040 2041 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, 2042 MT7915_WATCHDOG_TIME); 2043 if (phy2) 2044 ieee80211_queue_delayed_work(ext_phy->hw, 2045 &phy2->mt76->mac_work, 2046 MT7915_WATCHDOG_TIME); 2047 } 2048 2049 void mt7915_mac_update_stats(struct mt7915_phy *phy) 2050 { 2051 struct mt7915_dev *dev = phy->dev; 2052 struct mib_stats *mib = &phy->mib; 2053 bool ext_phy = phy != &dev->phy; 2054 int i, aggr0, aggr1, cnt; 2055 2056 mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy), 2057 MT_MIB_SDR3_FCS_ERR_MASK); 2058 2059 cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy)); 2060 mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt); 2061 2062 cnt = mt76_rr(dev, MT_MIB_SDR5(ext_phy)); 2063 mib->rx_mpdu_cnt += cnt; 2064 2065 cnt = mt76_rr(dev, MT_MIB_SDR6(ext_phy)); 2066 mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt); 2067 2068 cnt = mt76_rr(dev, MT_MIB_SDR7(ext_phy)); 2069 mib->rx_vector_mismatch_cnt += FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt); 2070 2071 cnt = mt76_rr(dev, MT_MIB_SDR8(ext_phy)); 2072 mib->rx_delimiter_fail_cnt += FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt); 2073 2074 cnt = mt76_rr(dev, MT_MIB_SDR11(ext_phy)); 2075 mib->rx_len_mismatch_cnt += FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt); 2076 2077 cnt = mt76_rr(dev, MT_MIB_SDR12(ext_phy)); 2078 mib->tx_ampdu_cnt += cnt; 2079 2080 cnt = mt76_rr(dev, MT_MIB_SDR13(ext_phy)); 2081 mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt); 2082 2083 cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy)); 2084 mib->tx_mpdu_attempts_cnt += FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, 
cnt); 2085 2086 cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy)); 2087 mib->tx_mpdu_success_cnt += FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt); 2088 2089 cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy)); 2090 mib->rx_ampdu_cnt += cnt; 2091 2092 cnt = mt76_rr(dev, MT_MIB_SDR23(ext_phy)); 2093 mib->rx_ampdu_bytes_cnt += cnt; 2094 2095 cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy)); 2096 mib->rx_ampdu_valid_subframe_cnt += FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt); 2097 2098 cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy)); 2099 mib->rx_ampdu_valid_subframe_bytes_cnt += cnt; 2100 2101 cnt = mt76_rr(dev, MT_MIB_SDR27(ext_phy)); 2102 mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt); 2103 2104 cnt = mt76_rr(dev, MT_MIB_SDR28(ext_phy)); 2105 mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt); 2106 2107 cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy)); 2108 mib->rx_pfdrop_cnt += FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt); 2109 2110 cnt = mt76_rr(dev, MT_MIB_SDR30(ext_phy)); 2111 mib->rx_vec_queue_overflow_drop_cnt += 2112 FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt); 2113 2114 cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy)); 2115 mib->rx_ba_cnt += cnt; 2116 2117 cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy)); 2118 mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt); 2119 2120 cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy)); 2121 mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK, cnt); 2122 2123 cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy)); 2124 mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt); 2125 2126 cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy)); 2127 mib->tx_mu_mpdu_cnt += cnt; 2128 2129 cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy)); 2130 mib->tx_mu_acked_mpdu_cnt += cnt; 2131 2132 cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy)); 2133 mib->tx_su_acked_mpdu_cnt += cnt; 2134 2135 cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy)); 2136 mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt); 2137 
mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt); 2138 2139 cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy)); 2140 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt); 2141 mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt); 2142 mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt); 2143 mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt); 2144 2145 cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy)); 2146 mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt); 2147 mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt); 2148 mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt); 2149 2150 cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy)); 2151 mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt); 2152 mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt); 2153 2154 for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) { 2155 cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i)); 2156 mib->tx_amsdu[i] += cnt; 2157 mib->tx_amsdu_cnt += cnt; 2158 } 2159 2160 aggr0 = ext_phy ? 
ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0; 2161 for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { 2162 u32 val; 2163 2164 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i)); 2165 mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val); 2166 mib->ack_fail_cnt += 2167 FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val); 2168 2169 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i)); 2170 mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val); 2171 mib->rts_retries_cnt += 2172 FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val); 2173 2174 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i)); 2175 dev->mt76.aggr_stats[aggr0++] += val & 0xffff; 2176 dev->mt76.aggr_stats[aggr0++] += val >> 16; 2177 2178 val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i)); 2179 dev->mt76.aggr_stats[aggr1++] += val & 0xffff; 2180 dev->mt76.aggr_stats[aggr1++] += val >> 16; 2181 } 2182 } 2183 2184 void mt7915_mac_sta_rc_work(struct work_struct *work) 2185 { 2186 struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work); 2187 struct ieee80211_sta *sta; 2188 struct ieee80211_vif *vif; 2189 struct mt7915_sta *msta; 2190 u32 changed; 2191 LIST_HEAD(list); 2192 2193 spin_lock_bh(&dev->sta_poll_lock); 2194 list_splice_init(&dev->sta_rc_list, &list); 2195 2196 while (!list_empty(&list)) { 2197 msta = list_first_entry(&list, struct mt7915_sta, rc_list); 2198 list_del_init(&msta->rc_list); 2199 changed = msta->changed; 2200 msta->changed = 0; 2201 spin_unlock_bh(&dev->sta_poll_lock); 2202 2203 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 2204 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 2205 2206 if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED | 2207 IEEE80211_RC_NSS_CHANGED | 2208 IEEE80211_RC_BW_CHANGED)) 2209 mt7915_mcu_add_rate_ctrl(dev, vif, sta, true); 2210 2211 if (changed & IEEE80211_RC_SMPS_CHANGED) 2212 mt7915_mcu_add_smps(dev, vif, sta); 2213 2214 spin_lock_bh(&dev->sta_poll_lock); 2215 } 2216 2217 spin_unlock_bh(&dev->sta_poll_lock); 2218 } 2219 2220 void 
mt7915_mac_work(struct work_struct *work)
{
	struct mt7915_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	/* refresh MIB statistics on every 5th watchdog tick */
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7915_mac_update_stats(phy);
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	/* self re-arm: periodic per-phy watchdog */
	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7915_WATCHDOG_TIME);
}

/* Stop any radar-detection chains this phy started (bits in rdd_state). */
static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
}

/* Start the radar detector on one rx chain and switch it to detection mode.
 * Returns 0 on success or a negative MCU error code.
 */
static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
	int err;

	err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
}

/* Begin CAC and bring up radar detection for the current channel; wide
 * (160 / 80+80) channels additionally need the second chain.
 */
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* start CAC */
	err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7915_dfs_start_rdd(dev, ext_phy);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(ext_phy);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7915_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

static
int
mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
{
	const struct mt7915_dfs_radar_spec *radar_specs;
	struct mt7915_dev *dev = phy->dev;
	int err, i;

	/* pick the pattern table matching the regulatory DFS region */
	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		/* FCC additionally needs the type-5 long-pulse threshold */
		err = mt7915_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	/* program every per-pattern threshold, then the pulse thresholds */
	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7915_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

/* (Re)configure radar detection to match the current channel and its
 * cfg80211 DFS state.  Returns 0 or a negative error code.
 */
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* no DFS region configured: make sure detection is switched off */
	if (dev->mt76.region == NL80211_DFS_UNSET) {
		phy->dfs_state = -1;
		if (phy->rdd_state)
			goto stop;

		return 0;
	}

	/* don't reprogram the detector while scanning */
	if (test_bit(MT76_SCANNING, &phy->mt76->state))
		return 0;

	/* nothing changed since the last invocation */
	if (phy->dfs_state == chandef->chan->dfs_state)
		return 0;

	err = mt7915_dfs_init_radar_specs(phy);
	if (err < 0) {
		/* force a retry next time and fall back to normal rx */
		phy->dfs_state = -1;
		goto stop;
	}

	phy->dfs_state = chandef->chan->dfs_state;

	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
		/* channel not yet cleared: run CAC plus detector */
		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
			return mt7915_dfs_start_radar_detector(phy);

		/* channel already available: CAC is done */
		return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
					  MT_RX_SEL0, 0);
	}

stop:
	err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7915_dfs_stop_radar_detector(phy);
	return 0;
}

/* Scale a TWT duration by 256.  Driver-visible durations are carried in
 * 256-usec units (see mt7915_mac_check_twt_req), so this presumably
 * yields microseconds on the TSF timebase -- TODO confirm against the
 * MCU interface.
 */
static int
mt7915_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

/* Insert @flow into the device-wide TWT schedule list, keeping scheduled
 * flows ordered by service-period start, and return the start TSF offset
 * chosen for the new flow (0 when it becomes the first entry).
 */
static u64
mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
			      struct mt7915_twt_flow *flow)
{
	struct mt7915_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7915_twt_flow, list);
	/* empty list, unscheduled head, or enough room before the first
	 * scheduled flow: place the new flow at the front, starting at 0
	 */
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	/* otherwise look for a gap between consecutive scheduled flows
	 * large enough to hold the new flow's duration
	 */
	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7915_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

/* Validate an incoming TWT setup request against the subset of TWT this
 * driver supports.  Returns 0 when acceptable, -EOPNOTSUPP otherwise.
 */
static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	/* duration: min_twt_dur is in 256-usec units */
	duration =
twt_agrt->min_twt_dur << 8;

	/* wake interval = mantissa * 2^exp; it must cover the duration */
	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

/* Handle a TWT setup request from @sta: allocate a flow id and hardware
 * table slot, schedule the flow, program the agreement into the MCU and
 * rewrite @twt in place as the response (setup command ACCEPT on
 * success, REJECT otherwise).
 */
void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7915_dev *dev = mt7915_hw_dev(hw);
	struct mt7915_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7915_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	/* reject when the global agreement table is full */
	if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
		goto unlock;

	/* reject when this station has no free flow slot left */
	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	/* lowest free per-station flow id; echo it back in the response */
	flowid = ffs(~msta->twt.flowid_mask) - 1;
	le16p_replace_bits(&twt_agrt->req_type, flowid,
			   IEEE80211_TWT_REQTYPE_FLOWID);

	/* lowest free hardware table slot */
	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	/* populate the flow descriptor from the request fields */
	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		/* wake interval = mantissa * 2^exp */
		u64 interval =
(u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7915_get_tsf(hw, msta->vif);
		/* first service period: the next interval boundary (relative
		 * to start_tsf) that lies after the current TSF
		 */
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		/* DEMAND/GROUPING etc.: keep the requested TWT, unscheduled */
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	/* MCU accepted: commit the id allocations */
	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	/* write the verdict (ACCEPT/REJECT) back into the request we answer;
	 * keep only the wake-dur-unit and rx-disabled control bits
	 */
	le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
			   IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}

/* Tear down one TWT flow of @msta and release its flow id and hardware
 * table slot.  No-op for an out-of-range or unallocated flow id; when the
 * MCU delete fails, the bookkeeping is intentionally left untouched.
 * Caller must hold dev->mt76.mutex.
 */
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
				  struct mt7915_sta *msta,
				  u8 flowid)
{
	struct mt7915_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}