// SPDX-License-Identifier: ISC

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"
#include "../trace.h"

#define MT_PSE_PAGE_SIZE	128

static u32
mt7603_ac_queue_mask0(u32 mask)
{
	u32 ret = 0;

	ret |= GENMASK(3, 0) * !!(mask & BIT(0));
	ret |= GENMASK(8, 5) * !!(mask & BIT(1));
	ret |= GENMASK(13, 10) * !!(mask & BIT(2));
	ret |= GENMASK(19, 16) * !!(mask & BIT(3));
	return ret;
}

static void
mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
}

static void
mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}

void mt7603_mac_reset_counters(struct mt7603_dev *dev)
{
	int i;

	for (i = 0; i < 2; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
}

void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
	int offset = 3 * dev->coverage_class;
	u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
			 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	bool is_5ghz = dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ;
	int sifs;
	u32 val;

	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
	mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
	mt76_wr(dev, MT_IFS,
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, dev->slottime));

	if (dev->slottime < 20 || is_5ghz)
		val = MT7603_CFEND_RATE_DEFAULT;
	else
		val = MT7603_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);

	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

static void
mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
}

static u32
mt7603_wtbl1_addr(int idx)
{
	return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
}

static u32
mt7603_wtbl2_addr(int idx)
{
	/* Mapped to WTBL2 */
	return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
}

static u32
mt7603_wtbl3_addr(int idx)
{
	u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL3_SIZE;
}

static u32
mt7603_wtbl4_addr(int idx)
{
	u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL4_SIZE;
}
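
/*
 * WTBL address layout, as implied by the helpers above: WTBL1 is
 * indexed directly from MT_WTBL1_BASE, while WTBL2/3/4 are reached
 * through the PCIe remap window, with WTBL3 packed right behind the
 * MT7603_WTBL_SIZE entries of WTBL2 and WTBL4 behind WTBL3. For
 * example, entry 5 of WTBL2 sits at
 * MT_PCIE_REMAP_BASE_1 + 5 * MT_WTBL2_SIZE.
 */
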
void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
		      const u8 *mac_addr)
{
	const void *_mac = mac_addr;
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 w0 = 0, w1 = 0;
	int i;

	if (_mac) {
		w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
				get_unaligned_le16(_mac + 4));
		w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
				get_unaligned_le32(_mac));
	}

	if (vif < 0)
		vif = 0;
	else
		w0 |= MT_WTBL1_W0_RX_CHECK_A1;
	w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_set(dev, addr + 0 * 4, w0);
	mt76_set(dev, addr + 1 * 4, w1);
	mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	addr = mt7603_wtbl2_addr(idx);
	for (i = 0; i < MT_WTBL2_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	addr = mt7603_wtbl3_addr(idx);
	for (i = 0; i < MT_WTBL3_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	addr = mt7603_wtbl4_addr(idx);
	for (i = 0; i < MT_WTBL4_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

static void
mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 val = mt76_rr(dev, addr + 3 * 4);

	val &= ~MT_WTBL1_W3_SKIP_TX;
	val |= enabled * MT_WTBL1_W3_SKIP_TX;

	mt76_wr(dev, addr + 3 * 4, val);
}

void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
{
	int i, port, queue;

	if (abort) {
		port = 3; /* PSE */
		queue = 8; /* free queue */
	} else {
		port = 0; /* HIF */
		queue = 1; /* MCU queue */
	}

	mt7603_wtbl_set_skip_tx(dev, idx, true);

	mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
		FIELD_PREP(MT_TX_ABORT_WCID, idx));

	for (i = 0; i < 4; i++) {
		mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));

		WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
					0, 5000));
	}

	mt76_wr(dev, MT_TX_ABORT, 0);

	mt7603_wtbl_set_skip_tx(dev, idx, false);
}

void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			  bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);

	if (sta->smps == enabled)
		return;

	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
	sta->smps = enabled;
}

void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			bool enabled)
{
	int idx = sta->wcid.idx;
	u32 addr;

	spin_lock_bh(&dev->ps_lock);

	if (sta->ps == enabled)
		goto out;

	mt76_wr(dev, MT_PSE_RTA,
		FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
		FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
		FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
		FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
		MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);

	mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);

	if (enabled)
		mt7603_filter_tx(dev, idx, false);

	addr = mt7603_wtbl1_addr(idx);
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
		 enabled * MT_WTBL1_W3_POWER_SAVE);
	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	sta->ps = enabled;

out:
	spin_unlock_bh(&dev->ps_lock);
}
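
/*
 * WTBL2/3/4 entries appear to be packed into 128-byte PSE pages
 * ("frames"): frame = idx / (MT_PSE_PAGE_SIZE / entry size),
 * entry = idx % (MT_PSE_PAGE_SIZE / entry size), with WTBL3/4
 * additionally offset by the page index of their base address.
 */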
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
{
	int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
	int wtbl2_frame = idx / wtbl2_frame_size;
	int wtbl2_entry = idx % wtbl2_frame_size;

	int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
	int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
	int wtbl3_entry = (idx % wtbl3_frame_size) * 2;

	int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
	int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
	int wtbl4_entry = idx % wtbl4_frame_size;

	u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
	int i;

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_wr(dev, addr + 0 * 4,
		MT_WTBL1_W0_RX_CHECK_A1 |
		MT_WTBL1_W0_RX_CHECK_A2 |
		MT_WTBL1_W0_RX_VALID);
	mt76_wr(dev, addr + 1 * 4, 0);
	mt76_wr(dev, addr + 2 * 4, 0);

	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	mt76_wr(dev, addr + 3 * 4,
		FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
		FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
		FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
		MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
	mt76_wr(dev, addr + 4 * 4,
		FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
		FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
		FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));

	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	addr = mt7603_wtbl2_addr(idx);

	/* Clear BA information */
	mt76_wr(dev, addr + (15 * 4), 0);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	for (i = 2; i <= 4; i++)
		mt76_wr(dev, addr + (i * 4), 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	int idx = msta->wcid.idx;
	u8 ampdu_density;
	u32 addr;
	u32 val;

	addr = mt7603_wtbl1_addr(idx);

	ampdu_density = sta->ht_cap.ampdu_density;
	if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4)
		ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;

	val = mt76_rr(dev, addr + 2 * 4);
	val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
	val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
	       FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, ampdu_density) |
	       MT_WTBL1_W2_TXS_BAF_REPORT;

	if (sta->ht_cap.cap)
		val |= MT_WTBL1_W2_HT;
	if (sta->vht_cap.cap)
		val |= MT_WTBL1_W2_VHT;

	mt76_wr(dev, addr + 2 * 4, val);

	addr = mt7603_wtbl2_addr(idx);
	val = mt76_rr(dev, addr + 9 * 4);
	val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
		 MT_WTBL2_W9_SHORT_GI_80);
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
		val |= MT_WTBL2_W9_SHORT_GI_20;
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
		val |= MT_WTBL2_W9_SHORT_GI_40;
	mt76_wr(dev, addr + 9 * 4, val);
}

void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
{
	mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
	mt76_wr(dev, MT_BA_CONTROL_1,
		(get_unaligned_le16(addr + 4) |
		 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
		 MT_BA_CONTROL_1_RESET));
}
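
/*
 * WTBL2 word 15 packs one BA enable bit and a 3-bit window size index
 * per TID; the loop below picks the largest index whose
 * MT_AGG_SIZE_LIMIT() still fits within the negotiated ba_size.
 */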
void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
			    int ba_size)
{
	u32 addr = mt7603_wtbl2_addr(wcid);
	u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		       (MT_WTBL2_W15_BA_WIN_SIZE <<
			(tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
	u32 tid_val;
	int i;

	if (ba_size < 0) {
		/* disable */
		mt76_clear(dev, addr + (15 * 4), tid_mask);
		return;
	}

	for (i = 7; i > 0; i--) {
		if (ba_size >= MT_AGG_SIZE_LIMIT(i))
			break;
	}

	tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);

	mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}

void mt7603_mac_sta_poll(struct mt7603_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7603_sta *msta;
	u32 total_airtime = 0;
	u32 airtime[4];
	u32 addr;
	int i;

	rcu_read_lock();

	while (1) {
		bool clear = false;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&dev->sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}

		msta = list_first_entry(&dev->sta_poll_list, struct mt7603_sta,
					poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		addr = mt7603_wtbl4_addr(msta->wcid.idx);
		for (i = 0; i < 4; i++) {
			u32 airtime_last = msta->tx_airtime_ac[i];

			msta->tx_airtime_ac[i] = mt76_rr(dev, addr + i * 8);
			airtime[i] = msta->tx_airtime_ac[i] - airtime_last;
			airtime[i] *= 32;
			total_airtime += airtime[i];

			if (msta->tx_airtime_ac[i] & BIT(22))
				clear = true;
		}

		if (clear) {
			mt7603_wtbl_update(dev, msta->wcid.idx,
					   MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->tx_airtime_ac, 0,
			       sizeof(msta->tx_airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		for (i = 0; i < 4; i++) {
			struct mt76_queue *q = dev->mphy.q_tx[i];
			u8 qidx = q->hw_idx;
			u8 tid = ac_to_tid[i];
			u32 txtime = airtime[qidx];

			if (!txtime)
				continue;

			ieee80211_sta_register_airtime(sta, tid, txtime, 0);
		}
	}

	rcu_read_unlock();

	if (!total_airtime)
		return;

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->mphy.chan_state->cc_tx += total_airtime;
	spin_unlock_bh(&dev->mt76.cc_lock);
}

static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
	struct mt7603_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= MT7603_WTBL_SIZE)
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7603_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}
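
/*
 * The RX descriptor starts with four mandatory dwords; flags in rxd[0]
 * announce which optional groups follow. Group 3 carries the RX vector
 * (rate and RSSI) and is mandatory here, hence the -EINVAL in its else
 * branch below.
 */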
int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
	bool insert_ccmp_hdr = false;
	bool remove_pad;
	int idx;
	int i;

	memset(status, 0, sizeof(*status));

	i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	sband = (i & 1) ? &dev->mphy.sband_5g.sband : &dev->mphy.sband_2g.sband;
	i >>= 1;

	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);

	status->band = sband->band;
	if (i < sband->n_channels)
		status->freq = sband->channels[i].center_freq;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
			      MT_RXD2_NORMAL_NON_AMPDU))) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (dev->rx_ampdu_ts != status->timestamp) {
				if (!++dev->ampdu_ref)
					dev->ampdu_ref++;
			}
			dev->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = dev->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			fallthrough;
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 15)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
				     FIELD_GET(MT_RXV1_HT_STBC, rxdg0);

		status->rate_idx = i;

		status->chains = dev->mphy.antenna_mask;
		status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
					  dev->rssi_offset[0];
		status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
					  dev->rssi_offset[1];

		status->signal = status->chain_signal[0];
		if (status->chains & BIT(1))
			status->signal = max(status->signal,
					     status->chain_signal[1]);

		if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
			status->bw = RATE_INFO_BW_40;

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
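
	/* rxd now points past the last descriptor dword consumed above;
	 * also strip the 2-byte alignment pad when
	 * MT_RXD1_NORMAL_HDR_OFFSET was set */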
	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}

static u16
mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
		       const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval;

	*bw = 0;
	if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mphy.chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, phy));

	if (stbc && nss == 1)
		rateval |= MT_TX_RATE_STBC;

	return rateval;
}
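
/*
 * Rate sets are double-buffered: bit 0 of sta->rate_set_tsf selects
 * which sta->rateset[] slot is currently live, and the TSF snapshot
 * stored with it lets the TX status path (mt7603_fill_txs()) work out
 * which set a reported frame was sent from.
 */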
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
			   struct ieee80211_tx_rate *probe_rate,
			   struct ieee80211_tx_rate *rates)
{
	struct ieee80211_tx_rate *ref;
	int wcid = sta->wcid.idx;
	u32 addr = mt7603_wtbl2_addr(wcid);
	bool stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev, bw_idx = 0;
	u16 val[4];
	u16 probe_val;
	u32 w9 = mt76_rr(dev, addr + 9 * 4);
	bool rateset;
	int i, k;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (k = 0; k < i; k++) {
			if (rates[i].idx != rates[k].idx)
				continue;
			if ((rates[i].flags ^ rates[k].flags) &
			    IEEE80211_TX_RC_40_MHZ_WIDTH)
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
	      MT_WTBL2_W9_SHORT_GI_80;

	val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
		if (bw)
			bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		probe_val = val[0];
	}

	w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
	w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);

	val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
	if (bw_prev) {
		bw_idx = 3;
		bw_prev = bw;
	}

	val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
	if (bw_prev) {
		bw_idx = 5;
		bw_prev = bw;
	}

	val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
	if (bw_prev)
		bw_idx = 7;

	w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
			 bw_idx ? bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w9);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}

static enum mt7603_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		/* Rx/Tx MIC keys are swapped */
		memcpy(key_data + 16, key->key + 24, 8);
		memcpy(key_data + 24, key->key + 16, 8);
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}
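
/*
 * Key material is written into the per-station WTBL3 block. For WEP,
 * the hardware seems to expect each key index in its own 16-byte slot,
 * hence the keyidx * 16 offset and the shorter copy below.
 */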
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
			struct ieee80211_key_conf *key)
{
	enum mt7603_cipher_type cipher;
	u32 addr = mt7603_wtbl3_addr(wcid);
	u8 key_data[32];
	int key_len = sizeof(key_data);

	cipher = mt7603_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
		addr += key->keyidx * 16;
		key_len = 16;
	}

	mt76_wr_copy(dev, addr, key_data, key_len);

	addr = mt7603_wtbl1_addr(wcid);
	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
	if (key)
		mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
	mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);

	return 0;
}

static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, enum mt76_txq_id qid,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		      int pid, struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_queue *q = dev->mphy.q_tx[qid];
	struct mt7603_vif *mvif;
	int wlan_idx;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	int tx_count = 8;
	u8 frame_type, frame_subtype;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u16 seqno = 0;
	u8 vif_idx = 0;
	u32 val;
	u8 bw;

	if (vif) {
		mvif = (struct mt7603_vif *)vif->drv_priv;
		vif_idx = mvif->idx;
		if (vif_idx && qid >= MT_TXQ_BEACON)
			vif_idx += 0x10;
	}

	if (sta) {
		struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (wcid)
		wlan_idx = wcid->idx;
	else
		wlan_idx = MT7603_WTBL_RESERVED;

	frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
	      FIELD_PREP(MT_TXD1_PROTECTED, !!key);
	txwi[1] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;

	val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD5_PID, pid);
	txwi[5] = cpu_to_le32(val);

	txwi[6] = 0;
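
	/* A valid fixed rate from mac80211 overrides the WTBL rate table:
	 * MT_TXD2_FIX_RATE makes the MAC use the rate encoded in TXD
	 * word 6 below (presumed hardware behavior, judging by the
	 * field names) */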
	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (!(rate->flags & IEEE80211_TX_RC_MCS))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	/* use maximum tx count for beacons and buffered multicast */
	if (qid >= MT_TXQ_BEACON)
		tx_count = 0x1f;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
	      MT_TXD3_SN_VALID;

	if (ieee80211_is_data_qos(hdr->frame_control))
		seqno = le16_to_cpu(hdr->seq_ctrl);
	else if (ieee80211_is_back_req(hdr->frame_control))
		seqno = le16_to_cpu(bar->start_seq_num);
	else
		val &= ~MT_TXD3_SN_VALID;

	val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);

	txwi[3] = cpu_to_le32(val);

	if (key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
		txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
		txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
	}

	txwi[7] = 0;

	return 0;
}

int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid;

	if (!wcid)
		wcid = &dev->global_sta.wcid;

	if (sta) {
		msta = (struct mt7603_sta *)sta->drv_priv;

		if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
		    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
			mt7603_wtbl_set_ps(dev, msta, false);

		mt76_tx_check_agg_ssn(sta, tx_info->skb);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
				      msta->rates);
		msta->rate_probe = true;
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
			      sta, pid, key);

	return 0;
}
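
/*
 * Rebuild the rate chain for TX status reporting: the TXS only carries
 * a total transmit count and the index of the last rate used, so the
 * per-rate retry counts are reconstructed from the stored rate set,
 * MT7603_RATE_RETRY attempts per entry.
 */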
static bool
mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
		struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7603_rate_set *rs;
	int first_idx = 0, last_idx;
	u32 rate_set_tsf;
	u32 final_rate;
	u32 final_rate_flags;
	bool rs_idx;
	bool ack_timeout;
	bool fixed_rate;
	bool probe;
	bool ampdu;
	bool cck = false;
	int count;
	u32 txs;
	int idx;
	int i;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[4]);
	ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
	count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count - 1) / MT7603_RATE_RETRY);

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			mt7603_wtbl_set_rates(dev, sta, NULL,
					      sta->rates);
			sta->rate_probe = false;
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7603_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mphy.sband_5g.sband;
		else
			sband = &dev->mphy.sband_2g.sband;
		final_rate &= GENMASK(5, 0);
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= GENMASK(5, 0);
		if (final_rate > 15)
			return false;
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

static bool
mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
		       __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
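
/*
 * TX status entries are matched back to their skb via the packet ID
 * assigned in mt7603_tx_prepare_skb(); reports without a matching skb
 * are still fed to ieee80211_tx_status_noskb() for rate control.
 */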
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7603_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[4]);
	pid = FIELD_GET(MT_TXS4_PID, txs);
	txs = le32_to_cpu(txs_data[3]);
	wcidx = FIELD_GET(MT_TXS3_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= MT7603_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7603_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (list_empty(&msta->poll_list)) {
		spin_lock_bh(&dev->sta_poll_lock);
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7603_WTBL_STA || !sta)
		goto out;

	if (mt7603_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}

void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct sk_buff *skb = e->skb;

	if (!e->txwi) {
		dev_kfree_skb_any(skb);
		return;
	}

	dev->tx_hang_check = 0;
	mt76_tx_complete_skb(mdev, e->wcid, skb);
}

static bool
wait_for_wpdma(struct mt7603_dev *dev)
{
	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
			 0, 1000);
}

static void mt7603_pse_reset(struct mt7603_dev *dev)
{
	/* Clear previous reset result */
	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);

	/* Reset PSE */
	mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);

	if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
			    MT_MCU_DEBUG_RESET_PSE_S,
			    MT_MCU_DEBUG_RESET_PSE_S, 500)) {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
	} else {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
	}

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
}

void mt7603_mac_dma_start(struct mt7603_dev *dev)
{
	mt7603_mac_start(dev);

	wait_for_wpdma(dev);
	usleep_range(50, 100);

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 (MT_WPDMA_GLO_CFG_TX_DMA_EN |
		  MT_WPDMA_GLO_CFG_RX_DMA_EN |
		  FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
		  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));

	mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
}

void mt7603_mac_start(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
	mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_mac_stop(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
	mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}
void mt7603_pse_client_reset(struct mt7603_dev *dev)
{
	u32 addr;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
			      MT_CLIENT_RESET_TX);

	/* Clear previous reset state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2 |
		   MT_CLIENT_RESET_TX_R_E_1_S |
		   MT_CLIENT_RESET_TX_R_E_2_S);

	/* Start PSE client TX abort */
	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
		       MT_CLIENT_RESET_TX_R_E_1_S, 500);

	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);

	/* Wait for PSE client to clear TX FIFO */
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
		       MT_CLIENT_RESET_TX_R_E_2_S, 500);

	/* Clear PSE client TX abort state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2);
}

static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
{
	if (!is_mt7628(dev))
		return;

	mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
	mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
}

static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
	int beacon_int = dev->mt76.beacon_int;
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mphy.state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt7603_beacon_set_timer(dev, -1, 0);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
		mt7603_pse_reset(dev);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		goto skip_dma_reset;

	mt7603_mac_stop(dev);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	mt7603_irq_disable(dev, mask);

	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

	mt7603_pse_client_reset(dev);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		mt76_queue_rx_reset(dev, i);
	}

	mt76_tx_status_check(&dev->mt76, NULL, true);

	mt7603_dma_sched_reset(dev);

	mt7603_mac_dma_start(dev);

	mt7603_irq_enable(dev, mask);

skip_dma_reset:
	clear_bit(MT76_RESET, &dev->mphy.state);
	mutex_unlock(&dev->mt76.mutex);

	mt76_worker_enable(&dev->mt76.tx_worker);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	mt7603_beacon_set_timer(dev, -1, beacon_int);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(dev->mt76.hw);
	mt76_txq_schedule_all(&dev->mphy);
}
static u32
mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
{
	u32 val;

	mt76_wr(dev, MT_WPDMA_DEBUG,
		FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
		MT_WPDMA_DEBUG_SEL);

	val = mt76_rr(dev, MT_WPDMA_DEBUG);
	return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
}

static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
{
	if (is_mt7628(dev))
		return mt7603_dma_debug(dev, 9) & BIT(9);

	return mt7603_dma_debug(dev, 2) & BIT(8);
}

static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
{
	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
		return false;

	return mt7603_rx_fifo_busy(dev);
}

static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
{
	u32 val;

	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
		return false;

	val = mt7603_dma_debug(dev, 9);
	return (val & BIT(8)) && (val & 0xf) != 0xf;
}

static bool mt7603_tx_hang(struct mt7603_dev *dev)
{
	struct mt76_queue *q;
	u32 dma_idx, prev_dma_idx;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mphy.q_tx[i];

		if (!q->queued)
			continue;

		prev_dma_idx = dev->tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->tx_dma_idx[i] = dma_idx;

		if (dma_idx == prev_dma_idx &&
		    dma_idx != readl(&q->regs->cpu_idx))
			break;
	}

	return i < 4;
}

static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
	u32 addr, val;

	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
		return true;

	if (mt7603_rx_fifo_busy(dev))
		return false;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
	mt76_wr(dev, addr, 3);
	val = mt76_rr(dev, addr) >> 16;

	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
		return true;

	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}

static bool
mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
		      enum mt7603_reset_cause cause,
		      bool (*check)(struct mt7603_dev *dev))
{
	if (dev->reset_test == cause + 1) {
		dev->reset_test = 0;
		goto trigger;
	}

	if (check) {
		if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
			*counter = 0;
			return false;
		}

		(*counter)++;
	}

	if (*counter < MT7603_WATCHDOG_TIMEOUT)
		return false;
trigger:
	dev->cur_reset_cause = cause;
	dev->reset_cause[cause]++;
	return true;
}

void mt7603_update_channel(struct mt76_dev *mdev)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt76_channel_state *state;

	state = mdev->phy.chan_state;
	state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}

void
mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
{
	u32 rxtd_6 = 0xd7c80000;

	if (val == dev->ed_strict_mode)
		return;

	dev->ed_strict_mode = val;

	/* Ensure that ED/CCA does not trigger if disabled */
	if (!dev->ed_monitor)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);

	if (dev->ed_monitor && !dev->ed_strict_mode)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);

	mt76_wr(dev, MT_RXTD(6), rxtd_6);

	mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
		       dev->ed_monitor && !dev->ed_strict_mode);
}
static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
	u32 val = mt76_rr(dev, MT_AGC(41));
	ktime_t cur_time;
	int rssi0, rssi1;
	u32 active;
	u32 ed_busy;

	if (!dev->ed_monitor)
		return;

	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
	if (rssi0 > 128)
		rssi0 -= 256;

	if (dev->mphy.antenna_mask & BIT(1)) {
		rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
		if (rssi1 > 128)
			rssi1 -= 256;
	} else {
		rssi1 = rssi0;
	}

	if (max(rssi0, rssi1) >= -40 &&
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
		dev->ed_strong_signal++;
	else if (dev->ed_strong_signal > 0)
		dev->ed_strong_signal--;

	cur_time = ktime_get_boottime();
	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	if (!active)
		return;

	if (100 * ed_busy / active > 90) {
		if (dev->ed_trigger < 0)
			dev->ed_trigger = 0;
		dev->ed_trigger++;
	} else {
		if (dev->ed_trigger > 0)
			dev->ed_trigger = 0;
		dev->ed_trigger--;
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
		mt7603_edcca_set_strict(dev, true);
	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
		mt7603_edcca_set_strict(dev, false);
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}

void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}

static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
	u32 agc0 = dev->agc0, agc3 = dev->agc3;
	u32 adj;

	if (!dev->sensitivity || dev->sensitivity < -100) {
		dev->sensitivity = 0;
	} else if (dev->sensitivity <= -84) {
		adj = 7 + (dev->sensitivity + 92) / 2;

		agc0 = 0x56f0076f;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x81d0d5e3;
	} else if (dev->sensitivity <= -72) {
		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x6af0006f;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x8181d5e3;
	} else {
		if (dev->sensitivity > -54)
			dev->sensitivity = -54;

		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x7ff0000f;
		agc0 |= adj << 4;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x818181e3;
	}

	mt76_wr(dev, MT_AGC(0), agc0);
	mt76_wr(dev, MT_AGC1(0), agc0);

	mt76_wr(dev, MT_AGC(3), agc3);
	mt76_wr(dev, MT_AGC1(3), agc3);
}
static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
	int false_cca;
	int min_signal;
	u32 val;

	if (!dev->dynamic_sensitivity)
		return;

	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	dev->false_cca_cck = pd_cck - mdrdy_cck;

	mt7603_cca_stats_reset(dev);

	min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
	if (!min_signal) {
		dev->sensitivity = 0;
		dev->last_cca_adj = jiffies;
		goto out;
	}

	min_signal -= 15;

	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
	if (false_cca > 600 &&
	    dev->sensitivity < -100 + dev->sensitivity_limit) {
		if (!dev->sensitivity)
			dev->sensitivity = -92;
		else
			dev->sensitivity += 2;
		dev->last_cca_adj = jiffies;
	} else if (false_cca < 100 ||
		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
		dev->last_cca_adj = jiffies;
		if (!dev->sensitivity)
			goto out;

		dev->sensitivity -= 2;
	}

	if (dev->sensitivity && dev->sensitivity > min_signal) {
		dev->sensitivity = min_signal;
		dev->last_cca_adj = jiffies;
	}

out:
	mt7603_adjust_sensitivity(dev);
}

void mt7603_mac_work(struct work_struct *work)
{
	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
					      mphy.mac_work.work);
	bool reset = false;
	int i, idx;

	mt76_tx_status_check(&dev->mt76, NULL, false);

	mutex_lock(&dev->mt76.mutex);

	dev->mphy.mac_work_count++;
	mt76_update_survey(&dev->mt76);
	mt7603_edcca_check(dev);

	for (i = 0, idx = 0; i < 2; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mt76.aggr_stats[idx++] += val & 0xffff;
		dev->mt76.aggr_stats[idx++] += val >> 16;
	}

	if (dev->mphy.mac_work_count == 10)
		mt7603_false_cca_check(dev);

	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
				  RESET_CAUSE_RX_PSE_BUSY,
				  mt7603_rx_pse_busy) ||
	    mt7603_watchdog_check(dev, &dev->beacon_check,
				  RESET_CAUSE_BEACON_STUCK,
				  NULL) ||
	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
				  RESET_CAUSE_TX_HANG,
				  mt7603_tx_hang) ||
	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
				  RESET_CAUSE_TX_BUSY,
				  mt7603_tx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
				  RESET_CAUSE_RX_BUSY,
				  mt7603_rx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->mcu_hang,
				  RESET_CAUSE_MCU_HANG,
				  NULL) ||
	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
		dev->beacon_check = 0;
		dev->tx_dma_check = 0;
		dev->tx_hang_check = 0;
		dev->rx_dma_check = 0;
		dev->rx_pse_check = 0;
		dev->mcu_hang = 0;
		dev->rx_dma_idx = ~0;
		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
		reset = true;
		dev->mphy.mac_work_count = 0;
	}

	if (dev->mphy.mac_work_count >= 10)
		dev->mphy.mac_work_count = 0;

	mutex_unlock(&dev->mt76.mutex);

	if (reset)
		mt7603_mac_watchdog_reset(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}