// SPDX-License-Identifier: ISC
/* Copyright (C) 2021 MediaTek Inc. */

#include "mt7921.h"
#include "../dma.h"
#include "mac.h"

/* Pack the frame's DMA fragments into the hardware TXP: the DMA layer only
 * maps the TXD + TXP buffer, while the data fragments are described by the
 * buf/len pairs in txp->ptr[] and tagged with the tx token id.
 */
static void
mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
		    void *txp_ptr, u32 id)
{
	struct mt7921_hw_txp *txp = txp_ptr;
	struct mt7921_txp_ptr *ptr = &txp->ptr[0];
	int i, nbuf = tx_info->nbuf - 1;

	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
	tx_info->nbuf = 1;

	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);

	for (i = 0; i < nbuf; i++) {
		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
		u32 addr = tx_info->buf[i + 1].addr;

		if (i == nbuf - 1)
			len |= MT_TXD_LEN_LAST;

		if (i & 1) {
			ptr->buf1 = cpu_to_le32(addr);
			ptr->len1 = cpu_to_le16(len);
			ptr++;
		} else {
			ptr->buf0 = cpu_to_le32(addr);
			ptr->len0 = cpu_to_le16(len);
		}
	}
}

int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
			   struct ieee80211_sta *sta,
			   struct mt76_tx_info *tx_info)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct mt76_txwi_cache *t;
	struct mt7921_txp_common *txp;
	int id, pid;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	if (sta) {
		struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;

		/* request a tx status report at most every 250ms per station */
		if (time_after(jiffies, msta->last_txs + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->last_txs = jiffies;
		}
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, false);

	txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
	memset(txp, 0, sizeof(struct mt7921_txp_common));
	mt7921_write_hw_txp(dev, tx_info, txp, id);

	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}

/* Unmap the data fragments described by the hardware TXP of a tx descriptor */
static void
mt7921_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	struct mt7921_txp_common *txp;
	int i;

	txp = mt7921_txwi_to_txp(dev, t);

	for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
		struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
		bool last;
		u16 len;

		len = le16_to_cpu(ptr->len0);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
				 DMA_TO_DEVICE);
		if (last)
			break;

		len = le16_to_cpu(ptr->len1);
		last = len & MT_TXD_LEN_LAST;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
				 DMA_TO_DEVICE);
		if (last)
			break;
	}
}

/* Complete a tx descriptor: unmap its buffers, hand the skb back to mac80211
 * and return the txwi cache entry to the pool.
 */
static void
mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, bool clear_status,
		 struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt7921_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7921_tx_check_aggr(sta, txwi);

		wcid_idx = wcid->idx;
	} else {
		wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

/* Handle a TXRX_NOTIFY (tx-free) event from the firmware: release the tokens
 * of the reported MSDUs and complete the corresponding tx skbs.
 */
static void
mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *tmp;
	bool wake = false;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	/* TODO: MT_TX_FREE_LATENCY is the msdu time from the TXD being queued
	 * into PLE to the time the ack is received or the frame is dropped by
	 * hw (air + hw queue time). Should avoid accessing the WTBL to get the
	 * Tx airtime and use it instead.
	 */
	count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(free->info[i]);
		u8 stat;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7921_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7921_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list,
					      &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
	}

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	napi_consume_skb(skb, 1);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}

	rcu_read_lock();
	mt7921_mac_sta_poll(dev);
	rcu_read_unlock();

	mt76_worker_schedule(&dev->mt76.tx_worker);
}

void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			  struct sk_buff *skb)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	enum rx_pkt_type type;

	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7921_mac_tx_free(dev, skb);
		break;
	default:
		mt7921_queue_rx_skb(mdev, q, skb);
		break;
	}
}

void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7921_txp_common *txp;
		u16 token;

		txp = mt7921_txwi_to_txp(mdev, e->txwi);
		token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
		t = mt76_token_put(mdev, token);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb)
		mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}

/* Release all tx descriptors still tracked by the token idr, e.g. across a
 * chip reset, and drop the idr itself.
 */
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7921_txwi_free(dev, txwi, NULL, false, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

/* Full MAC recovery: quiesce DMA and NAPI, drop all pending tx, reset WFDMA
 * and reload the firmware before restarting the PHY.
 */
int mt7921e_mac_reset(struct mt7921_dev *dev)
{
	int i, err;

	mt7921e_mcu_drv_pmctrl(dev);

	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);

	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	skb_queue_purge(&dev->mt76.mcu.res_q);

	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
	napi_disable(&dev->mt76.tx_napi);

	mt7921_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7921_wpdma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);

	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
		MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
		MT_INT_MCU_CMD);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);

	err = mt7921_run_firmware(dev);
	if (err)
		goto out;

	err = mt7921_mcu_set_eeprom(dev);
	if (err)
		goto out;

	err = mt7921_mac_init(dev);
	if (err)
		goto out;

	err = __mt7921_start(&dev->phy);
out:
	clear_bit(MT76_RESET, &dev->mphy.state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);

	return err;
}