// SPDX-License-Identifier: ISC
/* Copyright (C) 2021 MediaTek Inc. */

#include "mt7921.h"
#include "../dma.h"
#include "mac.h"

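/* Prepare the hardware descriptors for one frame on a PCIe TX queue:
 * reserve a TX token for the txwi cache, optionally request a TX status
 * report, write the TXWI and fill the hardware TXP with the DMA fragments
 * and the token id.
 */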
int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
			   struct ieee80211_sta *sta,
			   struct mt76_tx_info *tx_info)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct mt76_connac_hw_txp *txp;
	struct mt76_txwi_cache *t;
	int id, pid;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

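	/* The txwi cache entry lives right after the TXWI in the DMA buffer;
	 * a token is consumed so the frame can be matched against the
	 * TXRX_NOTIFY free event later.
	 */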
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

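	/* Rate-limit TX status requests to roughly one every HZ / 4 per
	 * station to keep TXS reporting cheap.
	 */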
	if (sta) {
		struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;

		if (time_after(jiffies, msta->last_txs + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->last_txs = jiffies;
		}
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt76_connac2_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, key,
				    pid, qid, 0);

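	/* The hardware TXP follows the TXWI and carries the frame's DMA
	 * fragments plus the token id used by the TX free notification.
	 */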
	txp = (struct mt76_connac_hw_txp *)(txwi + MT_TXD_SIZE);
	memset(txp, 0, sizeof(struct mt76_connac_hw_txp));
	mt76_connac_write_hw_txp(mdev, tx_info, txp, id);

	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}

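/* Complete one frame tracked by a txwi cache entry: unmap its DMA
 * fragments, check whether aggregation needs to be triggered and hand the
 * skb back to the completion path before returning the cache entry to the
 * pool.
 */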
static void
mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, bool clear_status,
		 struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
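	/* EAPOL frames are excluded from the aggregation check; without a
	 * station context the WLAN index is recovered from TXD word 1.
	 */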
	if (sta) {
		struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7921_tx_check_aggr(sta, txwi);

		wcid_idx = wcid->idx;
	} else {
		wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

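/* Handle a TXRX_NOTIFY (TX free) event from the firmware: walk the list of
 * released MSDU ids, return their tokens and complete the corresponding
 * frames, then kick station polling and the TX worker.
 */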
static void
mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
	struct mt76_connac_tx_free *free = data;
	__le32 *tx_info = (__le32 *)(data + sizeof(*free));
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	LIST_HEAD(free_list);
	bool wake = false;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
	if (WARN_ON_ONCE((void *)&tx_info[count] > end))
		return;

	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(tx_info[i]);
		u8 stat;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7921_sta *msta;
			struct mt76_wcid *wcid;
			u16 idx;

			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7921_sta, wcid);
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

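		/* MSDU entry: release the token and complete the frame for
		 * the station set by the preceding wcid pair entry.
		 */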
		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
	}

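	/* Releasing tokens may unblock TX queues that were stopped when the
	 * token pool ran low.
	 */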
	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}

	rcu_read_lock();
	mt7921_mac_sta_poll(dev);
	rcu_read_unlock();

	mt76_worker_schedule(&dev->mt76.tx_worker);
}

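/* Pre-filter RX descriptors: TX free and TX status events are consumed here
 * directly from the DMA buffer, while anything else is passed on (return
 * true) for normal RX processing.
 */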
bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7921e_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7921_mac_add_txs(dev, rxd);
		return false;
	default:
		return true;
	}
}

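/* Dispatch a received buffer that has been turned into an skb: TX free
 * events are handled in place, everything else goes through the common
 * mt7921 RX path.
 */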
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			  struct sk_buff *skb)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7921e_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	default:
		mt7921_queue_rx_skb(mdev, q, skb);
		break;
	}
}

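/* Drop every outstanding TX token and complete the frames still hanging off
 * them. Used before a DMA reset, when the hardware will no longer report
 * them as freed.
 */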
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7921_txwi_free(dev, txwi, NULL, false, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

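/* Full MAC recovery for the PCIe variant: quiesce interrupts, NAPI and the
 * TX worker, reset WPDMA, then reload the firmware and restore MAC and
 * eeprom state before restarting the PHY.
 */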
int mt7921e_mac_reset(struct mt7921_dev *dev)
{
	int i, err;

	mt7921e_mcu_drv_pmctrl(dev);

	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);

	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	skb_queue_purge(&dev->mt76.mcu.res_q);

	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
	napi_disable(&dev->mt76.tx_napi);

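	/* All outstanding tokens are stale once WPDMA is reset, so drop them
	 * and start from a fresh idr.
	 */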
	mt7921_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7921_wpdma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	dev->fw_assert = false;
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);

	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
		MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
		MT_INT_MCU_CMD);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);

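	/* Take ownership back from the MCU, then reload the firmware and
	 * reprogram MAC and eeprom state before restarting the PHY.
	 */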
	err = mt7921e_driver_own(dev);
	if (err)
		return err;

	err = mt7921_run_firmware(dev);
	if (err)
		goto out;

	err = mt7921_mcu_set_eeprom(dev);
	if (err)
		goto out;

	err = mt7921_mac_init(dev);
	if (err)
		goto out;

	err = __mt7921_start(&dev->phy);
out:
	clear_bit(MT76_RESET, &dev->mphy.state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);

	return err;
}