// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
#include "../dma.h"
#include "mac.h"

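/* Convert an RCPI field from the RX vector into a signed dBm value; the
 * arithmetic below assumes the hardware reports RCPI in 0.5 dB steps with
 * an offset of 220.
 */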
static inline s8 to_rssi(u32 field, u32 rxv)
{
	return (FIELD_GET(field, rxv) - 220) / 2;
}

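/* Look up the wcid a received frame should be attributed to. Non-unicast
 * frames from a known station are redirected to the per-vif wcid.
 */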
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
					    u8 idx, bool unicast)
{
	struct mt7615_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7615_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

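/* Parse the RX descriptor in front of the frame and fill mac80211's
 * mt76_rx_status. Four mandatory dwords come first; optional groups
 * signalled in RXD0 are skipped or decoded before the 802.11 header is
 * exposed via skb_pull().
 */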
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	bool unicast, remove_pad, insert_ccmp_hdr = false;
	int i, idx;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return -EINVAL;

	memset(status, 0, sizeof(*status));

	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);

	/* TODO: properly support DBDC */
	status->freq = dev->mt76.chandef.chan->center_freq;
	status->band = dev->mt76.chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &dev->mt76.sband_5g.sband;
	else
		sband = &dev->mt76.sband_2g.sband;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg1 = le32_to_cpu(rxd[1]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
			status->encoding = RX_ENC_VHT;
			break;
		default:
			return -EINVAL;
		}
		status->rate_idx = i;

		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
		case MT_PHY_BW_20:
			break;
		case MT_PHY_BW_40:
			status->bw = RATE_INFO_BW_40;
			break;
		case MT_PHY_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case MT_PHY_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;

		status->chains = dev->mt76.antenna_mask;
		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
		status->signal = status->chain_signal[0];

		for (i = 1; i < hweight8(dev->mt76.antenna_mask); i++) {
			if (!(status->chains & BIT(i)))
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}

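/* Nothing to do on power-save transitions; the empty stub only fills the
 * mt76 .sta_ps callback.
 */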
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}

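/* Free a completed TX queue entry. On the error path the entry carries
 * DMA_DUMMY_DATA instead of an skb, so the real skb has to be recovered
 * from the token idr before completion.
 */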
void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			    struct mt76_queue_entry *e)
{
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7615_dev *dev;
		struct mt7615_txp *txp;
		u8 *txwi_ptr;

		txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
		txp = (struct mt7615_txp *)(txwi_ptr + MT_TXD_SIZE);
		dev = container_of(mdev, struct mt7615_dev, mt76);

		spin_lock_bh(&dev->token_lock);
		t = idr_remove(&dev->token, le16_to_cpu(txp->token));
		spin_unlock_bh(&dev->token_lock);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb)
		mt76_tx_complete_skb(mdev, e->skb);
}

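/* Encode an ieee80211_tx_rate into the hardware rate value (TX mode, rate
 * index, NSS and STBC) and report the matching bandwidth setting via @bw.
 */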
u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
			   const struct ieee80211_tx_rate *rate,
			   bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval;

	*bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = ieee80211_rate_get_vht_mcs(rate);
		nss = ieee80211_rate_get_vht_nss(rate);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*bw = 3;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, phy) |
		   FIELD_PREP(MT_TX_RATE_NSS, nss - 1));

	if (stbc && nss == 1)
		rateval |= MT_TX_RATE_STBC;

	return rateval;
}

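/* Fill the TXWI descriptor for a frame: queue/port selection, header info,
 * optional fixed-rate settings, sequence number and protection flags.
 */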
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta, int pid,
			  struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	int tx_count = 8;
	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0;
	__le16 fc = hdr->frame_control;
	u16 seqno = 0;
	u32 val;

	if (vif) {
		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
	}

	if (sta) {
		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	if (ieee80211_is_data(fc)) {
		q_idx = skb_get_queue_mapping(skb);
		p_fmt = MT_TX_TYPE_CT;
	} else if (ieee80211_is_beacon(fc)) {
		q_idx = MT_LMAC_BCN0;
		p_fmt = MT_TX_TYPE_FW;
	} else {
		q_idx = MT_LMAC_ALTX0;
		p_fmt = MT_TX_TYPE_CT;
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
	txwi[1] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;
	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u8 bw;
		u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (info->flags & IEEE80211_TX_CTL_LDPC)
			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);

		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
				     IEEE80211_TX_RC_VHT_MCS)))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	if (!ieee80211_is_beacon(fc)) {
		val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
		      FIELD_PREP(MT_TXD5_PID, pid);
		txwi[5] = cpu_to_le32(val);
	} else {
		txwi[5] = 0;
		/* use maximum tx count for beacons */
		tx_count = 0x1f;
	}

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
		val |= MT_TXD3_SN_VALID;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;

		seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
		val |= MT_TXD3_SN_VALID;
	}
	val |= FIELD_PREP(MT_TXD3_SEQ, seqno);

	txwi[3] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);

	if (key)
		txwi[3] |= cpu_to_le32(MT_TXD3_PROTECT_FRAME);

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] = cpu_to_le32(val);

	return 0;
}

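/* Unmap the extra DMA fragments described by the TXP. The first TXP buffer
 * is skipped; it also sits in the hardware DMA ring and is unmapped by the
 * common mt76 DMA code.
 */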
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
			  struct mt76_txwi_cache *t)
{
	struct mt7615_txp *txp;
	u8 *txwi;
	int i;

	txwi = mt76_get_txwi_ptr(dev, t);
	txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
	for (i = 1; i < txp->nbuf; i++)
		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}

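/* Prepare a frame for cut-through transmission: write the TXWI, build the
 * TXP fragment list for the firmware, and stash the skb in a txwi cache
 * entry indexed by a token so the completion path can find it again.
 */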
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	int i, pid, id, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;
	struct mt76_txwi_cache *t;
	struct mt7615_txp *txp;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		msta->rate_probe = true;
		mt7615_mcu_set_rates(dev, msta, &info->control.rates[0],
				     msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
			      pid, key);

	txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;

		txp->bss_idx = mvif->idx;
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);
	if (id < 0)
		return id;

	txp->token = cpu_to_le16(id);
	txp->rept_wds_wcid = 0xff;
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}

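/* Translate a TXS status report into info->status: ACK/AMPDU flags, per-rate
 * retry counts and the final rate actually used. Returns false if the report
 * cannot be mapped to a valid rate.
 */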
static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
			    struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	int i, idx, count, final_idx = 0;
	bool fixed_rate, ack_timeout;
	bool probe, ampdu, cck = false;
	u32 final_rate, final_rate_flags, final_nss, txs;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[1]);
	ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);

	txs = le32_to_cpu(txs_data[3]);
	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		goto out;
	}

	for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		int cur_count = min_t(int, count, 2 * MT7615_RATE_RETRY);

		if (!i && probe) {
			cur_count = 1;
		} else {
			info->status.rates[i] = sta->rates[idx];
			idx++;
		}

		if (i && info->status.rates[i].idx < 0) {
			info->status.rates[i - 1].count += count;
			break;
		}

		if (!count) {
			info->status.rates[i].idx = -1;
			break;
		}

		info->status.rates[i].count = cur_count;
		final_idx = i;
		count -= cur_count;
	}

out:
	final_rate_flags = info->status.rates[final_idx].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mt76.sband_5g.sband;
		else
			sband = &dev->mt76.sband_2g.sband;
		final_rate &= MT_TX_RATE_IDX;
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= MT_TX_RATE_IDX;
		if (final_rate > 31)
			return false;
		break;
	case MT_PHY_TYPE_VHT:
		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
		break;
	default:
		return false;
	}

	info->status.rates[final_idx].idx = final_rate;
	info->status.rates[final_idx].flags = final_rate_flags;

	return true;
}

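/* Complete a status-tracked skb matching this TXS report, reverting a rate
 * probe if one was in flight. Returns true if a matching skb was found.
 */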
static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
				   struct mt7615_sta *sta, int pid,
				   __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
			spin_lock_bh(&dev->mt76.lock);
			if (sta->rate_probe) {
				mt7615_mcu_set_rates(dev, sta, NULL,
						     sta->rates);
				sta->rate_probe = false;
			}
			spin_unlock_bh(&dev->mt76.lock);
		}

		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

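/* Handle a TXS event from the hardware: route it to the tracked skb if
 * possible, otherwise report an skb-less TX status for the station.
 */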
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7615_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[0]);
	pid = FIELD_GET(MT_TXS0_PID, txs);
	txs = le32_to_cpu(txs_data[2]);
	wcidx = FIELD_GET(MT_TXS2_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7615_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7615_WTBL_STA || !sta)
		goto out;

	if (mt7615_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}

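/* Process a TX-free event: for every released token, unmap the fragments,
 * complete the associated skb and return the txwi cache entry.
 */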
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	u8 i, count;

	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		spin_lock_bh(&dev->token_lock);
		txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
		spin_unlock_bh(&dev->token_lock);

		if (!txwi)
			continue;

		mt7615_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			mt76_tx_complete_skb(mdev, txwi->skb);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}
	dev_kfree_skb(skb);
}

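/* Periodic MAC housekeeping; currently it only expires pending TX status
 * requests and re-arms itself. The container_of() cast below relies on
 * struct mt76_dev being the first member of struct mt7615_dev.
 */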
void mt7615_mac_work(struct work_struct *work)
{
	struct mt7615_dev *dev;

	dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
						mac_work.work);

	mt76_tx_status_check(&dev->mt76, NULL, false);
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT7615_WATCHDOG_TIME);
}