1 /* SPDX-License-Identifier: ISC */
2 
3 #include "mt7603.h"
4 #include "mac.h"
5 #include "../dma.h"
6 
7 static int
8 mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
9 		     int idx, int n_desc)
10 {
11 	int ret;
12 
13 	q->hw_idx = idx;
14 	q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
15 	q->ndesc = n_desc;
16 
17 	ret = mt76_queue_alloc(dev, q);
18 	if (ret)
19 		return ret;
20 
21 	mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
22 
23 	return 0;
24 }
25 
/*
 * Handle a frame looped back from the hardware on the MCU RX queue.
 *
 * The skb starts with the original TX descriptor (MT_TXD_SIZE bytes)
 * followed by the 802.11 header.  The descriptor is parsed to recover
 * the destination station, then rewritten so the frame can later be
 * retransmitted on the management HW queue, and the frame is stashed
 * on the station's powersave queue.  NOTE(review): presumably these are
 * frames the hardware could not deliver because the station entered
 * powersave — confirm against the firmware/MAC documentation.
 *
 * Consumes the skb on every path (queued or freed).
 */
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
	__le32 *txd = (__le32 *)skb->data;
	struct ieee80211_hdr *hdr;
	struct ieee80211_sta *sta;
	struct mt7603_sta *msta;
	struct mt76_wcid *wcid;
	void *priv;
	int idx;
	u32 val;
	u8 tid;

	/* Must hold at least a TX descriptor plus an 802.11 header. */
	if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
		goto free;

	val = le32_to_cpu(txd[1]);
	idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
	skb->priority = FIELD_GET(MT_TXD1_TID, val);

	/* Reject indices outside the per-station WTBL range. */
	if (idx >= MT7603_WTBL_STA - 1)
		goto free;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (!wcid)
		goto free;

	/* Go via void* to get from the wcid to the wrapping mt7603_sta. */
	priv = msta = container_of(wcid, struct mt7603_sta, wcid);
	val = le32_to_cpu(txd[0]);
	/* Remember the original queue for when the frame is released. */
	skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));

	/* Redirect the descriptor to the management HW queue. */
	val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
	val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
	txd[0] = cpu_to_le32(val);

	/* mt7603_sta's wcid is the drv_priv area of the ieee80211_sta. */
	sta = container_of(priv, struct ieee80211_sta, drv_priv);
	hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	/* Tell mac80211 this station has frames buffered for this TID. */
	ieee80211_sta_set_buffered(sta, tid, true);

	spin_lock_bh(&dev->ps_lock);
	__skb_queue_tail(&msta->psq, skb);
	/* Cap the powersave queue at 64 entries; drop the oldest frame. */
	if (skb_queue_len(&msta->psq) >= 64) {
		skb = __skb_dequeue(&msta->psq);
		dev_kfree_skb(skb);
	}
	spin_unlock_bh(&dev->ps_lock);
	return;

free:
	dev_kfree_skb(skb);
}
78 
/*
 * Dispatch a frame received on one of the RX DMA queues.
 *
 * Takes ownership of @skb: it is always either handed on
 * (mt76_mcu_rx_event / mt7603_rx_loopback_skb / mt76_rx) or freed here.
 * MCU-queue traffic is either an MCU event or a looped-back TX frame;
 * main-queue traffic is dispatched by the packet type in the first
 * RX descriptor word.
 */
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));

	if (q == MT_RXQ_MCU) {
		if (type == PKT_TYPE_RX_EVENT)
			mt76_mcu_rx_event(&dev->mt76, skb);
		else
			mt7603_rx_loopback_skb(dev, skb);
		return;
	}

	switch (type) {
	case PKT_TYPE_TXS:
		/* One skb can carry several 5-word TX status reports. */
		for (rxd++; rxd + 5 <= end; rxd += 5)
			mt7603_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	case PKT_TYPE_NORMAL:
		if (mt7603_mac_fill_rx(dev, skb) == 0) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		/* fall through */
	default:
		/* Unknown type, or a normal frame that failed parsing. */
		dev_kfree_skb(skb);
		break;
	}
}
117 
118 static int
119 mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
120 		     int idx, int n_desc, int bufsize)
121 {
122 	int ret;
123 
124 	q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
125 	q->ndesc = n_desc;
126 	q->buf_size = bufsize;
127 
128 	ret = mt76_queue_alloc(dev, q);
129 	if (ret)
130 		return ret;
131 
132 	mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
133 
134 	return 0;
135 }
136 
137 static void
138 mt7603_tx_tasklet(unsigned long data)
139 {
140 	struct mt7603_dev *dev = (struct mt7603_dev *)data;
141 	int i;
142 
143 	dev->tx_dma_check = 0;
144 	for (i = MT_TXQ_MCU; i >= 0; i--)
145 		mt76_queue_tx_cleanup(dev, i, false);
146 
147 	mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
148 }
149 
/*
 * Set up all TX/RX DMA rings and bring the WPDMA engine into a known
 * state.  Returns 0 on success or a negative errno from ring
 * allocation.  The sequence is order-sensitive: DMA is disabled and
 * the ring indices reset before any ring is programmed.
 */
int mt7603_dma_init(struct mt7603_dev *dev)
{
	/* mac80211 AC index -> hardware TX queue number. */
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	int ret;
	int i;

	mt76_dma_attach(&dev->mt76);

	init_waitqueue_head(&dev->mt76.mmio.mcu.wait);
	skb_queue_head_init(&dev->mt76.mmio.mcu.res_q);

	tasklet_init(&dev->tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);

	/* Stop the DMA engine before reprogramming the rings. */
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
		   MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	/* Reset every ring index, then reset the PSE client state. */
	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
	mt7603_pse_client_reset(dev);

	/* One TX ring per WMM access category. */
	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
		ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i],
					   wmm_queue_map[i],
					   MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	/* Dedicated rings: PS-delivery/mgmt, MCU, beacon, broadcast. */
	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				   MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				   MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON],
				   MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB],
				   MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	/* RX rings: 1 = MCU responses/loopback, 0 = data path. */
	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				   MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
				   MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	/* No interrupt-delay coalescing. */
	mt76_wr(dev, MT_DELAY_INT_CFG, 0);
	return mt76_init_queues(dev);
}
218 
219 void mt7603_dma_cleanup(struct mt7603_dev *dev)
220 {
221 	mt76_clear(dev, MT_WPDMA_GLO_CFG,
222 		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
223 		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
224 		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
225 
226 	tasklet_kill(&dev->tx_tasklet);
227 	mt76_dma_cleanup(&dev->mt76);
228 }
229