// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"

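/* Allocate the entry ring for one rx queue; the entries are devm-managed
 * and are released automatically when the device is torn down.
 */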
static int
mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->head = q->tail = 0;
	q->queued = 0;

	return 0;
}

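/* Allocate one tx queue per hardware queue below MT_TXQ_MCU_WA,
 * i.e. the four ACs plus the PSD and MCU queues.
 */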
static int mt76s_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < MT_TXQ_MCU_WA; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = i;
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
	}

	return 0;
}

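/* Cancel the tx status polling work and flush all pending tx status
 * reports.
 */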
void mt76s_stop_txrx(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;

	cancel_work_sync(&sdio->stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76s_stop_txrx);

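/* Allocate the main rx queue together with all tx queues. */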
int mt76s_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76s_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76s_alloc_queues);

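/* Dequeue the oldest rx entry, or return NULL if the queue is empty. */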
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76_queue_entry *e = NULL;

	spin_lock_bh(&q->lock);
	if (q->queued > 0) {
		e = &q->entry[q->head];
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

	return e;
}

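/* Pass completed rx frames up to the driver until the queue is drained
 * or the device is no longer initialized, then let the core complete
 * rx polling for the main queue.
 */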
static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int nframes = 0;

	while (true) {
		struct mt76_queue_entry *e;

		if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
			break;

		e = mt76s_get_next_rx_entry(q);
		if (!e || !e->skb)
			break;

		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
		e->skb = NULL;
		nframes++;
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	return nframes;
}

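/* Reap completed entries from a tx queue: MCU frames are simply freed,
 * data frames are handed to the driver's tx completion hook. A queue
 * stopped on overflow is restarted once enough entries have drained.
 */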
static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	u32 n_dequeued = 0, n_sw_dequeued = 0;
	struct mt76_queue_entry entry;
	struct mt76_queue *q = sq->q;
	bool wake;

	while (q->queued > n_dequeued) {
		if (!q->entry[q->head].done)
			break;

		if (q->entry[q->head].schedule) {
			q->entry[q->head].schedule = false;
			n_sw_dequeued++;
		}

		entry = q->entry[q->head];
		q->entry[q->head].done = false;
		q->head = (q->head + 1) % q->ndesc;
		n_dequeued++;

		if (qid == MT_TXQ_MCU)
			dev_kfree_skb(entry.skb);
		else
			dev->drv->tx_complete_skb(dev, qid, &entry);
	}

	spin_lock_bh(&q->lock);

	sq->swq_queued -= n_sw_dequeued;
	q->queued -= n_dequeued;

	wake = q->stopped && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	spin_unlock_bh(&q->lock);

	if (qid == MT_TXQ_MCU)
		goto out;

	mt76_txq_schedule(&dev->phy, qid);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);

	wake_up_process(dev->sdio.tx_kthread);
out:
	return n_dequeued;
}

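/* Work item polling tx status data from the device; it requeues itself
 * as long as it makes progress and the device is still running.
 */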
static void mt76s_tx_status_data(struct work_struct *work)
{
	struct mt76_sdio *sdio;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	sdio = container_of(work, struct mt76_sdio, stat_work);
	dev = container_of(sdio, struct mt76_dev, sdio);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &sdio->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

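/* Enqueue a data frame. Unlike the raw variant below, this does not take
 * q->lock itself; it relies on the caller (the mt76 tx path) holding it.
 */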
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int err, len = skb->len;
	u16 idx = q->tail;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	q->entry[q->tail].skb = tx_info.skb;
	q->entry[q->tail].buf_sz = len;
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;

	return idx;
}

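/* Enqueue a raw (MCU) frame; takes q->lock itself and adjusts the skb
 * padding before queueing.
 */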
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
		       struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	int ret = -ENOSPC, len = skb->len;

	spin_lock_bh(&q->lock);
	if (q->queued == q->ndesc)
		goto out;

	ret = mt76_skb_adjust_pad(skb);
	if (ret)
		goto out;

	q->entry[q->tail].buf_sz = len;
	q->entry[q->tail].skb = skb;
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;

out:
	spin_unlock_bh(&q->lock);

	return ret;
}

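/* Queue kick handler: wake the tx kthread to push out queued frames. */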
static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_sdio *sdio = &dev->sdio;

	wake_up_process(sdio->tx_kthread);
}

static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};

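/* Main SDIO event thread: drain the rx queues, reap tx completions,
 * schedule tx status polling, and sleep when there is nothing to do.
 */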
static int mt76s_kthread_run(void *data)
{
	struct mt76_dev *dev = data;
	struct mt76_phy *mphy = &dev->phy;

	while (!kthread_should_stop()) {
		int i, nframes = 0;

		cond_resched();

		/* rx processing */
		local_bh_disable();
		rcu_read_lock();

		mt76_for_each_q_rx(dev, i)
			nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

		rcu_read_unlock();
		local_bh_enable();

		/* tx processing */
		for (i = 0; i < MT_TXQ_MCU_WA; i++)
			nframes += mt76s_process_tx_queue(dev, i);

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &mphy->state))
			queue_work(dev->wq, &dev->sdio.stat_work);

		if (!nframes || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}
	}

	return 0;
}

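/* Tear down the SDIO glue: stop both kthreads, release the SDIO irq and
 * free any skbs still left in the rx rings.
 */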
void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	kthread_stop(sdio->kthread);
	kthread_stop(sdio->tx_kthread);
	mt76s_stop_txrx(dev);

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);

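/* Initialize the SDIO glue: spawn the event kthread, set up the tx status
 * work and install the SDIO queue ops on the mt76 device.
 */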
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops)
{
	struct mt76_sdio *sdio = &dev->sdio;

	sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s");
	if (IS_ERR(sdio->kthread))
		return PTR_ERR(sdio->kthread);

	INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);

	mutex_init(&sdio->sched.lock);
	dev->queue_ops = &sdio_queue_ops;
	dev->bus = bus_ops;
	dev->sdio.func = func;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_init);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");