xref: /openbmc/linux/drivers/net/wireless/mediatek/mt76/dma.c (revision 06d5d6b7f9948a89543e1160ef852d57892c750d)
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

/* Marks the first queue entry of a frame that carries a TXWI buffer */
#define DMA_DUMMY_TXWI	((void *) ~0)

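/*
 * Allocate the coherent descriptor ring and the per-entry bookkeeping array
 * for a queue, mark every descriptor as done and program the ring base,
 * size and index registers.
 */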
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int size;
	int i;

	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->swq);

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	iowrite32(q->desc_dma, &q->regs->desc_base);
	iowrite32(0, &q->regs->cpu_idx);
	iowrite32(0, &q->regs->dma_idx);
	iowrite32(q->ndesc, &q->regs->ring_size);

	return 0;
}

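/*
 * Write hardware descriptors for up to two buffers each and advance the
 * ring head. The skb and txwi pointers are stored in the last entry used
 * so they can be reclaimed on completion; returns that entry's index.
 */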
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi)
		q->entry[q->head].txwi = DMA_DUMMY_TXWI;

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}

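/*
 * Reclaim a completed TX slot: unmap its buffers where needed, drop the
 * dummy txwi marker, hand the old entry back through prev_e and clear the
 * slot.
 */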
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->txwi || !e->skb) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_TXWI)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

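/*
 * Reprogram the ring registers and resynchronize the software head/tail
 * with the current hardware DMA index.
 */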
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	iowrite32(q->desc_dma, &q->regs->desc_base);
	iowrite32(q->ndesc, &q->regs->ring_size);
	q->head = ioread32(&q->regs->dma_idx);
	q->tail = q->head;
	iowrite32(q->head, &q->regs->cpu_idx);
}

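/*
 * Reap completed TX descriptors up to the hardware DMA index (or the whole
 * ring when flushing), complete their skbs, recycle txwi buffers and wake
 * the mac80211 queue once enough room has been freed.
 */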
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = &dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	if (flush)
		last = -1; /* drain every queued entry */
	else
		last = ioread32(&q->regs->dma_idx);

	while (q->queued && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			q->swq_queued--;

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		if (entry.skb) {
			spin_unlock_bh(&q->lock);
			dev->drv->tx_complete_skb(dev, q, &entry, flush);
			spin_lock_bh(&q->lock);
		}

		if (entry.txwi) {
			mt76_put_txwi(dev, entry.txwi);
			wake = !flush;
		}

		if (!flush && q->tail == last)
			last = ioread32(&q->regs->dma_idx);
	}

	if (!flush)
		mt76_txq_schedule(dev, q);
	else
		mt76_dma_sync_idx(dev, q);

	wake = wake && q->stopped &&
	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

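/*
 * Detach the RX buffer from a descriptor slot: report its length, info word
 * and "more fragments" state, unmap it and return the buffer pointer.
 */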
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

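/*
 * Pop the buffer at the ring tail. Unless flushing, stop at the first
 * descriptor the hardware has not yet marked as done.
 */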
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

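/* Tell the hardware about newly queued descriptors by updating cpu_idx */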
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	iowrite32(q->head, &q->regs->cpu_idx);
}

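/*
 * Map an skb and queue it as a single-buffer descriptor with the given
 * tx_info word, then kick the queue.
 */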
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue *q = &dev->q_tx[qid];
	struct mt76_queue_buf buf;
	dma_addr_t addr;

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, addr))
		return -ENOMEM;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;
}

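/*
 * Queue a TX frame: reserve a txwi buffer, let the driver fill the TXWI,
 * map the skb head and all fragments and chain them onto the ring. On
 * failure the mappings are undone and the frame is completed with an error.
 */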
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta)
{
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct mt76_queue_buf buf[32];
	struct sk_buff *iter;
	dma_addr_t addr;
	int len;
	u32 tx_info = 0;
	int n, ret;

	t = mt76_get_txwi(dev);
	if (!t) {
		ieee80211_free_txskb(dev->hw, skb);
		return -ENOMEM;
	}

	skb->prev = skb->next = NULL;
	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
				       &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto free;

	len = skb->len - skb->data_len;
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, addr)) {
		ret = -ENOMEM;
		goto free;
	}

	n = 0;
	buf[n].addr = t->dma_addr;
	buf[n++].len = dev->drv->txwi_size;
	buf[n].addr = addr;
	buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev->dev, addr))
			goto unmap;

		buf[n].addr = addr;
		buf[n++].len = iter->len;
	}

	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
		goto unmap;

	return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);

unmap:
	ret = -ENOMEM;
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
				 DMA_TO_DEVICE);

free:
	e.skb = skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, q, &e, true);
	mt76_put_txwi(dev, t);
	return ret;
}
EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);

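/*
 * Refill the RX ring with page-fragment buffers until it is full, then kick
 * the queue so the hardware sees the new descriptors.
 */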
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	int idx;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev->dev, addr)) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

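/*
 * Free all buffers still held by the RX ring and drain the page-fragment
 * cache backing them.
 */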
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

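/*
 * Reset an RX queue: clear the done bits, free pending buffers, resync the
 * ring indices and refill the ring.
 */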
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);
}

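/*
 * Append a buffer to the skb currently being reassembled on this queue and
 * hand the frame to the driver once the last fragment has arrived.
 */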
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;

	offset += q->buf_offset;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			q->buf_size);

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}

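/*
 * Process up to budget received buffers: build skbs, reassemble multi-buffer
 * frames via q->rx_head, pass completed frames to the driver and refill the
 * ring.
 */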
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0;
	struct sk_buff *skb;
	unsigned char *data;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;

			skb_free_frag(data);
			continue;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}
		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *) skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

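/*
 * NAPI poll handler: process RX frames until the budget is exhausted or the
 * queue runs dry; if the budget is not used up, complete NAPI and notify the
 * driver through rx_poll_complete.
 */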
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget) {
		napi_complete(napi);
		dev->drv->rx_poll_complete(dev, qid);
	}

	return done;
}

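/*
 * Register one NAPI context per RX queue on a dummy netdev, pre-fill the RX
 * rings, initialize the rx_skb queues and enable polling.
 */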
static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		skb_queue_head_init(&dev->rx_skb[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);