// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */

#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "prestera_dsa.h"
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_rxtx.h"

#define PRESTERA_SDMA_WAIT_MUL		10
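
/* SDMA descriptor as consumed by the hardware: two little-endian
 * control/status words, the mapped address of the data buffer and a link
 * to the next descriptor in the ring.
 */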
struct prestera_sdma_desc {
	__le32 word1;
	__le32 word2;
	__le32 buff;
	__le32 next;
} __packed __aligned(16);

#define PRESTERA_SDMA_BUFF_SIZE_MAX	1544

#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
	((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))

#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
	(PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)

#define PRESTERA_SDMA_RX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_RX_DESC_DMA_OWN	1

#define PRESTERA_SDMA_RX_QUEUE_NUM	8

#define PRESTERA_SDMA_RX_DESC_PER_Q	1000

#define PRESTERA_SDMA_TX_DESC_PER_Q	1000
#define PRESTERA_SDMA_TX_MAX_BURST	64

#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_TX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_TX_DESC_DMA_OWN	1U

#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
	(PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)

#define PRESTERA_SDMA_TX_DESC_LAST	BIT(20)
#define PRESTERA_SDMA_TX_DESC_FIRST	BIT(21)
#define PRESTERA_SDMA_TX_DESC_CALC_CRC	BIT(12)

#define PRESTERA_SDMA_TX_DESC_SINGLE	\
	(PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)

#define PRESTERA_SDMA_TX_DESC_INIT	\
	(PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)

#define PRESTERA_SDMA_RX_INTR_MASK_REG		0x2814
#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG	0x2680
#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n)	(0x260C + (n) * 16)

#define PRESTERA_SDMA_TX_QUEUE_DESC_REG		0x26C0
#define PRESTERA_SDMA_TX_QUEUE_START_REG	0x2868

struct prestera_sdma_buf {
	struct prestera_sdma_desc *desc;
	dma_addr_t desc_dma;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	bool is_used;
};

struct prestera_rx_ring {
	struct prestera_sdma_buf *bufs;
	int next_rx;
};

struct prestera_tx_ring {
	struct prestera_sdma_buf *bufs;
	int next_tx;
	int max_burst;
	int burst;
};

struct prestera_sdma {
	struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
	struct prestera_tx_ring tx_ring;
	struct prestera_switch *sw;
	struct dma_pool *desc_pool;
	struct work_struct tx_work;
	struct napi_struct rx_napi;
	struct net_device napi_dev;
	u32 map_addr;
	u64 dma_mask;
	/* protect SDMA with concurrent access from multiple CPUs */
	spinlock_t tx_lock;
};

struct prestera_rxtx {
	struct prestera_sdma sdma;
};
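
/* Allocate a hardware descriptor for @buf from the DMA pool; the data
 * buffer itself is mapped separately later.
 */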
static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
				  struct prestera_sdma_buf *buf)
{
	struct prestera_sdma_desc *desc;
	dma_addr_t dma;

	desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
	if (!desc)
		return -ENOMEM;

	buf->buf_dma = DMA_MAPPING_ERROR;
	buf->desc_dma = dma;
	buf->desc = desc;
	buf->skb = NULL;

	return 0;
}
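
/* Translate a host DMA address into the address space used by the SDMA
 * engine, applying the base offset obtained at rxtx init time.
 */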
static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
{
	return sdma->map_addr + pa;
}

static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc,
				       dma_addr_t buf)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
	desc->word2 = cpu_to_le32(word);

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));

	/* make sure the buffer is set before resetting the descriptor */
	wmb();
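
	/* return the descriptor to hardware ownership; bit 31 is the
	 * ownership bit (see PRESTERA_SDMA_RX_DESC_OWNER)
	 */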
	desc->word1 = cpu_to_le32(0xA0000000);
}

static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}
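
/* Allocate and DMA-map a fresh receive skb for @buf, releasing the
 * mapping of the previously attached buffer, if any.
 */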
static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
				      struct prestera_sdma_buf *buf)
{
	struct device *dev = sdma->sw->dev->dev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto err_dma_map;

	if (buf->skb)
		dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
				 DMA_FROM_DEVICE);

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;

err_dma_map:
	kfree_skb(skb);

	return -ENOMEM;
}
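
/* Detach the received skb from @buf and re-arm the descriptor with a new
 * buffer. If the refill allocation fails, keep the old buffer in the ring
 * and hand a copy of the received data to the stack instead.
 */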
static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
						struct prestera_sdma_buf *buf)
{
	dma_addr_t buf_dma = buf->buf_dma;
	struct sk_buff *skb = buf->skb;
	u32 len = skb->len;
	int err;

	err = prestera_sdma_rx_skb_alloc(sdma, buf);
	if (err) {
		buf->buf_dma = buf_dma;
		buf->skb = skb;

		skb = alloc_skb(skb->len, GFP_ATOMIC);
		if (skb) {
			skb_put(skb, len);
			skb_copy_from_linear_data(buf->skb, skb->data, len);
		}
	}

	prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);

	return skb;
}
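
/* Parse and strip the DSA tag from a received frame, resolve the source
 * port and prepare the skb for delivery to the network stack.
 */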
static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
				     struct sk_buff *skb)
{
	const struct prestera_port *port;
	struct prestera_dsa dsa;
	u32 hw_port, dev_id;
	int err;

	skb_pull(skb, ETH_HLEN);

	/* the EtherType field is part of the DSA header */
	err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
	if (err)
		return err;

	dev_id = dsa.hw_dev_num;
	hw_port = dsa.port_num;

	port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
	if (unlikely(!port)) {
		dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
				     dev_id, hw_port);
		return -ENOENT;
	}

	if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
		return -EINVAL;

	/* remove DSA tag and update checksum */
	skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);

	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
		ETH_ALEN * 2);

	skb_push(skb, ETH_HLEN);

	skb->protocol = eth_type_trans(skb, port->dev);

	if (dsa.vlan.is_tagged) {
		u16 tci = dsa.vlan.vid & VLAN_VID_MASK;

		tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
		if (dsa.vlan.cfi_bit)
			tci |= VLAN_CFI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}

	return 0;
}

static int prestera_sdma_next_rx_buf_idx(int buf_idx)
{
	return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
}
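
/* NAPI poll: service all RX queues round-robin until the budget is
 * exhausted or every queue is drained, then unmask RX interrupts.
 */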
static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	unsigned int rxq_done_map = 0;
	struct prestera_sdma *sdma;
	struct list_head rx_list;
	unsigned int qmask;
	int pkts_done = 0;
	int q;

	qmask = GENMASK(qnum - 1, 0);

	INIT_LIST_HEAD(&rx_list);

	sdma = container_of(napi, struct prestera_sdma, rx_napi);

	while (pkts_done < budget && rxq_done_map != qmask) {
		for (q = 0; q < qnum && pkts_done < budget; q++) {
			struct prestera_rx_ring *ring = &sdma->rx_ring[q];
			struct prestera_sdma_desc *desc;
			struct prestera_sdma_buf *buf;
			int buf_idx = ring->next_rx;
			struct sk_buff *skb;

			buf = &ring->bufs[buf_idx];
			desc = buf->desc;

			if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
				rxq_done_map &= ~BIT(q);
			} else {
				rxq_done_map |= BIT(q);
				continue;
			}

			pkts_done++;

			__skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));

			skb = prestera_sdma_rx_skb_get(sdma, buf);
			if (!skb)
				goto rx_next_buf;

			if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
				goto rx_next_buf;

			list_add_tail(&skb->list, &rx_list);
rx_next_buf:
			ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
		}
	}

	if (pkts_done < budget && napi_complete_done(napi, pkts_done))
		prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
			       GENMASK(9, 2));

	netif_receive_skb_list(&rx_list);

	return pkts_done;
}

static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int q, b;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		if (!ring->bufs)
			break;

		for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
			struct prestera_sdma_buf *buf = &ring->bufs[b];

			if (buf->desc_dma)
				dma_pool_free(sdma->desc_pool, buf->desc,
					      buf->desc_dma);

			if (!buf->skb)
				continue;

			if (buf->buf_dma != DMA_MAPPING_ERROR)
				dma_unmap_single(sdma->sw->dev->dev,
						 buf->buf_dma, buf->skb->len,
						 DMA_FROM_DEVICE);
			kfree_skb(buf->skb);
		}
	}
}
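
/* Build a circular descriptor ring for every RX queue, pre-fill each
 * descriptor with a mapped skb and point the hardware at the ring head.
 */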
static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
{
	int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int err;
	int q;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_sdma_buf *head, *tail, *next, *prev;
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
		if (!ring->bufs)
			return -ENOMEM;

		ring->next_rx = 0;

		tail = &ring->bufs[bnum - 1];
		head = &ring->bufs[0];
		next = head;
		prev = next;

		do {
			err = prestera_sdma_buf_init(sdma, next);
			if (err)
				return err;

			err = prestera_sdma_rx_skb_alloc(sdma, next);
			if (err)
				return err;

			prestera_sdma_rx_desc_init(sdma, next->desc,
						   next->buf_dma);

			prestera_sdma_rx_desc_set_next(sdma, prev->desc,
						       next->desc_dma);

			prev = next;
			next++;
		} while (prev != tail);

		/* join tail with head to make a circular list */
		prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);

		prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
			       prestera_sdma_map(sdma, head->desc_dma));
	}

	/* make sure all rx descs are filled before enabling all rx queues */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(7, 0));

	return 0;
}

static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc)
{
	desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
	desc->word2 = 0;
}

static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}
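
/* Program the buffer address and byte count into the descriptor; the
 * count reserves room for the FCS (ETH_FCS_LEN) on top of the skb length.
 */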
static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
					  struct prestera_sdma_desc *desc,
					  dma_addr_t buf, size_t len)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
	desc->word2 = cpu_to_le32(word);
}

static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
{
	u32 word = le32_to_cpu(desc->word1);

	word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;

	/* make sure everything is written before enabling xmit */
	wmb();

	desc->word1 = cpu_to_le32(word);
}

static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
				    struct prestera_sdma_buf *buf,
				    struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	dma_addr_t dma;

	dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma))
		return -ENOMEM;

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;
}

static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
				       struct prestera_sdma_buf *buf)
{
	struct device *dma_dev = sdma->sw->dev->dev;

	dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
}
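
/* Walk the TX ring and release buffers whose descriptors the hardware has
 * already transmitted.
 */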
static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
{
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma *sdma;
	int b;

	sdma = container_of(work, struct prestera_sdma, tx_work);

	tx_ring = &sdma->tx_ring;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &tx_ring->bufs[b];

		if (!buf->is_used)
			continue;

		if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
			continue;

		prestera_sdma_tx_buf_unmap(sdma, buf);
		dev_consume_skb_any(buf->skb);
		buf->skb = NULL;

		/* make sure everything is cleaned up */
		wmb();

		buf->is_used = false;
	}
}
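
/* Build the circular TX descriptor ring and point the hardware at its
 * head.
 */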
static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
{
	struct prestera_sdma_buf *head, *tail, *next, *prev;
	struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int err;

	INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
	spin_lock_init(&sdma->tx_lock);

	tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
	if (!tx_ring->bufs)
		return -ENOMEM;

	tail = &tx_ring->bufs[bnum - 1];
	head = &tx_ring->bufs[0];
	next = head;
	prev = next;

	tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
	tx_ring->burst = tx_ring->max_burst;
	tx_ring->next_tx = 0;

	do {
		err = prestera_sdma_buf_init(sdma, next);
		if (err)
			return err;

		next->is_used = false;

		prestera_sdma_tx_desc_init(sdma, next->desc);

		prestera_sdma_tx_desc_set_next(sdma, prev->desc,
					       next->desc_dma);

		prev = next;
		next++;
	} while (prev != tail);

	/* join tail with head to make a circular list */
	prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);

	/* make sure descriptors are written */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
		       prestera_sdma_map(sdma, head->desc_dma));

	return 0;
}

static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
{
	struct prestera_tx_ring *ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int b;

	cancel_work_sync(&sdma->tx_work);

	if (!ring->bufs)
		return;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &ring->bufs[b];

		if (buf->desc)
			dma_pool_free(sdma->desc_pool, buf->desc,
				      buf->desc_dma);

		if (!buf->skb)
			continue;

		dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
				 buf->skb->len, DMA_TO_DEVICE);

		dev_consume_skb_any(buf->skb);
	}
}

static void prestera_rxtx_handle_event(struct prestera_switch *sw,
				       struct prestera_event *evt,
				       void *arg)
{
	struct prestera_sdma *sdma = arg;

	if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
		return;

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
	napi_schedule(&sdma->rx_napi);
}

static int prestera_sdma_switch_init(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;
	struct device *dev = sw->dev->dev;
	struct prestera_rxtx_params p;
	int err;

	p.use_sdma = true;

	err = prestera_hw_rxtx_init(sw, &p);
	if (err) {
		dev_err(dev, "failed to init rxtx by hw\n");
		return err;
	}

	sdma->dma_mask = dma_get_mask(dev);
	sdma->map_addr = p.map_addr;
	sdma->sw = sw;

	sdma->desc_pool = dma_pool_create("desc_pool", dev,
					  sizeof(struct prestera_sdma_desc),
					  16, 0);
	if (!sdma->desc_pool)
		return -ENOMEM;

	err = prestera_sdma_rx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init rx ring\n");
		goto err_rx_init;
	}

	err = prestera_sdma_tx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init tx ring\n");
		goto err_tx_init;
	}

	err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
						 prestera_rxtx_handle_event,
						 sdma);
	if (err)
		goto err_evt_register;

	init_dummy_netdev(&sdma->napi_dev);

	netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
	napi_enable(&sdma->rx_napi);

	return 0;

err_evt_register:
err_tx_init:
	prestera_sdma_tx_fini(sdma);
err_rx_init:
	prestera_sdma_rx_fini(sdma);

	dma_pool_destroy(sdma->desc_pool);
	return err;
}

static void prestera_sdma_switch_fini(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;

	napi_disable(&sdma->rx_napi);
	netif_napi_del(&sdma->rx_napi);
	prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
					     prestera_rxtx_handle_event);
	prestera_sdma_tx_fini(sdma);
	prestera_sdma_rx_fini(sdma);
	dma_pool_destroy(sdma->desc_pool);
}
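
/* The TX engine is ready for a new run once bit 0 of the queue-start
 * register has been cleared again.
 */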
static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
{
	return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
}

static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
				 struct prestera_tx_ring *tx_ring)
{
	int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;

	do {
		if (prestera_sdma_is_ready(sdma))
			return 0;

		udelay(1);
	} while (--tx_wait_num);

	return -EBUSY;
}

static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
{
	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
	schedule_work(&sdma->tx_work);
}
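
/* Place the skb into the next free TX descriptor and kick the SDMA engine.
 * After max_burst frames in a row the engine is polled until idle before
 * further descriptors are reused.
 */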
static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
				      struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	struct net_device *dev = skb->dev;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma_buf *buf;
	int err;

	spin_lock(&sdma->tx_lock);

	tx_ring = &sdma->tx_ring;

	buf = &tx_ring->bufs[tx_ring->next_tx];
	if (buf->is_used) {
		schedule_work(&sdma->tx_work);
		goto drop_skb;
	}

	if (unlikely(eth_skb_pad(skb)))
		goto drop_skb_nofree;

	err = prestera_sdma_tx_buf_map(sdma, buf, skb);
	if (err)
		goto drop_skb;

	prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);

	dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
				   DMA_TO_DEVICE);

	if (tx_ring->burst) {
		tx_ring->burst--;
	} else {
		tx_ring->burst = tx_ring->max_burst;

		err = prestera_sdma_tx_wait(sdma, tx_ring);
		if (err)
			goto drop_skb_unmap;
	}

	tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
	prestera_sdma_tx_desc_xmit(buf->desc);
	buf->is_used = true;

	prestera_sdma_tx_start(sdma);

	goto tx_done;

drop_skb_unmap:
	prestera_sdma_tx_buf_unmap(sdma, buf);
drop_skb:
	dev_consume_skb_any(skb);
drop_skb_nofree:
	dev->stats.tx_dropped++;
tx_done:
	spin_unlock(&sdma->tx_lock);
	return NETDEV_TX_OK;
}

int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
	struct prestera_rxtx *rxtx;

	rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
	if (!rxtx)
		return -ENOMEM;

	sw->rxtx = rxtx;

	return prestera_sdma_switch_init(sw);
}

void prestera_rxtx_switch_fini(struct prestera_switch *sw)
{
	prestera_sdma_switch_fini(sw);
	kfree(sw->rxtx);
}

int prestera_rxtx_port_init(struct prestera_port *port)
{
	int err;

	err = prestera_hw_rxtx_port_init(port);
	if (err)
		return err;

	port->dev->needed_headroom = PRESTERA_DSA_HLEN;

	return 0;
}

netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
{
	struct prestera_dsa dsa;

	dsa.hw_dev_num = port->dev_id;
	dsa.port_num = port->hw_id;

	if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
		return NET_XMIT_DROP;

	skb_push(skb, PRESTERA_DSA_HLEN);
	memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);

	if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
		return NET_XMIT_DROP;

	return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
}