// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Microsemi SoCs FDMA driver
 *
 * Copyright (c) 2021 Microchip
 *
 * Page recycling code is mostly taken from gianfar driver.
 */

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dsa/ocelot.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include "ocelot_fdma.h"
#include "ocelot_qs.h"

DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled);

static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data)
{
	regmap_write(ocelot->targets[FDMA], reg, data);
}

static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg)
{
	u32 retval;

	regmap_read(ocelot->targets[FDMA], reg, &retval);

	return retval;
}

static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx)
{
	return base + idx * sizeof(struct ocelot_fdma_dcb);
}

static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma)
{
	return (dma - base) / sizeof(struct ocelot_fdma_dcb);
}

static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz)
{
	return unlikely(idx == ring_sz - 1) ? 0 : idx + 1;
}

static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz)
{
	return unlikely(idx == 0) ? ring_sz - 1 : idx - 1;
}

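/* Both rings use the classic "keep one slot empty" convention: when
 * next_to_use == next_to_clean the ring is empty, so at most SIZE - 1
 * descriptors can ever be in flight, hence the trailing "- 1" in the free
 * space computations below. For example (hypothetical numbers), with a ring
 * of 512 entries and next_to_use == next_to_clean, 511 slots are reported
 * free.
 */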
static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma)
{
	struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring;

	if (rx_ring->next_to_use >= rx_ring->next_to_clean)
		return OCELOT_FDMA_RX_RING_SIZE -
		       (rx_ring->next_to_use - rx_ring->next_to_clean) - 1;
	else
		return rx_ring->next_to_clean - rx_ring->next_to_use - 1;
}

static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma)
{
	struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;

	if (tx_ring->next_to_use >= tx_ring->next_to_clean)
		return OCELOT_FDMA_TX_RING_SIZE -
		       (tx_ring->next_to_use - tx_ring->next_to_clean) - 1;
	else
		return tx_ring->next_to_clean - tx_ring->next_to_use - 1;
}

static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma)
{
	struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;

	return tx_ring->next_to_clean == tx_ring->next_to_use;
}

static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
				      int chan)
{
	ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma);
	/* Barrier to force memory writes to DCB to be completed before starting
	 * the channel.
	 */
	wmb();
	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
}

static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot)
{
	return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
}

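/* Poll until the hardware reports the channel as safe, i.e. in a state
 * where its DCB chain can be modified behind its back. The read helper
 * above only exists to match the readx_poll_timeout_atomic() calling
 * convention.
 */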
static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
{
	u32 safe;

	return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe,
					 safe & BIT(chan), 0,
					 OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
}

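/* DCB data pointers must be 32-bit aligned, so the pointer and length are
 * rounded down and the dropped low two bits of the address are conveyed to
 * the hardware through the BLOCKO (block offset) field of the status word.
 */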
static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
				     dma_addr_t dma_addr,
				     size_t size)
{
	u32 offset = dma_addr & 0x3;

	dcb->llp = 0;
	dcb->datap = ALIGN_DOWN(dma_addr, 4);
	dcb->datal = ALIGN_DOWN(size, 4);
	dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset);
}

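/* RX buffers are page based: each page is DMA mapped once and split into
 * two OCELOT_FDMA_RX_SIZE halves that are handed to the hardware in turn,
 * which is what enables the gianfar-style page recycling mentioned at the
 * top of this file.
 */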
static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot,
				      struct ocelot_fdma_rx_buf *rxb)
{
	dma_addr_t mapping;
	struct page *page;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ocelot->dev, mapping))) {
		__free_page(page);
		return false;
	}

	rxb->page = page;
	rxb->page_offset = 0;
	rxb->dma_addr = mapping;

	return true;
}

static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_rx_ring *rx_ring;
	struct ocelot_fdma_rx_buf *rxb;
	struct ocelot_fdma_dcb *dcb;
	dma_addr_t dma_addr;
	int ret = 0;
	u16 idx;

	rx_ring = &fdma->rx_ring;
	idx = rx_ring->next_to_use;

	while (alloc_cnt--) {
		rxb = &rx_ring->bufs[idx];
		/* Try to reuse the page; allocate a new one only if it was
		 * consumed.
		 */
		if (unlikely(!rxb->page)) {
			if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) {
				dev_err_ratelimited(ocelot->dev,
						    "Failed to allocate rx\n");
				ret = -ENOMEM;
				break;
			}
		}

		dcb = &rx_ring->dcbs[idx];
		dma_addr = rxb->dma_addr + rxb->page_offset;
		ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE);

		idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
		/* Chain the DCB to the next one */
		dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx);
	}

	rx_ring->next_to_use = idx;
	rx_ring->next_to_alloc = idx;

	return ret;
}

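/* Note that the TX path below reuses ocelot_fdma_dcb_set_data() with the
 * fixed RX buffer size rather than skb->len: the actual frame length is
 * programmed into the BLOCKL field of the status word, so DATAL presumably
 * only needs to be an upper bound here.
 */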
static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot,
				       struct ocelot_fdma_tx_buf *tx_buf,
				       struct ocelot_fdma_dcb *dcb,
				       struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = dma_map_single(ocelot->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ocelot->dev, mapping)))
		return false;

	dma_unmap_addr_set(tx_buf, dma_addr, mapping);

	ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE);
	tx_buf->skb = skb;
	dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len);
	dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF;

	return true;
}

static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot)
{
	u32 llp;

	/* Check if the FDMA hits the DCB with LLP == NULL */
	llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN));
	if (unlikely(llp))
		return false;

	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE,
			   BIT(MSCC_FDMA_XTR_CHAN));

	return true;
}

static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring)
{
	struct ocelot_fdma_dcb *dcb;
	unsigned int idx;

	idx = ocelot_fdma_idx_prev(rx_ring->next_to_use,
				   OCELOT_FDMA_RX_RING_SIZE);
	dcb = &rx_ring->dcbs[idx];
	dcb->llp = 0;
}

static void ocelot_fdma_rx_restart(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_rx_ring *rx_ring;
	const u8 chan = MSCC_FDMA_XTR_CHAN;
	dma_addr_t new_llp, dma_base;
	unsigned int idx;
	u32 llp_prev;
	int ret;

	rx_ring = &fdma->rx_ring;
	ret = ocelot_fdma_wait_chan_safe(ocelot, chan);
	if (ret) {
		dev_err_ratelimited(ocelot->dev,
				    "Unable to stop RX channel\n");
		return;
	}

	ocelot_fdma_rx_set_llp(rx_ring);

	/* The FDMA stopped on the last DCB, which contained a NULL LLP. Since
	 * we processed some DCBs in RX, there is free space, and we must set
	 * DCB_LLP to point to the next DCB.
	 */
	llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan));
	dma_base = rx_ring->dcbs_dma;

	/* Get the next DMA addr located after LLP == NULL DCB */
	idx = ocelot_fdma_dma_idx(dma_base, llp_prev);
	idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
	new_llp = ocelot_fdma_idx_dma(dma_base, idx);

	/* Finally reactivate the channel */
	ocelot_fdma_activate_chan(ocelot, new_llp, chan);
}

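/* A page half can only be recycled if the network stack has dropped its
 * reference to the other half (page_ref_count() == 1) and the page did not
 * come from the emergency pfmemalloc reserves, which must not be kept
 * around longer than strictly necessary.
 */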
static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat,
				    struct sk_buff *skb, bool first)
{
	int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat);
	struct page *page = rxb->page;

	if (likely(first)) {
		skb_put(skb, size);
	} else {
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				rxb->page_offset, size, OCELOT_FDMA_RX_SIZE);
	}

	/* Try to reuse page */
	if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page)))
		return false;

	/* Change offset to the other half */
	rxb->page_offset ^= OCELOT_FDMA_RX_SIZE;

	page_ref_inc(page);

	return true;
}

static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot,
				      struct ocelot_fdma_rx_buf *old_rxb)
{
	struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring;
	struct ocelot_fdma_rx_buf *new_rxb;

	new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc];
	rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc,
						      OCELOT_FDMA_RX_RING_SIZE);

	/* Copy page reference */
	*new_rxb = *old_rxb;

	/* Sync for use by the device */
	dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr,
					 old_rxb->page_offset,
					 OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE);
}

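/* Build (or extend) an skb from a received buffer. For the first fragment
 * the skb head is built directly on top of the page half, so no data is
 * copied; further fragments of the same frame are attached as page frags.
 */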
static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat,
					   struct ocelot_fdma_rx_buf *rxb,
					   struct sk_buff *skb)
{
	bool first = false;

	/* No skb yet: build one around the current page half */
	if (likely(!skb)) {
		void *buff_addr = page_address(rxb->page) +
				  rxb->page_offset;

		skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE);
		if (unlikely(!skb)) {
			dev_err_ratelimited(ocelot->dev,
					    "build_skb failed!\n");
			return NULL;
		}
		first = true;
	}

	dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr,
				      rxb->page_offset, OCELOT_FDMA_RX_SIZE,
				      DMA_FROM_DEVICE);

	if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) {
		/* Reuse the free half of the page for the next_to_alloc DCB */
		ocelot_fdma_reuse_rx_page(ocelot, rxb);
	} else {
		/* The page cannot be reused, unmap it */
		dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	}

	/* Clear the RX buffer to indicate that its page was consumed */
	rxb->page = NULL;

	return skb;
}

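/* Deliver a fully reassembled frame to the network stack. The hardware
 * prepends an extraction frame header (XFH) carrying, among other fields,
 * the source port and the rewriter value used as the RX timestamp, and the
 * frame still includes the FCS, which is trimmed before delivery.
 */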
static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb)
{
	struct net_device *ndev;
	void *xfh = skb->data;
	u64 timestamp;
	u64 src_port;

	skb_pull(skb, OCELOT_TAG_LEN);

	ocelot_xfh_get_src_port(xfh, &src_port);
	if (unlikely(src_port >= ocelot->num_phys_ports))
		return false;

	ndev = ocelot_port_to_netdev(ocelot, src_port);
	if (unlikely(!ndev))
		return false;

	if (pskb_trim(skb, skb->len - ETH_FCS_LEN))
		return false;

	skb->dev = ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	if (ocelot->ptp) {
		ocelot_xfh_get_rew_val(xfh, &timestamp);
		ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
	}

	if (likely(!skb_defer_rx_timestamp(skb)))
		netif_receive_skb(skb);

	return true;
}

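/* RX NAPI worker. A frame may span several DCBs (SOF marks the first
 * fragment, EOF the last one), so a partially reassembled skb is carried
 * in rx_ring->skb across invocations. Each processed DCB frees up a ring
 * slot; they are all refilled in one batch after the loop.
 */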
static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_rx_ring *rx_ring;
	struct ocelot_fdma_rx_buf *rxb;
	struct ocelot_fdma_dcb *dcb;
	struct sk_buff *skb;
	int work_done = 0;
	int cleaned_cnt;
	u32 stat;
	u16 idx;

	cleaned_cnt = ocelot_fdma_rx_ring_free(fdma);
	rx_ring = &fdma->rx_ring;
	skb = rx_ring->skb;

	while (budget--) {
		idx = rx_ring->next_to_clean;
		dcb = &rx_ring->dcbs[idx];
		stat = dcb->stat;
		if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0)
			break;

		/* The new packet is a start of frame but we already have an
		 * skb set: we probably lost an EOF packet, free the skb.
		 */
		if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) {
			dev_kfree_skb(skb);
			skb = NULL;
		}

		rxb = &rx_ring->bufs[idx];
		/* Fetch the next to clean buffer from the rx_ring */
		skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb);
		if (unlikely(!skb))
			break;

		work_done++;
		cleaned_cnt++;

		idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
		rx_ring->next_to_clean = idx;

		if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT ||
			     stat & MSCC_FDMA_DCB_STAT_PD)) {
			dev_err_ratelimited(ocelot->dev,
					    "DCB aborted or pruned\n");
			dev_kfree_skb(skb);
			skb = NULL;
			continue;
		}

		/* We still need to process the other fragments of the packet
		 * before delivering it to the network stack
		 */
		if (!(stat & MSCC_FDMA_DCB_STAT_EOF))
			continue;

		if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb)))
			dev_kfree_skb(skb);

		skb = NULL;
	}

	rx_ring->skb = skb;

	if (cleaned_cnt)
		ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt);

	return work_done;
}

static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot)
{
	struct ocelot_port_private *priv;
	struct ocelot_port *ocelot_port;
	struct net_device *dev;
	int port;

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		ocelot_port = ocelot->ports[port];
		if (!ocelot_port)
			continue;
		priv = container_of(ocelot_port, struct ocelot_port_private,
				    port);
		dev = priv->dev;

		if (unlikely(netif_queue_stopped(dev)))
			netif_wake_queue(dev);
	}
}

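/* TX completion: walk the DCBs from next_to_clean and release every skb
 * whose PD (processed/done) bit is set. If the walk ends on a NULL LLP
 * while more DCBs are queued behind it, the channel stopped early and has
 * to be re-armed from next_to_clean onward.
 */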
static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_tx_ring *tx_ring;
	struct ocelot_fdma_tx_buf *buf;
	unsigned int new_null_llp_idx;
	struct ocelot_fdma_dcb *dcb;
	bool end_of_list = false;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 dcb_llp;
	u16 ntc;
	int ret;

	tx_ring = &fdma->tx_ring;

	/* Purge the TX packets that have been sent, up to the NULL LLP or the
	 * end of the done list.
	 */
	while (!ocelot_fdma_tx_ring_empty(fdma)) {
		ntc = tx_ring->next_to_clean;
		dcb = &tx_ring->dcbs[ntc];
		if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD))
			break;

		buf = &tx_ring->bufs[ntc];
		skb = buf->skb;
		dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr),
				 skb->len, DMA_TO_DEVICE);
		napi_consume_skb(skb, budget);
		dcb_llp = dcb->llp;

		/* Only update after accessing all dcb fields */
		tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc,
							      OCELOT_FDMA_TX_RING_SIZE);

		/* If we hit the NULL LLP, stop, we might need to reload FDMA */
		if (dcb_llp == 0) {
			end_of_list = true;
			break;
		}
	}

	/* No need to try to wake the queues if nothing was cleaned up */
	if (ocelot_fdma_tx_ring_free(fdma))
		ocelot_fdma_wakeup_netdev(ocelot);

	/* If there are still DCBs to be processed by the FDMA, or if the
	 * pending list is empty, there is no need to restart the FDMA.
	 */
	if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma))
		return;

	ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN);
	if (ret) {
		dev_warn(ocelot->dev,
			 "Failed to wait for TX channel to stop\n");
		return;
	}

	/* Set a NULL LLP on the last DCB in use */
	new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use,
						OCELOT_FDMA_TX_RING_SIZE);
	dcb = &tx_ring->dcbs[new_null_llp_idx];
	dcb->llp = 0;

	dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean);
	ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
}

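/* NAPI poll handler shared by the injection and extraction channels: TX
 * completions are processed first and are not bounded by the budget, only
 * the RX work is. Interrupts are re-enabled once the RX work fits within
 * the budget.
 */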
static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget)
{
	struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi);
	struct ocelot *ocelot = fdma->ocelot;
	int work_done = 0;
	bool rx_stopped;

	ocelot_fdma_tx_cleanup(ocelot, budget);

	rx_stopped = ocelot_fdma_check_stop_rx(ocelot);

	work_done = ocelot_fdma_rx_get(ocelot, budget);

	if (rx_stopped)
		ocelot_fdma_rx_restart(ocelot);

	if (work_done < budget) {
		napi_complete_done(&fdma->napi, work_done);
		ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
				   BIT(MSCC_FDMA_INJ_CHAN) |
				   BIT(MSCC_FDMA_XTR_CHAN));
	}

	return work_done;
}

static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id)
{
	u32 ident, llp, frm, err, err_code;
	struct ocelot *ocelot = dev_id;

	ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT);
	frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM);
	llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP);

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident);
	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident);
	if (frm || llp) {
		ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
		napi_schedule(&ocelot->fdma->napi);
	}

	err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR);
	if (unlikely(err)) {
		err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE);
		dev_err_ratelimited(ocelot->dev,
				    "Error! chans mask: %#x, code: %#x\n",
				    err, err_code);

		ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err);
		ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code);
	}

	return IRQ_HANDLED;
}

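/* Enqueue one skb for injection. If the TX channel is idle it is kicked
 * directly with the new DCB; otherwise the DCB is simply chained onto the
 * tail of the list the hardware is already walking.
 */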
static void ocelot_fdma_send_skb(struct ocelot *ocelot,
				 struct ocelot_fdma *fdma, struct sk_buff *skb)
{
	struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
	struct ocelot_fdma_tx_buf *tx_buf;
	struct ocelot_fdma_dcb *dcb;
	dma_addr_t dma;
	u16 next_idx;

	dcb = &tx_ring->dcbs[tx_ring->next_to_use];
	tx_buf = &tx_ring->bufs[tx_ring->next_to_use];
	if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) {
		dev_kfree_skb_any(skb);
		return;
	}

	next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use,
					OCELOT_FDMA_TX_RING_SIZE);
	skb_tx_timestamp(skb);

	/* If the FDMA TX chan is empty, then enqueue the DCB directly */
	if (ocelot_fdma_tx_ring_empty(fdma)) {
		dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma,
					  tx_ring->next_to_use);
		ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
	} else {
		/* Chain the DCBs */
		dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx);
	}

	tx_ring->next_to_use = next_idx;
}

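/* Make an skb injectable: it must be linear and needs OCELOT_TAG_LEN bytes
 * of headroom for the injection frame header (IFH), plus ETH_FCS_LEN bytes
 * of tailroom since the FCS is part of the injected frame.
 */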
static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
				   struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0);
	int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	void *ifh;
	int err;

	if (unlikely(needed_headroom || needed_tailroom ||
		     skb_header_cloned(skb))) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev_kfree_skb_any(skb);
			return 1;
		}
	}

	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n",
				    dev->name, err);
		dev_kfree_skb_any(skb);
		return 1;
	}

	ifh = skb_push(skb, OCELOT_TAG_LEN);
	skb_put(skb, ETH_FCS_LEN);
	ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);

	return 0;
}

int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
			     struct sk_buff *skb, struct net_device *dev)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	int ret = NETDEV_TX_OK;

	spin_lock(&fdma->tx_ring.xmit_lock);

	if (ocelot_fdma_tx_ring_free(fdma) == 0) {
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev))
		goto out;

	ocelot_fdma_send_skb(ocelot, fdma, skb);

out:
	spin_unlock(&fdma->tx_ring.xmit_lock);

	return ret;
}

static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_rx_ring *rx_ring;
	struct ocelot_fdma_rx_buf *rxb;
	u16 idx;

	rx_ring = &fdma->rx_ring;
	idx = rx_ring->next_to_clean;

	/* Free the pages held in the RX ring */
	while (idx != rx_ring->next_to_use) {
		rxb = &rx_ring->bufs[idx];
		dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		__free_page(rxb->page);
		idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
	}

	if (fdma->rx_ring.skb)
		dev_kfree_skb_any(fdma->rx_ring.skb);
}

static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_tx_ring *tx_ring;
	struct ocelot_fdma_tx_buf *txb;
	struct sk_buff *skb;
	u16 idx;

	tx_ring = &fdma->tx_ring;
	idx = tx_ring->next_to_clean;

	while (idx != tx_ring->next_to_use) {
		txb = &tx_ring->bufs[idx];
		skb = txb->skb;
		dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr),
				 skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE);
	}
}

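/* Both rings live in a single coherent allocation: the TX DCBs first,
 * immediately followed by the RX DCBs. Only the base address may need
 * realigning; since the DCB size is a multiple of 4 bytes, every
 * subsequent descriptor then stays aligned as well.
 */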
static int ocelot_fdma_rings_alloc(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;
	struct ocelot_fdma_dcb *dcbs;
	unsigned int adjust;
	dma_addr_t dcbs_dma;
	int ret;

	/* Allocate one coherent memory block for all the hardware DCBs */
	fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev,
					      OCELOT_DCBS_HW_ALLOC_SIZE,
					      &fdma->dcbs_dma_base, GFP_KERNEL);
	if (!fdma->dcbs_base)
		return -ENOMEM;

	/* DCBs must be aligned on a 32bit boundary */
	dcbs = fdma->dcbs_base;
	dcbs_dma = fdma->dcbs_dma_base;
	if (!IS_ALIGNED(dcbs_dma, 4)) {
		adjust = dcbs_dma & 0x3;
		dcbs_dma = ALIGN(dcbs_dma, 4);
		dcbs = (void *)dcbs + adjust;
	}

	/* TX queue */
	fdma->tx_ring.dcbs = dcbs;
	fdma->tx_ring.dcbs_dma = dcbs_dma;
	spin_lock_init(&fdma->tx_ring.xmit_lock);

	/* RX queue */
	fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE;
	fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE;
	ret = ocelot_fdma_alloc_rx_buffs(ocelot,
					 ocelot_fdma_rx_ring_free(fdma));
	if (ret) {
		ocelot_fdma_free_rx_ring(ocelot);
		return ret;
	}

	/* Set the last DCB LLP as NULL, this is normally done when restarting
	 * the RX chan, but it has to be done manually for the first run.
	 */
	ocelot_fdma_rx_set_llp(&fdma->rx_ring);

	return 0;
}

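/* A single NAPI instance serves the whole switch, so it is attached to the
 * first port netdev that registers and torn down along with it.
 */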
void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev)
{
	struct ocelot_fdma *fdma = ocelot->fdma;

	dev->needed_headroom = OCELOT_TAG_LEN;
	dev->needed_tailroom = ETH_FCS_LEN;

	if (fdma->ndev)
		return;

	fdma->ndev = dev;
	netif_napi_add_weight(dev, &fdma->napi, ocelot_fdma_napi_poll,
			      OCELOT_FDMA_WEIGHT);
}

void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev)
{
	struct ocelot_fdma *fdma = ocelot->fdma;

	if (fdma->ndev == dev) {
		netif_napi_del(&fdma->napi);
		fdma->ndev = NULL;
	}
}

void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot)
{
	struct device *dev = ocelot->dev;
	struct ocelot_fdma *fdma;
	int ret;

	fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL);
	if (!fdma)
		return;

	ocelot->fdma = fdma;
	ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);

	fdma->ocelot = ocelot;
	fdma->irq = platform_get_irq_byname(pdev, "fdma");
	ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0,
			       dev_name(dev), ocelot);
	if (ret)
		goto err_free_fdma;

	ret = ocelot_fdma_rings_alloc(ocelot);
	if (ret)
		goto err_free_irq;

	static_branch_enable(&ocelot_fdma_enabled);

	return;

err_free_irq:
	devm_free_irq(dev, fdma->irq, ocelot);
err_free_fdma:
	devm_kfree(dev, fdma);

	ocelot->fdma = NULL;
}

void ocelot_fdma_start(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;

	/* Reconfigure for extraction and injection using DMA */
	ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0);
	ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0);

	ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0);

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff);
	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff);

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA,
			   BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA,
			   BIT(MSCC_FDMA_XTR_CHAN));
	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
			   BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));

	napi_enable(&fdma->napi);

	ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma,
				  MSCC_FDMA_XTR_CHAN);
}

void ocelot_fdma_deinit(struct ocelot *ocelot)
{
	struct ocelot_fdma *fdma = ocelot->fdma;

	ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
			   BIT(MSCC_FDMA_XTR_CHAN));
	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
			   BIT(MSCC_FDMA_INJ_CHAN));
	napi_synchronize(&fdma->napi);
	napi_disable(&fdma->napi);

	ocelot_fdma_free_rx_ring(ocelot);
	ocelot_fdma_free_tx_ring(ocelot);
}