// SPDX-License-Identifier: GPL-2.0+

#include "lan966x_main.h"

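/* Read the channel-active status register; used below to poll for a
 * channel to become inactive after it has been disabled.
 */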
static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

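/* Allocate a page for a single RX DB and map it for device access. On
 * success the DMA address is stored in the DB's dataptr; on a mapping
 * error the page is freed and NULL is returned.
 */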
static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
					       struct lan966x_db *db)
{
	struct lan966x *lan966x = rx->lan966x;
	dma_addr_t dma_addr;
	struct page *page;

	page = dev_alloc_pages(rx->page_order);
	if (unlikely(!page))
		return NULL;

	dma_addr = dma_map_page(lan966x->dev, page, 0,
				PAGE_SIZE << rx->page_order,
				DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
		goto free_page;

	db->dataptr = dma_addr;

	return page;

free_page:
	__free_pages(page, rx->page_order);
	return NULL;
}

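/* Unmap and free every RX page referenced by the DCBs. Used when the
 * RX channel is torn down or reconfigured.
 */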
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_rx_dcb *dcb;
	struct lan966x_db *db;
	int i, j;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &rx->dcbs[i];

		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			/* The pages were mapped with dma_map_page(), so
			 * unmap them with the matching call and size.
			 */
			dma_unmap_page(lan966x->dev,
				       (dma_addr_t)db->dataptr,
				       PAGE_SIZE << rx->page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rx->page[i][j], rx->page_order);
		}
	}
}

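/* Prepare a DCB for (re)use and append it to the RX chain: arm each DB
 * for an interrupt, terminate the DCB's nextptr and link the previous
 * tail of the chain to it.
 */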
static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
				    struct lan966x_rx_dcb *dcb,
				    u64 nextptr)
{
	struct lan966x_db *db;
	int i;

	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
		db = &dcb->db[i];
		db->status = FDMA_DCB_STATUS_INTR;
	}

	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);

	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}

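/* Allocate the coherent DCB area and one mapped page per DB, then
 * chain all DCBs together for the RX channel.
 */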
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_rx_dcb *dcb;
	struct lan966x_db *db;
	struct page *page;
	int i, j;
	int size;

	/* Calculate how much coherent memory is needed for the dcbs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);

	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
	if (!rx->dcbs)
		return -ENOMEM;

	rx->last_entry = rx->dcbs;
	rx->db_index = 0;
	rx->dcb_index = 0;

	/* Now for each dcb allocate the dbs */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &rx->dcbs[i];
		dcb->info = 0;

		/* For each db allocate a page and map it to the DB dataptr. */
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			page = lan966x_fdma_rx_alloc_page(rx, db);
			if (!page)
				return -ENOMEM;

			db->status = 0;
			rx->page[i][j] = page;
		}

		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
	}

	return 0;
}

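/* Free the coherent DCB area allocated by lan966x_fdma_rx_alloc().
 * The RX pages themselves are released separately by
 * lan966x_fdma_rx_free_pages().
 */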
static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 size;

	/* Free the DCB area; the size must match what was allocated in
	 * lan966x_fdma_rx_alloc().
	 */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}

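/* Point the channel to the first DCB, configure it for extraction,
 * enable its DB interrupt and activate it.
 */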
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB has to
	 * be written before the channel is activated.
	 */
	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP(rx->channel_id));
	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP1(rx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(rx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(rx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

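/* Disable the RX channel and wait for it to become inactive. The DB
 * discard write presumably tells the hardware to drop any DBs it still
 * holds for this channel.
 */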
static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(rx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
				    struct lan966x_tx_dcb *dcb)
{
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = 0;
}

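/* Allocate the TX bookkeeping array and the coherent DCB area. No
 * pages are attached to the DBs here; each dataptr is filled in at
 * transmit time with the mapped skb data.
 */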
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;
	struct lan966x_db *db;
	int size;
	int i, j;

	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	/* Calculate how much coherent memory is needed for the dcbs */
	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
	if (!tx->dcbs)
		goto out;

	/* Now for each dcb initialize the dbs */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &tx->dcbs[i];

		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			db->dataptr = 0;
			db->status = 0;
		}

		lan966x_fdma_tx_add_dcb(tx, dcb);
	}

	return 0;

out:
	kfree(tx->dcbs_buf);
	return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB has to
	 * be written before the channel is activated.
	 */
	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP(tx->channel_id));
	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP1(tx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(tx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(tx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(tx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

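/* Reclaim the TX buffers whose first DB the hardware has marked as
 * done: update the counters, unmap the skb data and free the skb,
 * unless a two-step PTP timestamp is still pending for it. Wake the
 * queues if anything was reclaimed.
 */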
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct lan966x_db *db;
	unsigned long flags;
	bool clear = false;
	int i;

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &tx->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;

		dcb_buf->used = false;
		dma_unmap_single(lan966x->dev,
				 dcb_buf->dma_addr,
				 dcb_buf->skb->len,
				 DMA_TO_DEVICE);
		if (!dcb_buf->ptp)
			dev_kfree_skb_any(dcb_buf->skb);

		clear = true;
	}

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct lan966x_db *db;

	/* Check if there is any data */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

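/* Build an skb around the received page: unmap the page, take the
 * source port and RX timestamp from the IFH before stripping it, trim
 * the FCS unless the netdev asked for it, and fill in the usual RX
 * metadata.
 */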
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u64 src_port, timestamp;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;

	/* Get the received frame and unmap it */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];
	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
	if (unlikely(!skb))
		goto unmap_page;

	/* Unmap with the same size the page was mapped with */
	dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
		       PAGE_SIZE << rx->page_order,
		       DMA_FROM_DEVICE);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_src_port(skb->data, &src_port);
	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	if (WARN_ON(src_port >= lan966x->num_phys_ports))
		goto free_skb;

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN * sizeof(u32));

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_skb:
	kfree_skb(skb);
unmap_page:
	dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
		       PAGE_SIZE << rx->page_order,
		       DMA_FROM_DEVICE);
	__free_pages(page, rx->page_order);

	return NULL;
}

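/* NAPI poll: reclaim completed TX buffers, receive up to "weight"
 * frames, then refill the consumed RX DCBs with freshly allocated
 * pages. The DB interrupts are re-enabled only when the poll did not
 * use its whole budget.
 */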
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int dcb_reload = rx->dcb_index;
	struct lan966x_rx_dcb *old_dcb;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;
	int counter = 0;
	u64 nextptr;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all the received frames */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		skb = lan966x_fdma_rx_get_frame(rx);

		rx->page[rx->dcb_index][rx->db_index] = NULL;
		rx->dcb_index++;
		rx->dcb_index &= FDMA_DCB_MAX - 1;

		if (!skb)
			break;

		napi_gro_receive(&lan966x->napi, skb);
		counter++;
	}

	/* Allocate new pages and map them */
	while (dcb_reload != rx->dcb_index) {
		db = &rx->dcbs[dcb_reload].db[rx->db_index];
		page = lan966x_fdma_rx_alloc_page(rx, db);
		if (unlikely(!page))
			break;
		rx->page[dcb_reload][rx->db_index] = page;

		old_dcb = &rx->dcbs[dcb_reload];
		dcb_reload++;
		dcb_reload &= FDMA_DCB_MAX - 1;

		nextptr = rx->dma + ((unsigned long)old_dcb -
				     (unsigned long)rx->dcbs);
		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
		lan966x_fdma_rx_reload(rx);
	}

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

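/* Top half: mask and acknowledge the DB interrupts and defer the work
 * to NAPI; errors are only acknowledged and reported.
 */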
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %u, error_type: %u\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

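/* Find a free TX DCB. The last DCB in use is skipped because its
 * nextptr is still needed to chain in the following frame.
 */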
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	int i;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

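/* Transmit one skb: reserve a DCB, make room for the IFH and the FCS
 * (reallocating the head if needed), map the frame and link the new
 * DCB after the last one in use. The channel is activated on first
 * use and merely reloaded afterwards.
 */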
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx_dcb *next_dcb, *dcb;
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_db *next_db;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN * sizeof(u32));
	memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
	skb_put(skb, ETH_FCS_LEN);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Setup next dcb */
	next_dcb = &tx->dcbs[next_to_use];
	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

	next_db = &next_dcb->db[0];
	next_db->dataptr = dma_addr;
	next_db->status = FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF |
			  FDMA_DCB_STATUS_INTR |
			  FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(skb->len);

	/* Fill up the buffer entry */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->skb = skb;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	if (likely(lan966x->tx.activated)) {
		/* Connect the current dcb to the next dcb */
		dcb = &tx->dcbs[tx->last_in_use];
		dcb->nextptr = tx->dma + (next_to_use *
					  sizeof(struct lan966x_tx_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* First frame, so the channel just has to be activated */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* Remember this dcb as the last one in use */
	tx->last_in_use = next_to_use;

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		int mtu;

		if (!lan966x->ports[i])
			continue;

		mtu = lan966x->ports[i]->dev->mtu;
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

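/* Re-create the RX and TX DMA state for a new MTU. The old areas are
 * kept around until the new ones are allocated, so the previous
 * configuration can be restored if an allocation fails.
 */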
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf;
	dma_addr_t rx_dma, tx_dma;
	u32 size;
	int err;

	/* Store the old state so it can be freed or restored later */
	rx_dma = lan966x->rx.dma;
	tx_dma = lan966x->tx.dma;
	rx_dcbs = lan966x->rx.dcbs;
	tx_dcbs = lan966x->tx.dcbs;
	tx_dcbs_buf = lan966x->tx.dcbs_buf;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	lan966x_fdma_tx_disable(&lan966x->tx);
	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err)
		goto restore_tx;

	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma);

	kfree(tx_dcbs_buf);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.dma = rx_dma;
	lan966x->rx.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

restore_tx:
	lan966x->tx.dma = tx_dma;
	lan966x->tx.dcbs = tx_dcbs;
	lan966x->tx.dcbs_buf = tx_dcbs_buf;

	return err;
}

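/* Called when the MTU of a port changed. If the required page order
 * differs from the current one, the CPU port is stopped and drained
 * before the FDMA state is reloaded, and re-enabled afterwards.
 */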
int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;
	int err;
	u32 val;

	max_mtu = lan966x_fdma_get_max_mtu(lan966x);
	max_mtu += IFH_LEN * sizeof(u32);

	if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
	    lan966x->rx.page_order)
		return 0;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Re-enable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

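/* Allocate and start both channels. When lan966x->fdma is not set the
 * FDMA is not used and this is a no-op.
 */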
int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		lan966x_fdma_rx_free(&lan966x->rx);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x_fdma_rx_free(&lan966x->rx);
	lan966x_fdma_tx_free(&lan966x->tx);
}