// SPDX-License-Identifier: GPL-2.0+

#include "lan966x_main.h"

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
					       struct lan966x_db *db)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return NULL;

	db->dataptr = page_pool_get_dma_addr(page);

	return page;
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	int i, j;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct page *page;

	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

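/* Append a DCB at the tail of the RX ring: flag every DB of the new DCB to
 * raise an interrupt on completion, set the usable block length from the
 * page order, and link the previous tail to the new DCB through its nextptr.
 */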
static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
				    struct lan966x_rx_dcb *dcb,
				    u64 nextptr)
{
	struct lan966x_db *db;
	int i;

	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
		db = &dcb->db[i];
		db->status = FDMA_DCB_STATUS_INTR;
	}

	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);

	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}

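/* Create the page pool backing the RX ring. The pool takes care of DMA
 * mapping (PP_FLAG_DMA_MAP) and of syncing pages for the device
 * (PP_FLAG_DMA_SYNC_DEV); max_len excludes the tail room kept for the
 * skb_shared_info used later by build_skb().
 */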
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = FDMA_DCB_MAX,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = 0,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	rx->page_pool = page_pool_create(&pp_params);
	return PTR_ERR_OR_ZERO(rx->page_pool);
}

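/* Allocate the RX ring: a coherent array of FDMA_DCB_MAX DCBs and one page
 * pool page per DB, with each page's DMA address stored in the DB dataptr.
 * The DCBs are chained through their nextptr fields as they are added.
 */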
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_rx_dcb *dcb;
	struct lan966x_db *db;
	struct page *page;
	int i, j;
	int size;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);

	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
	if (!rx->dcbs)
		return -ENOMEM;

	rx->last_entry = rx->dcbs;
	rx->db_index = 0;
	rx->dcb_index = 0;

	/* Now for each dcb allocate the dbs */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &rx->dcbs[i];
		dcb->info = 0;

		/* For each db allocate a page and map it to the DB dataptr. */
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			page = lan966x_fdma_rx_alloc_page(rx, db);
			if (!page)
				return -ENOMEM;

			db->status = 0;
			rx->page[i][j] = page;
		}

		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
	}

	return 0;
}

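/* Move to the next DCB in the RX ring. FDMA_DCB_MAX is assumed to be a power
 * of two here, so the index wraps with a simple mask.
 */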
static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	rx->dcb_index++;
	rx->dcb_index &= FDMA_DCB_MAX - 1;
}

static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 size;

	/* Free the dcb ring using the same size it was allocated with */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}

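/* Bring up the RX (extraction) channel: program the address of the first
 * DCB, configure the channel, clear the extraction stop bit, enable the
 * per-channel DB interrupt and finally activate the channel.
 */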
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB has to be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP(rx->channel_id));
	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP1(rx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(rx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(rx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(rx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
				    struct lan966x_tx_dcb *dcb)
{
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = 0;
}

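/* Allocate the TX ring: a software shadow array (dcbs_buf) that tracks the
 * skb, DMA address and state of each slot, and a coherent array of hardware
 * DCBs whose DBs start out empty.
 */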
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;
	struct lan966x_db *db;
	int size;
	int i, j;

	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
	if (!tx->dcbs)
		goto out;

	/* Now for each dcb allocate the db */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &tx->dcbs[i];

		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			db->dataptr = 0;
			db->status = 0;
		}

		lan966x_fdma_tx_add_dcb(tx, dcb);
	}

	return 0;

out:
	kfree(tx->dcbs_buf);
	return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB has to be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP(tx->channel_id));
	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP1(tx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(tx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(tx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(tx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
	tx->last_in_use = -1;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

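/* Reclaim TX DCBs that the hardware has completed: update the statistics,
 * unmap the frame, free the skb unless a two-step PTP timestamp is still
 * pending for it, and wake the netdev queues if anything was released.
 */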
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct lan966x_db *db;
	unsigned long flags;
	bool clear = false;
	int i;

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &tx->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;

		dcb_buf->used = false;
		dma_unmap_single(lan966x->dev,
				 dcb_buf->dma_addr,
				 dcb_buf->skb->len,
				 DMA_TO_DEVICE);
		if (!dcb_buf->ptp)
			dev_kfree_skb_any(dcb_buf->skb);

		clear = true;
	}

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct lan966x_db *db;

	/* Check if there is any data */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

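/* Inspect the frame at the current RX position: sync the page for the CPU,
 * read the source port out of the IFH and, if an XDP program is attached to
 * that port, run it. Returns one of the FDMA_* dispositions handled by the
 * NAPI poll loop.
 */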
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_port *port;
	struct lan966x_db *db;
	struct page *page;

	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page), src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

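/* Build an skb around the received page: mark it for page pool recycling,
 * pull the injection/extraction header (IFH), strip the FCS unless the
 * netdev asked to keep it, and attach the RX timestamp taken from the IFH.
 */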
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;
	u64 timestamp;

	/* Get the received frame; the page pool keeps the page DMA-mapped */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];

	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

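/* NAPI poll: first reclaim completed TX buffers, then receive up to "weight"
 * frames, refill the consumed DCBs with fresh pages, and re-arm the DB
 * interrupts once the budget has not been exhausted.
 */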
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int dcb_reload = rx->dcb_index;
	struct lan966x_rx_dcb *old_dcb;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;
	int counter = 0;
	u64 src_port;
	u64 nextptr;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all the received frames */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != rx->dcb_index) {
		db = &rx->dcbs[dcb_reload].db[rx->db_index];
		page = lan966x_fdma_rx_alloc_page(rx, db);
		if (unlikely(!page))
			break;
		rx->page[dcb_reload][rx->db_index] = page;

		old_dcb = &rx->dcbs[dcb_reload];
		dcb_reload++;
		dcb_reload &= FDMA_DCB_MAX - 1;

		nextptr = rx->dma + ((unsigned long)old_dcb -
				     (unsigned long)rx->dcbs);
		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
		lan966x_fdma_rx_reload(rx);
	}

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

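/* Find a free TX DCB. The one at last_in_use is skipped even when it is no
 * longer marked as used, since its nextptr is still what chains the next
 * queued frame into the list (see lan966x_fdma_xmit()).
 */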
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	int i;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

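/* FDMA transmit path: pad the frame to the minimum size, make room in front
 * for the IFH and reserve four bytes (ETH_FCS_LEN) at the tail, map the
 * buffer for DMA, fill in a free DCB/DB pair and either chain it to the
 * previously queued DCB or activate the channel on the first transmission.
 */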
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx_dcb *next_dcb, *dcb;
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_db *next_db;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	skb_put(skb, 4);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Setup next dcb */
	next_dcb = &tx->dcbs[next_to_use];
	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

	next_db = &next_dcb->db[0];
	next_db->dataptr = dma_addr;
	next_db->status = FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF |
			  FDMA_DCB_STATUS_INTR |
			  FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(skb->len);

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->skb = skb;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	if (likely(lan966x->tx.activated)) {
		/* Chain the previously queued dcb to the new dcb */
		dcb = &tx->dcbs[tx->last_in_use];
		dcb->nextptr = tx->dma + (next_to_use *
					  sizeof(struct lan966x_tx_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* This is the first frame, so just activate the channel */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* The new dcb is now the last one in use */
	tx->last_in_use = next_to_use;

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

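/* Re-create the RX ring for a new maximum frame size. The old DMA area and
 * page pool are kept aside while the new ring is allocated; on success they
 * are freed, on failure the old ring is restored and restarted.
 */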
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Save the current RX resources so they can be freed later */
	rx_dma = lan966x->rx.dma;
	rx_dcbs = lan966x->rx.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.dma = rx_dma;
	lan966x->rx.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}

static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2;
}

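/* Called when the required maximum frame size changes. The CPU port is
 * disabled and its queues are flushed before the RX ring is reloaded, so no
 * new frames are pushed towards the FDMA while the buffers are replaced.
 */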
int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;
	int err;
	u32 val;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Re-enable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x,  QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		lan966x_fdma_rx_free(&lan966x->rx);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x_fdma_rx_free(&lan966x->rx);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}