// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>

#include "lan966x_main.h"

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

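/* Allocate a page from the RX page pool and program the DB data pointer with
 * its DMA address, offset by XDP_PACKET_HEADROOM so the hardware writes the
 * frame after the headroom reserved for XDP.
 */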
static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
					       struct lan966x_db *db)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return NULL;

	db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return page;
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	int i, j;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct page *page;

	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}

static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
				    struct lan966x_rx_dcb *dcb,
				    u64 nextptr)
{
	struct lan966x_db *db;
	int i;

	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
		db = &dcb->db[i];
		db->status = FDMA_DCB_STATUS_INTR;
	}

	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);

	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}

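/* Create the RX page pool and (re)register it as the XDP memory model on
 * every port. DMA_BIDIRECTIONAL is used when an XDP program is attached
 * because XDP_TX transmits frames directly out of the RX pages.
 */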
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = FDMA_DCB_MAX,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return PTR_ERR_OR_ZERO(rx->page_pool);
}

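/* Allocate the RX resources: the page pool, the coherent DCB ring and one
 * page per DB, then chain all the DCBs together.
 */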
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_rx_dcb *dcb;
	struct lan966x_db *db;
	struct page *page;
	int i, j;
	int size;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);

	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
	if (!rx->dcbs)
		return -ENOMEM;

	rx->last_entry = rx->dcbs;
	rx->db_index = 0;
	rx->dcb_index = 0;

	/* Now for each dcb allocate the dbs */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &rx->dcbs[i];
		dcb->info = 0;

		/* For each db allocate a page and map it to the DB dataptr. */
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			page = lan966x_fdma_rx_alloc_page(rx, db);
			if (!page)
				return -ENOMEM;

			db->status = 0;
			rx->page[i][j] = page;
		}

		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
	}

	return 0;
}

static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	rx->dcb_index++;
	rx->dcb_index &= FDMA_DCB_MAX - 1;
}

static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 size;

	/* Now it is possible to clean up the DCBs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}

static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP(rx->channel_id));
	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP1(rx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(rx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(rx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(rx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
				    struct lan966x_tx_dcb *dcb)
{
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = 0;
}

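/* Allocate the TX DCB ring and the bookkeeping array used to track the
 * buffers that are currently owned by the hardware.
 */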
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;
	struct lan966x_db *db;
	int size;
	int i, j;

	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
	if (!tx->dcbs)
		goto out;

	/* Now for each dcb allocate the db */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &tx->dcbs[i];

		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			db->dataptr = 0;
			db->status = 0;
		}

		lan966x_fdma_tx_add_dcb(tx, dcb);
	}

	return 0;

out:
	kfree(tx->dcbs_buf);
	return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 mask;

	/* When activating a channel, the address of the first DCB must be
	 * written before the channel is activated
	 */
	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP(tx->channel_id));
	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP1(tx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(tx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(tx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(tx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
	tx->last_in_use = -1;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}

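/* Reclaim the TX DCBs whose DONE bit is set: unmap the buffers, free or
 * recycle the skb/xdp_frame/page and wake the netdev queues if anything was
 * released.
 */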
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_rx *rx = &lan966x->rx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct xdp_frame_bulk bq;
	struct lan966x_db *db;
	unsigned long flags;
	bool clear = false;
	int i;

	xdp_frame_bulk_init(&bq);

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &tx->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else {
			if (dcb_buf->xdp_ndo)
				dma_unmap_single(lan966x->dev,
						 dcb_buf->dma_addr,
						 dcb_buf->len,
						 DMA_TO_DEVICE);

			if (dcb_buf->xdp_ndo)
				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
			else
				page_pool_recycle_direct(rx->page_pool,
							 dcb_buf->data.page);
		}

		clear = true;
	}

	xdp_flush_frame_bulk(&bq);

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct lan966x_db *db;

	/* Check if there is any data */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}

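/* Sync the received data for the CPU, extract the source port from the IFH
 * and, if an XDP program is attached on that port, run it on the frame.
 */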
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_port *port;
	struct lan966x_db *db;
	struct page *page;

	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

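/* Build an skb around the received page, strip the IFH (and the FCS unless
 * NETIF_F_RXFCS is set), attach the RX timestamp and update the statistics.
 */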
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;
	u64 timestamp;

	/* Get the received frame */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];

	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}

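/* NAPI poll: first reclaim the completed TX DCBs, then process up to @weight
 * received frames, refill the DCBs that were consumed and re-enable the FDMA
 * interrupts once all the pending work is done.
 */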
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int dcb_reload = rx->dcb_index;
	struct lan966x_rx_dcb *old_dcb;
	struct lan966x_db *db;
	bool redirect = false;
	struct sk_buff *skb;
	struct page *page;
	int counter = 0;
	u64 src_port;
	u64 nextptr;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all the received skbs */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_REDIRECT:
			redirect = true;
			fallthrough;
		case FDMA_TX:
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != rx->dcb_index) {
		db = &rx->dcbs[dcb_reload].db[rx->db_index];
		page = lan966x_fdma_rx_alloc_page(rx, db);
		if (unlikely(!page))
			break;
		rx->page[dcb_reload][rx->db_index] = page;

		old_dcb = &rx->dcbs[dcb_reload];
		dcb_reload++;
		dcb_reload &= FDMA_DCB_MAX - 1;

		nextptr = rx->dma + ((unsigned long)old_dcb -
				     (unsigned long)rx->dcbs);
		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
		lan966x_fdma_rx_reload(rx);
	}

	if (redirect)
		xdp_do_flush();

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}

irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}

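/* Find an unused TX DCB entry. The last entry handed to the hardware is
 * skipped because its nextptr is still needed to chain in the next DCB.
 */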
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	int i;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}

static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
				      int next_to_use, int len,
				      dma_addr_t dma_addr)
{
	struct lan966x_tx_dcb *next_dcb;
	struct lan966x_db *next_db;

	next_dcb = &tx->dcbs[next_to_use];
	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

	next_db = &next_dcb->db[0];
	next_db->dataptr = dma_addr;
	next_db->status = FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF |
			  FDMA_DCB_STATUS_INTR |
			  FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(len);
}

static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;

	if (likely(lan966x->tx.activated)) {
		/* Chain the currently last DCB to the new one */
		dcb = &tx->dcbs[tx->last_in_use];
		dcb->nextptr = tx->dma + (next_to_use *
					  sizeof(struct lan966x_tx_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* First time the channel is used, so just activate it */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* Remember this DCB as the last one handed to the HW */
	tx->last_in_use = next_to_use;
}

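/* Transmit a frame coming from XDP. A zero @len means @ptr is an xdp_frame
 * (ndo_xdp_xmit path) that has to be mapped for DMA; otherwise @ptr is a page
 * from the RX page pool (XDP_TX path) and @len is the frame length. In both
 * cases the IFH is written in front of the data before transmission.
 */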
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	struct page *page;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Get the next buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];

	/* Generate new IFH */
	if (!len) {
		xdpf = ptr;

		if (xdpf->headroom < IFH_LEN_BYTES) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		ifh = xdpf->data - IFH_LEN_BYTES;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(lan966x->dev, dma_addr)) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		next_dcb_buf->data.xdpf = xdpf;
		next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  xdpf->len + IFH_LEN_BYTES,
					  dma_addr);
	} else {
		page = ptr;

		ifh = page_address(page) + XDP_PACKET_HEADROOM;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);

		next_dcb_buf->data.page = page;
		next_dcb_buf->len = len + IFH_LEN_BYTES;

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  len + IFH_LEN_BYTES,
					  dma_addr + XDP_PACKET_HEADROOM);
	}

	/* Fill up the buffer */
	next_dcb_buf->use_skb = false;
	next_dcb_buf->xdp_ndo = !len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}

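/* Transmit an skb: pad it to the minimum frame size, make room for the IFH
 * in front and the FCS at the end, map it for DMA and hand it over to the
 * FDMA injection channel.
 */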
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	skb_put(skb, 4);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Setup next dcb */
	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->xdp_ndo = false;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

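/* Re-create the RX resources for a new maximum frame size: stop RX, allocate
 * a new page pool and DCB ring, restart RX and only then free the old
 * resources. On failure the old resources are restored.
 */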
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Store these to free them later */
	rx_dma = lan966x->rx.dma;
	rx_dcbs = lan966x->rx.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.dma = rx_dma;
	lan966x->rx.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}

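/* The RX buffer must hold the largest MAC frame plus the IFH, room for the
 * skb_shared_info, two VLAN tags and the XDP headroom.
 */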
static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}

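/* Reload the FDMA with the CPU port disabled so that no new frames can reach
 * the CPU queues while the RX resources are being replaced.
 */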
static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Re-enable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}

int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		lan966x_fdma_rx_free(&lan966x->rx);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x_fdma_rx_free(&lan966x->rx);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}