Lines matching references to: tx

265 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)  in tsnep_tx_ring_cleanup()  argument
267 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
270 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
273 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
274 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
275 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
276 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
277 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
282 static int tsnep_tx_ring_create(struct tsnep_tx *tx) in tsnep_tx_ring_create() argument
284 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_create()
291 tx->page[i] = in tsnep_tx_ring_create()
292 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_create()
294 if (!tx->page[i]) { in tsnep_tx_ring_create()
299 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_create()
301 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_create()
304 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_create()
309 entry = &tx->entry[i]; in tsnep_tx_ring_create()
310 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_tx_ring_create()
317 tsnep_tx_ring_cleanup(tx); in tsnep_tx_ring_create()
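
The create/cleanup fragments show the descriptor ring carved out of whole DMA-coherent pages: entry number TSNEP_RING_ENTRIES_PER_PAGE * i + j lives in page i at byte offset TSNEP_DESC_SIZE * j, and desc_dma applies the same offset to the page's bus address. A minimal userspace sketch of that index arithmetic, with calloc() standing in for dma_alloc_coherent() and assumed values for PAGE_SIZE, TSNEP_DESC_SIZE, and TSNEP_RING_SIZE (the real constants are in tsnep.h):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Assumed values for illustration; the driver's real constants are in tsnep.h. */
#define PAGE_SIZE                   4096
#define TSNEP_DESC_SIZE             256
#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
#define TSNEP_RING_SIZE             256
#define TSNEP_RING_PAGE_COUNT       (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)

int main(void)
{
	void *page[TSNEP_RING_PAGE_COUNT];

	/* Stand-in for dma_alloc_coherent(): one buffer per ring page. */
	for (int i = 0; i < TSNEP_RING_PAGE_COUNT; i++)
		page[i] = calloc(1, PAGE_SIZE);

	/* Entry k maps to page k/EPP at byte offset (k%EPP)*TSNEP_DESC_SIZE,
	 * matching entry->desc and entry->desc_dma in tsnep_tx_ring_create().
	 */
	for (int k = 0; k < TSNEP_RING_SIZE; k++) {
		int i = k / TSNEP_RING_ENTRIES_PER_PAGE;
		int j = k % TSNEP_RING_ENTRIES_PER_PAGE;
		void *desc = (uint8_t *)page[i] + TSNEP_DESC_SIZE * j;

		if (k < 3 || k == TSNEP_RING_SIZE - 1)
			printf("entry %3d -> page %2d offset %4d (%p)\n",
			       k, i, TSNEP_DESC_SIZE * j, desc);
	}

	for (int i = 0; i < TSNEP_RING_PAGE_COUNT; i++)
		free(page[i]);
	return 0;
}
```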
321 static void tsnep_tx_init(struct tsnep_tx *tx) in tsnep_tx_init() argument
325 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_init()
326 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_init()
327 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_init()
328 tx->write = 0; in tsnep_tx_init()
329 tx->read = 0; in tsnep_tx_init()
330 tx->owner_counter = 1; in tsnep_tx_init()
331 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_init()
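
tsnep_tx_init() programs the ring base into a 32-bit register pair and folds TSNEP_RESET_OWNER_COUNTER into the address before the split. A sketch under the assumption that DMA_ADDR_LOW()/DMA_ADDR_HIGH() take the bottom and top halves of the 64-bit bus address; the flag value used here is hypothetical:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed semantics: split a 64-bit bus address into two 32-bit register
 * writes, as the driver's DMA_ADDR_LOW/DMA_ADDR_HIGH macros appear to do.
 */
#define DMA_ADDR_LOW(addr)  ((uint32_t)((addr) & 0xffffffffULL))
#define DMA_ADDR_HIGH(addr) ((uint32_t)((addr) >> 32))
/* Hypothetical flag value; the real TSNEP_RESET_OWNER_COUNTER is in tsnep.h. */
#define TSNEP_RESET_OWNER_COUNTER 0x1

int main(void)
{
	uint64_t desc_dma = 0x0000000123456000ULL; /* example bus address */
	uint64_t dma = desc_dma | TSNEP_RESET_OWNER_COUNTER;

	/* In tsnep_tx_init() these two values go to TSNEP_TX_DESC_ADDR_LOW
	 * and TSNEP_TX_DESC_ADDR_HIGH via iowrite32(); here we just print
	 * the split.
	 */
	printf("low  = 0x%08" PRIx32 "\n", DMA_ADDR_LOW(dma));
	printf("high = 0x%08" PRIx32 "\n", DMA_ADDR_HIGH(dma));
	return 0;
}
```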
334 static void tsnep_tx_enable(struct tsnep_tx *tx) in tsnep_tx_enable() argument
338 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_enable()
345 static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi) in tsnep_tx_disable() argument
350 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_disable()
357 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_disable()
362 while (READ_ONCE(tx->read) != tx->write) { in tsnep_tx_disable()
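
tsnep_tx_disable() first spins on TSNEP_CONTROL with readx_poll_timeout() until the hardware reports TX idle, then waits for tx->read to catch up with tx->write. A rough userspace analog of that bounded register poll, with a fake register and usleep() standing in for the kernel macro's delay, and an assumed bit-0 "TX active" flag:

```c
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for a device register; a real driver would ioread32() it. */
static unsigned int fake_control_reg = 0x1; /* assumed TX-active bit */
static int polls;

static unsigned int read_control(void)
{
	/* Pretend the hardware drains after a few polls. */
	if (++polls == 5)
		fake_control_reg = 0;
	return fake_control_reg;
}

/* Rough analog of readx_poll_timeout(ioread32, addr, val, cond,
 * sleep_us, timeout_us): poll until the TX-active bit clears or the
 * timeout expires.
 */
static int poll_tx_idle(unsigned int sleep_us, unsigned int timeout_us)
{
	for (unsigned int waited = 0; waited <= timeout_us; waited += sleep_us) {
		if (!(read_control() & 0x1))
			return 0;
		usleep(sleep_us);
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("drain %s after %d polls\n",
	       poll_tx_idle(1000, 100000) ? "timed out" : "completed", polls);
	return 0;
}
```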
368 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, in tsnep_tx_activate() argument
371 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
404 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
405 tx->owner_counter++; in tsnep_tx_activate()
406 if (tx->owner_counter == 4) in tsnep_tx_activate()
407 tx->owner_counter = 1; in tsnep_tx_activate()
408 tx->increment_owner_counter--; in tsnep_tx_activate()
409 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
410 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
413 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
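
The tsnep_tx_activate() fragment shows the owner-counter scheme: the counter cycles through 1..3, and the index at which it increments walks backward one slot per lap of the ring, so every descriptor sees a fresh owner value on each pass. A sketch replaying that rotation for three laps, with the ring size assumed to be 256:

```c
#include <stdio.h>

#define TSNEP_RING_SIZE 256 /* assumed; a power of two per TSNEP_RING_MASK use */

int main(void)
{
	int owner_counter = 1;                             /* tsnep_tx_init() */
	int increment_owner_counter = TSNEP_RING_SIZE - 1;

	/* Replay the rotation from tsnep_tx_activate(): the counter bumps
	 * one slot *earlier* each lap, so the same descriptor index gets a
	 * different owner value every time around.
	 */
	for (long n = 0; n < 3L * TSNEP_RING_SIZE; n++) {
		int index = n % TSNEP_RING_SIZE;

		if (index == increment_owner_counter) {
			owner_counter++;
			if (owner_counter == 4)
				owner_counter = 1;
			increment_owner_counter--;
			if (increment_owner_counter < 0)
				increment_owner_counter = TSNEP_RING_SIZE - 1;
			printf("bump at index %3d -> owner %d\n",
			       index, owner_counter);
		}
	}
	return 0;
}
```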
428 static int tsnep_tx_desc_available(struct tsnep_tx *tx) in tsnep_tx_desc_available() argument
430 if (tx->read <= tx->write) in tsnep_tx_desc_available()
431 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
433 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
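
tsnep_tx_desc_available() is the classic one-slot-gap ring computation: keeping one descriptor permanently unused makes a full ring (write one behind read) distinguishable from an empty one (read == write). The same arithmetic with a few worked cases, again assuming a ring size of 256:

```c
#include <assert.h>
#include <stdio.h>

#define TSNEP_RING_SIZE 256 /* assumed; must be a power of two */

/* Same arithmetic as tsnep_tx_desc_available(): the -1 reserves the gap
 * slot that keeps full and empty states distinct.
 */
static int desc_available(int read, int write)
{
	if (read <= write)
		return TSNEP_RING_SIZE - write + read - 1;
	return read - write - 1;
}

int main(void)
{
	assert(desc_available(0, 0) == TSNEP_RING_SIZE - 1);       /* empty */
	assert(desc_available(10, 9) == 0);                        /* full  */
	assert(desc_available(100, 200) == TSNEP_RING_SIZE - 101); /* wrap  */
	assert(desc_available(200, 100) == 99);
	printf("empty ring has %d free descriptors\n", desc_available(0, 0));
	return 0;
}
```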
436 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) in tsnep_tx_map() argument
438 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
446 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_tx_map()
468 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
476 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) in tsnep_tx_unmap() argument
478 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
484 entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; in tsnep_tx_unmap()
507 struct tsnep_tx *tx) in tsnep_xmit_frame_ring() argument
518 if (tsnep_tx_desc_available(tx) < count) { in tsnep_xmit_frame_ring()
522 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
527 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
530 retval = tsnep_tx_map(skb, tx, count); in tsnep_xmit_frame_ring()
532 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
536 tx->dropped++; in tsnep_xmit_frame_ring()
546 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xmit_frame_ring()
548 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xmit_frame_ring()
555 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
557 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { in tsnep_xmit_frame_ring()
559 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
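
tsnep_xmit_frame_ring() stops the subqueue at two points: up front when the frame's descriptor count does not fit, and after queuing when fewer than MAX_SKB_FRAGS + 1 slots remain, which guarantees the next worst-case frame fits. A producer-side sketch of that flow; queue_stopped stands in for netif_stop_subqueue(), and the ring size is assumed:

```c
#include <stdbool.h>
#include <stdio.h>

#define TSNEP_RING_SIZE 256            /* assumed */
#define TSNEP_RING_MASK (TSNEP_RING_SIZE - 1)
#define MAX_SKB_FRAGS   17             /* kernel default */

static int ring_read, ring_write;
static bool queue_stopped;

static int desc_available(void)
{
	if (ring_read <= ring_write)
		return TSNEP_RING_SIZE - ring_write + ring_read - 1;
	return ring_read - ring_write - 1;
}

/* Producer-side flow of tsnep_xmit_frame_ring(): refuse frames that do
 * not fit, and stop early when a worst-case frame might not fit next
 * time.
 */
static bool xmit(int count)
{
	if (desc_available() < count) {
		queue_stopped = true;   /* NETDEV_TX_BUSY in the driver */
		return false;
	}

	ring_write = (ring_write + count) & TSNEP_RING_MASK;

	if (desc_available() < MAX_SKB_FRAGS + 1)
		queue_stopped = true;
	return true;
}

int main(void)
{
	int sent = 0;

	while (!queue_stopped && xmit(4)) /* 4-descriptor frames */
		sent++;
	printf("queued %d frames, %d descriptors free at stop\n",
	       sent, desc_available());
	return 0;
}
```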
565 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, in tsnep_xdp_tx_map() argument
568 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
581 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_xdp_tx_map()
607 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
622 struct tsnep_tx *tx, u32 type) in tsnep_xdp_xmit_frame_ring() argument
636 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) in tsnep_xdp_xmit_frame_ring()
639 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
642 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); in tsnep_xdp_xmit_frame_ring()
644 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
647 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
654 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xdp_xmit_frame_ring()
656 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring()
664 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) in tsnep_xdp_xmit_flush() argument
666 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
671 struct netdev_queue *tx_nq, struct tsnep_tx *tx, in tsnep_xdp_xmit_back() argument
689 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); in tsnep_xdp_xmit_back()
700 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx) in tsnep_xdp_tx_map_zc() argument
705 entry = &tx->entry[tx->write]; in tsnep_xdp_tx_map_zc()
708 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
709 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
714 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map_zc()
720 struct tsnep_tx *tx) in tsnep_xdp_xmit_frame_ring_zc() argument
724 length = tsnep_xdp_tx_map_zc(xdpd, tx); in tsnep_xdp_xmit_frame_ring_zc()
726 tsnep_tx_activate(tx, tx->write, length, true); in tsnep_xdp_xmit_frame_ring_zc()
727 tx->write = (tx->write + 1) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring_zc()
730 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx) in tsnep_xdp_xmit_zc() argument
732 int desc_available = tsnep_tx_desc_available(tx); in tsnep_xdp_xmit_zc()
733 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
744 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
746 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx); in tsnep_xdp_xmit_zc()
754 tsnep_xdp_xmit_flush(tx); in tsnep_xdp_xmit_zc()
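
tsnep_xdp_xmit_zc() caps the batch at the free descriptor count, takes up to that many descriptors from the AF_XDP pool via xsk_tx_peek_release_desc_batch(), activates each one, and rings the doorbell once. A toy stand-in for that bounded batch take:

```c
#include <stdio.h>

#define POOL_PENDING 40 /* pretend descriptors queued by user space */

static int pool_left = POOL_PENDING;

/* Stand-in for xsk_tx_peek_release_desc_batch(): hand out at most 'max'
 * pending descriptors and mark them taken.
 */
static int peek_batch(int max)
{
	int batch = pool_left < max ? pool_left : max;

	pool_left -= batch;
	return batch;
}

int main(void)
{
	int desc_available = 25; /* free TX slots, as in tsnep_xdp_xmit_zc() */
	int batch = peek_batch(desc_available);

	/* One tsnep_xdp_xmit_frame_ring_zc() call per descriptor in the
	 * driver, then a single doorbell write via tsnep_xdp_xmit_flush().
	 */
	for (int i = 0; i < batch; i++)
		; /* map + activate descs[i] */
	printf("sent %d of %d pending, %d still queued\n",
	       batch, POOL_PENDING, pool_left);
	return 0;
}
```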
758 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) in tsnep_tx_poll() argument
767 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
771 if (tx->read == tx->write) in tsnep_tx_poll()
774 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
793 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
825 tx->read = (tx->read + count) & TSNEP_RING_MASK; in tsnep_tx_poll()
827 tx->packets++; in tsnep_tx_poll()
828 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
833 if (tx->xsk_pool) { in tsnep_tx_poll()
835 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
836 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
837 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
838 tsnep_xdp_xmit_zc(tx); in tsnep_tx_poll()
841 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && in tsnep_tx_poll()
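
On the completion side, tsnep_tx_poll() retires descriptors in FIFO order from tx->read and rewakes the queue only once (MAX_SKB_FRAGS + 1) * 2 descriptors are free; the doubled threshold is hysteresis against the stop condition in tsnep_xmit_frame_ring(). A sketch of that wake logic, retiring a fixed four descriptors per step on an assumed 256-entry ring:

```c
#include <stdbool.h>
#include <stdio.h>

#define TSNEP_RING_SIZE 256            /* assumed */
#define TSNEP_RING_MASK (TSNEP_RING_SIZE - 1)
#define MAX_SKB_FRAGS   17             /* kernel default */

static int desc_available(int read, int write)
{
	if (read <= write)
		return TSNEP_RING_SIZE - write + read - 1;
	return read - write - 1;
}

int main(void)
{
	int read = 0, write = 250;      /* nearly full ring, queue stopped */
	bool stopped = true;

	/* Completion side of tsnep_tx_poll(): retire completed frames and
	 * rewake only past double the worst-case frame size, so xmit and
	 * poll do not ping-pong the queue state.
	 */
	while (stopped) {
		read = (read + 4) & TSNEP_RING_MASK;
		if (desc_available(read, write) >= (MAX_SKB_FRAGS + 1) * 2) {
			stopped = false; /* netif_tx_wake_queue() in the driver */
			printf("woken with %d free after retiring up to %d\n",
			       desc_available(read, write), read);
		}
	}
	return 0;
}
```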
851 static bool tsnep_tx_pending(struct tsnep_tx *tx) in tsnep_tx_pending() argument
857 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
860 if (tx->read != tx->write) { in tsnep_tx_pending()
861 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
873 static int tsnep_tx_open(struct tsnep_tx *tx) in tsnep_tx_open() argument
877 retval = tsnep_tx_ring_create(tx); in tsnep_tx_open()
881 tsnep_tx_init(tx); in tsnep_tx_open()
886 static void tsnep_tx_close(struct tsnep_tx *tx) in tsnep_tx_close() argument
888 tsnep_tx_ring_cleanup(tx); in tsnep_tx_close()
1220 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_run_prog() argument
1233 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1265 struct tsnep_tx *tx) in tsnep_xdp_run_prog_zc() argument
1283 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1301 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_finalize_xdp() argument
1305 tsnep_xdp_xmit_flush(tx); in tsnep_finalize_xdp()
1375 struct tsnep_tx *tx; in tsnep_rx_poll() local
1387 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1448 &xdp_status, tx_nq, tx); in tsnep_rx_poll()
1464 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1478 struct tsnep_tx *tx; in tsnep_rx_poll_zc() local
1490 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1548 &xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1573 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1739 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1755 if (queue->tx) in tsnep_poll()
1756 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1802 if (queue->tx && queue->rx) in tsnep_request_irq()
1805 else if (queue->tx) in tsnep_request_irq()
1807 name, queue->tx->queue_index); in tsnep_request_irq()
1860 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open() local
1867 if (tx) in tsnep_queue_open()
1868 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1920 if (queue->tx) in tsnep_queue_enable()
1921 tsnep_tx_enable(queue->tx); in tsnep_queue_enable()
1929 if (queue->tx) in tsnep_queue_disable()
1930 tsnep_tx_disable(queue->tx, &queue->napi); in tsnep_queue_disable()
1948 if (adapter->queue[i].tx) { in tsnep_netdev_open()
1949 retval = tsnep_tx_open(adapter->queue[i].tx); in tsnep_netdev_open()
1991 if (adapter->queue[i].tx) in tsnep_netdev_open()
1992 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
2012 if (adapter->queue[i].tx) in tsnep_netdev_close()
2013 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
2048 queue->tx->xsk_pool = pool; in tsnep_enable_xsk()
2069 queue->tx->xsk_pool = NULL; in tsnep_disable_xsk()
2091 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
2129 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
2130 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
2131 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
2247 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
2256 struct tsnep_tx *tx; in tsnep_netdev_xdp_xmit() local
2263 tx = tsnep_xdp_get_tx(adapter, cpu); in tsnep_netdev_xdp_xmit()
2264 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
2269 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx, in tsnep_netdev_xdp_xmit()
2281 tsnep_xdp_xmit_flush(tx); in tsnep_netdev_xdp_xmit()
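
For ndo_xdp_xmit, tsnep_xdp_get_tx() selects the TX ring by the caller's CPU (&adapter->tx[cpu]), so concurrent CPUs mostly hit distinct rings; the frames are then batched under that ring's netdev_queue lock and flushed with one doorbell write. A sketch of one plausible CPU-to-queue fold, assuming a power-of-two queue count; the driver's actual bounds handling is not shown in these fragments:

```c
#include <stdio.h>

#define NUM_TX_QUEUES 4 /* assumed power of two */

/* Fold the CPU id into the available queue range so that CPUs spread
 * across the rings; mirrors the &adapter->tx[cpu] idea above.
 */
static unsigned int xdp_tx_queue(unsigned int cpu)
{
	return cpu & (NUM_TX_QUEUES - 1);
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> tx queue %u\n", cpu, xdp_tx_queue(cpu));
	return 0;
}
```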
2437 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
2438 adapter->queue[0].tx->adapter = adapter; in tsnep_queue_init()
2439 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2440 adapter->queue[0].tx->queue_index = 0; in tsnep_queue_init()
2468 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
2469 adapter->queue[i].tx->adapter = adapter; in tsnep_queue_init()
2470 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2471 adapter->queue[i].tx->queue_index = i; in tsnep_queue_init()
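
tsnep_queue_init() points each TX queue at its own slice of the register BAR, adapter->addr + TSNEP_QUEUE(i). Assuming TSNEP_QUEUE() is a fixed per-queue stride (the real macro and stride live in tsnep.h), the mapping is plain base-plus-stride arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed stride for illustration; the real TSNEP_QUEUE() macro and the
 * per-queue register block size are defined in tsnep.h.
 */
#define TSNEP_QUEUE_STRIDE 0x100
#define TSNEP_QUEUE(i)     ((i) * TSNEP_QUEUE_STRIDE)

int main(void)
{
	uintptr_t base = 0xfe000000; /* example ioremap()ed BAR address */

	/* Each queue's tx->addr, as set up in tsnep_queue_init(). */
	for (int i = 0; i < 4; i++)
		printf("queue %d regs at 0x%lx\n",
		       i, (unsigned long)(base + TSNEP_QUEUE(i)));
	return 0;
}
```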