Lines matching refs:desc. Each entry gives the source line number, the matching code, and, where the tool could classify the use, the enclosing function and the role of 'desc' there (argument, local, or struct definition).

64 #define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
193 struct desc *desc_tab; /* coherent */
214 struct desc { struct
252 (n) * sizeof(struct desc)) argument
256 ((n) + RX_DESCS) * sizeof(struct desc))
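
Taken together, lines 64, 193, 252 and 256 imply a single coherent DMA
allocation holding RX_DESCS receive descriptors followed by TX_DESCS
transmit descriptors. A minimal sketch of the implied layout follows:
rx_desc_ptr()/tx_desc_ptr() do appear further down the listing, but
their definitions here are reconstructions, and desc_tab_phys is an
assumed field name for the table's bus address.

    /* One pool: RX descriptors first, then TX descriptors. */
    #define POOL_ALLOC_SIZE	(sizeof(struct desc) * (RX_DESCS + TX_DESCS))

    /* Bus address of descriptor n in each half of the table. */
    #define rx_desc_phys(port, n)	((port)->desc_tab_phys + \
					 (n) * sizeof(struct desc))
    #define tx_desc_phys(port, n)	((port)->desc_tab_phys + \
					 ((n) + RX_DESCS) * sizeof(struct desc))

    /* CPU pointers into the coherent table (struct desc *desc_tab). */
    #define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])
    #define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
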
620 static inline void debug_desc(u32 phys, struct desc *desc) in debug_desc() argument
625 phys, desc->next, desc->buf_len, desc->pkt_len, in debug_desc()
626 desc->data, desc->dest_id, desc->src_id, desc->flags, in debug_desc()
627 desc->qos, desc->padlen, desc->vlan_tci, in debug_desc()
628 desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2, in debug_desc()
629 desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5, in debug_desc()
630 desc->src_mac_0, desc->src_mac_1, desc->src_mac_2, in debug_desc()
631 desc->src_mac_3, desc->src_mac_4, desc->src_mac_5); in debug_desc()
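
debug_desc() prints every field of the descriptor, so the record can be
reconstructed from its argument list alone. A sketch of the layout those
fields imply; the field widths and their ordering are assumptions (a
32-bit descriptor shared with the hardware), not taken from the listing.

    struct desc {
    	u32 next;	/* link to the next descriptor */
    	u16 buf_len;	/* capacity of the data buffer */
    	u16 pkt_len;	/* length of the actual packet */
    	u32 data;	/* bus address of the data buffer */
    	u8  dest_id, src_id;
    	u16 flags;
    	u8  qos, padlen;
    	u16 vlan_tci;
    	/* MAC addresses stored byte by byte, as printed above */
    	u8  dst_mac_0, dst_mac_1, dst_mac_2,
    	    dst_mac_3, dst_mac_4, dst_mac_5;
    	u8  src_mac_0, src_mac_1, src_mac_2,
    	    src_mac_3, src_mac_4, src_mac_5;
    };
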
639 struct desc *tab; in queue_get_desc()
647 n_desc = (phys - tab_phys) / sizeof(struct desc); in queue_get_desc()
655 struct desc *desc) in queue_put_desc() argument
657 debug_desc(phys, desc); in queue_put_desc()
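
queue_get_desc() turns the bus address popped off a hardware queue back
into a table index by plain offset arithmetic (line 647), and
queue_put_desc() is its mirror image. A sketch under the assumption of
qmgr_get_entry()/qmgr_put_entry() style queue accessors; those names,
the empty-queue convention and the address mask are not in the listing.

    static inline int queue_get_desc(unsigned int queue, struct port *port,
    				 int is_tx)
    {
    	u32 phys, tab_phys = is_tx ? tx_desc_phys(port, 0)
    				   : rx_desc_phys(port, 0);
    	struct desc *tab = is_tx ? tx_desc_ptr(port, 0)
    				 : rx_desc_ptr(port, 0);
    	int n_desc;

    	phys = qmgr_get_entry(queue);	/* assumed accessor */
    	if (!phys)
    		return -1;		/* queue empty */

    	phys &= ~0x1F;			/* assumed: mask status bits */
    	n_desc = (phys - tab_phys) / sizeof(struct desc);
    	debug_desc(phys, &tab[n_desc]);
    	return n_desc;
    }

    static inline void queue_put_desc(unsigned int queue, u32 phys,
    				  struct desc *desc)
    {
    	debug_desc(phys, desc);
    	qmgr_put_entry(queue, phys);	/* assumed accessor */
    }
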
665 static inline void dma_unmap_tx(struct port *port, struct desc *desc) in dma_unmap_tx() argument
668 dma_unmap_single(&port->netdev->dev, desc->data, in dma_unmap_tx()
669 desc->buf_len, DMA_TO_DEVICE); in dma_unmap_tx()
671 dma_unmap_single(&port->netdev->dev, desc->data & ~3, in dma_unmap_tx()
672 ALIGN((desc->data & 3) + desc->buf_len, 4), in dma_unmap_tx()
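
The two dma_unmap_single() calls above read like the arms of one
build-time conditional: a direct unmap when desc->data is exactly the
mapped address, and an adjusted unmap when the mapping was made at a
word-aligned base with a 0-3 byte offset folded into desc->data. A
reconstruction under that assumption (the #ifdef condition itself is a
guess):

    static inline void dma_unmap_tx(struct port *port, struct desc *desc)
    {
    #ifdef __ARMEB__	/* assumed: buffers are mapped 1:1 here */
    	dma_unmap_single(&port->netdev->dev, desc->data,
    			 desc->buf_len, DMA_TO_DEVICE);
    #else			/* desc->data carries a sub-word offset */
    	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
    			 ALIGN((desc->data & 3) + desc->buf_len, 4),
    			 DMA_TO_DEVICE);
    #endif
    }
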
703 struct desc *desc; in eth_poll() local
730 desc = rx_desc_ptr(port, n); in eth_poll()
743 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4)); in eth_poll()
749 desc->buf_len = MAX_MRU; in eth_poll()
750 desc->pkt_len = 0; in eth_poll()
751 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); in eth_poll()
759 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, in eth_poll()
762 dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN, in eth_poll()
765 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); in eth_poll()
768 skb_put(skb, desc->pkt_len); in eth_poll()
781 desc->data = phys + NET_IP_ALIGN; in eth_poll()
783 desc->buf_len = MAX_MRU; in eth_poll()
784 desc->pkt_len = 0; in eth_poll()
785 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); in eth_poll()
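
The eth_poll() fragments show two outcomes per received descriptor: with
no replacement buffer the old one is recycled unread (lines 749-751),
otherwise the filled buffer is handed to the stack (the listing shows
both a full unmap at line 759 and a sync-for-copy variant at line 762)
and a fresh mapping is installed (lines 781-785). A condensed sketch of
the unmap variant; rx_refill_one() and RX_BUFF_SIZE are hypothetical
names, and the stack hand-off is elided.

    static void rx_refill_one(struct port *port, struct net_device *dev,
    			  unsigned int rxfreeq, int n)
    {
    	struct desc *desc = rx_desc_ptr(port, n);
    	struct sk_buff *skb;
    	u32 phys = 0;

    	skb = netdev_alloc_skb(dev, RX_BUFF_SIZE);	/* replacement */
    	if (skb) {
    		phys = dma_map_single(&dev->dev, skb->data,
    				      RX_BUFF_SIZE, DMA_FROM_DEVICE);
    		if (dma_mapping_error(&dev->dev, phys)) {
    			dev_kfree_skb(skb);
    			skb = NULL;
    		}
    	}

    	if (!skb) {
    		/* out of memory: recycle the old buffer unread */
    		dev->stats.rx_dropped++;
    	} else {
    		/* old buffer goes to the stack elsewhere:
    		 * dma_unmap_single(), skb_put(old, desc->pkt_len),
    		 * netif_receive_skb() ... then install the new one */
    		desc->data = phys + NET_IP_ALIGN;
    	}
    	desc->buf_len = MAX_MRU;
    	desc->pkt_len = 0;
    	queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
    }
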
806 struct desc *desc; in eth_txdone_irq() local
814 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc); in eth_txdone_irq()
816 desc = tx_desc_ptr(port, n_desc); in eth_txdone_irq()
817 debug_desc(phys, desc); in eth_txdone_irq()
821 port->netdev->stats.tx_bytes += desc->pkt_len; in eth_txdone_irq()
823 dma_unmap_tx(port, desc); in eth_txdone_irq()
833 queue_put_desc(port->plat->txreadyq, phys, desc); in eth_txdone_irq()
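
eth_txdone_irq() maps each completed entry's bus address back to a table
index (line 814), accounts the packet, unmaps the buffer and recycles
the descriptor onto txreadyq so eth_xmit() can reuse it. A sketch of one
completion; txdone_one() and the tx_buff_tab skb table are assumed
names.

    static void txdone_one(struct port *port, u32 phys)
    {
    	int n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
    	struct desc *desc = tx_desc_ptr(port, n_desc);

    	debug_desc(phys, desc);

    	port->netdev->stats.tx_packets++;
    	port->netdev->stats.tx_bytes += desc->pkt_len;

    	dma_unmap_tx(port, desc);
    	dev_kfree_skb_irq(port->tx_buff_tab[n_desc]);	/* assumed table */
    	port->tx_buff_tab[n_desc] = NULL;

    	/* hand the descriptor back for the next transmit */
    	queue_put_desc(port->plat->txreadyq, phys, desc);
    }
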
851 struct desc *desc; in eth_xmit() local
893 desc = tx_desc_ptr(port, n); in eth_xmit()
900 desc->data = phys + offset; in eth_xmit()
901 desc->buf_len = desc->pkt_len = len; in eth_xmit()
905 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); in eth_xmit()
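
eth_xmit() maps the packet, points the next free descriptor at it and
pushes the descriptor's bus address onto the per-port TX queue. The
sketch below keeps the packet's 0-3 byte misalignment in 'offset',
mirroring the masking seen in dma_unmap_tx(); the mapping call and
error path around the four listed lines are reconstructions.

    offset = (uintptr_t)skb->data & 3;	/* keep sub-word alignment */
    phys = dma_map_single(&dev->dev, skb->data - offset,
    		      ALIGN(offset + len, 4), DMA_TO_DEVICE);
    if (dma_mapping_error(&dev->dev, phys)) {
    	dev_kfree_skb(skb);
    	dev->stats.tx_dropped++;
    	return NETDEV_TX_OK;
    }

    desc = tx_desc_ptr(port, n);
    desc->data = phys + offset;		/* hardware reads from here */
    desc->buf_len = desc->pkt_len = len;

    queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
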
1126 struct desc *desc = rx_desc_ptr(port, i); in init_queues() local
1138 desc->buf_len = MAX_MRU; in init_queues()
1139 desc->data = dma_map_single(&port->netdev->dev, data, in init_queues()
1141 if (dma_mapping_error(&port->netdev->dev, desc->data)) { in init_queues()
1145 desc->data += NET_IP_ALIGN; in init_queues()
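
init_queues() gives every RX descriptor a mapped buffer up front, then
nudges desc->data forward by NET_IP_ALIGN so the IP header ends up
aligned (line 1145). A sketch of one pass over the RX half; the buffer
allocation and RX_BUFF_SIZE are assumptions.

    for (i = 0; i < RX_DESCS; i++) {
    	struct desc *desc = rx_desc_ptr(port, i);
    	void *data = kmalloc(RX_BUFF_SIZE, GFP_KERNEL);	/* assumed */

    	if (!data)
    		return -ENOMEM;

    	desc->buf_len = MAX_MRU;
    	desc->data = dma_map_single(&port->netdev->dev, data,
    				    RX_BUFF_SIZE, DMA_FROM_DEVICE);
    	if (dma_mapping_error(&port->netdev->dev, desc->data)) {
    		kfree(data);
    		return -EIO;
    	}
    	desc->data += NET_IP_ALIGN;	/* align the IP header */
    }
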
1158 struct desc *desc = rx_desc_ptr(port, i); in destroy_queues() local
1162 desc->data - NET_IP_ALIGN, in destroy_queues()
1168 struct desc *desc = tx_desc_ptr(port, i); in destroy_queues() local
1171 dma_unmap_tx(port, desc); in destroy_queues()
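
destroy_queues() undoes both halves: RX unmaps subtract the NET_IP_ALIGN
offset applied at init (line 1162), and TX buffers go through the same
dma_unmap_tx() helper as the completion path. A sketch; the rx/tx buffer
tables are assumed names, matching the init sketch above.

    for (i = 0; i < RX_DESCS; i++) {
    	struct desc *desc = rx_desc_ptr(port, i);

    	if (port->rx_buff_tab[i]) {		/* assumed table */
    		dma_unmap_single(&port->netdev->dev,
    				 desc->data - NET_IP_ALIGN,
    				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
    		kfree(port->rx_buff_tab[i]);
    	}
    }
    for (i = 0; i < TX_DESCS; i++) {
    	struct desc *desc = tx_desc_ptr(port, i);

    	if (port->tx_buff_tab[i]) {		/* assumed table */
    		dma_unmap_tx(port, desc);
    		dev_kfree_skb(port->tx_buff_tab[i]);
    	}
    }
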
1320 struct desc *desc; in eth_close() local
1324 desc = tx_desc_ptr(port, n); in eth_close()
1326 desc->buf_len = desc->pkt_len = 1; in eth_close()
1328 queue_put_desc(TX_QUEUE(port->id), phys, desc); in eth_close()
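
The eth_close() lines show a drain trick: descriptors are requeued with
buf_len = pkt_len = 1 so the hardware can flush its TX pipeline with
harmless one-byte dummies. A sketch of the loop those lines suggest; the
loop bound and the source of each phys are assumptions.

    /* push dummy one-byte descriptors to drain the TX path */
    while ((n = queue_get_desc(port->plat->txreadyq, port, 1)) >= 0) {
    	struct desc *desc = tx_desc_ptr(port, n);
    	u32 phys = tx_desc_phys(port, n);

    	desc->buf_len = desc->pkt_len = 1;	/* minimal dummy */
    	queue_put_desc(TX_QUEUE(port->id), phys, desc);
    }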