// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/net/ethernet/ec_bhf.c
 *
 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
 */

/* This is a driver for the EtherCAT master module present on the CCAT FPGA,
 * which can be found on Beckhoff CX50xx industrial PCs.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/stat.h>

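/* The driver does not register an interrupt handler; an hrtimer polls the
 * RX and TX descriptor rings instead.  TIMER_INTERVAL_NSEC is the default
 * polling interval in nanoseconds and can be overridden through the
 * polling_frequency module parameter.
 */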
#define TIMER_INTERVAL_NSEC	20000

#define INFO_BLOCK_SIZE		0x10
#define INFO_BLOCK_TYPE		0x0
#define INFO_BLOCK_REV		0x2
#define INFO_BLOCK_BLK_CNT	0x4
#define INFO_BLOCK_TX_CHAN	0x4
#define INFO_BLOCK_RX_CHAN	0x5
#define INFO_BLOCK_OFFSET	0x8

#define EC_MII_OFFSET		0x4
#define EC_FIFO_OFFSET		0x8
#define EC_MAC_OFFSET		0xc

#define MAC_FRAME_ERR_CNT	0x0
#define MAC_RX_ERR_CNT		0x1
#define MAC_CRC_ERR_CNT		0x2
#define MAC_LNK_LST_ERR_CNT	0x3
#define MAC_TX_FRAME_CNT	0x10
#define MAC_RX_FRAME_CNT	0x14
#define MAC_TX_FIFO_LVL		0x20
#define MAC_DROPPED_FRMS	0x28
#define MAC_CONNECTED_CCAT_FLAG	0x78

#define MII_MAC_ADDR		0x8
#define MII_MAC_FILT_FLAG	0xe
#define MII_LINK_STATUS		0xf

#define FIFO_TX_REG		0x0
#define FIFO_TX_RESET		0x8
#define FIFO_RX_REG		0x10
#define FIFO_RX_ADDR_VALID	(1u << 31)
#define FIFO_RX_RESET		0x18

#define DMA_CHAN_OFFSET		0x1000
#define DMA_CHAN_SIZE		0x8

#define DMA_WINDOW_SIZE_MASK	0xfffffffc

#define ETHERCAT_MASTER_ID	0x14

static const struct pci_device_id ids[] = {
	{ PCI_DEVICE(0x15ec, 0x5000), },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);

struct rx_header {
#define RXHDR_NEXT_ADDR_MASK	0xffffffu
#define RXHDR_NEXT_VALID	(1u << 31)
	__le32 next;
#define RXHDR_NEXT_RECV_FLAG	0x1
	__le32 recv;
#define RXHDR_LEN_MASK		0xfffu
	__le16 len;
	__le16 port;
	__le32 reserved;
	u8 timestamp[8];
} __packed;

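/* Payload bytes carried by a single descriptor.  Together with the
 * 0x18-byte rx_header this makes a complete struct rx_desc exactly
 * 0x800 bytes long.
 */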
#define PKT_PAYLOAD_SIZE	0x7e8
struct rx_desc {
	struct rx_header header;
	u8 data[PKT_PAYLOAD_SIZE];
} __packed;

struct tx_header {
	__le16 len;
#define TX_HDR_PORT_0		0x1
#define TX_HDR_PORT_1		0x2
	u8 port;
	u8 ts_enable;
#define TX_HDR_SENT		0x1
	__le32 sent;
	u8 timestamp[8];
} __packed;

struct tx_desc {
	struct tx_header header;
	u8 data[PKT_PAYLOAD_SIZE];
} __packed;

#define FIFO_SIZE		64

static long polling_frequency = TIMER_INTERVAL_NSEC;

struct bhf_dma {
	u8 *buf;		/* aligned sub-buffer handed to the hardware */
	size_t len;
	dma_addr_t buf_phys;

	u8 *alloc;		/* raw coherent allocation it is carved from */
	size_t alloc_len;
	dma_addr_t alloc_phys;
};

struct ec_bhf_priv {
	struct net_device *net_dev;
	struct pci_dev *dev;

	void __iomem *io;
	void __iomem *dma_io;

	struct hrtimer hrtimer;

	int tx_dma_chan;
	int rx_dma_chan;
	void __iomem *ec_io;
	void __iomem *fifo_io;
	void __iomem *mii_io;
	void __iomem *mac_io;

	struct bhf_dma rx_buf;
	struct rx_desc *rx_descs;
	int rx_dnext;
	int rx_dcount;

	struct bhf_dma tx_buf;
	struct tx_desc *tx_descs;
	int tx_dcount;
	int tx_dnext;

	u64 stat_rx_bytes;
	u64 stat_tx_bytes;
};

#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)

static void ec_bhf_reset(struct ec_bhf_priv *priv)
{
	iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
	iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
	iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
	iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);

	iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
	iowrite8(0, priv->fifo_io + FIFO_RX_RESET);

	iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
}

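/* Hand a prepared TX descriptor to the hardware.  The single register write
 * encodes the frame length (rounded up to 8 bytes) in the upper bits and the
 * descriptor's byte offset within the TX DMA buffer in the lower bits.
 */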
static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
{
	u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
	u32 addr = (u8 *)desc - priv->tx_buf.buf;

	iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
}

static int ec_bhf_desc_sent(struct tx_desc *desc)
{
	return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
}

static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
{
	if (unlikely(netif_queue_stopped(priv->net_dev))) {
		/* Make sure that we perceive changes to tx_dnext. */
		smp_rmb();

		if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
			netif_wake_queue(priv->net_dev);
	}
}

static int ec_bhf_pkt_received(struct rx_desc *desc)
{
	return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
}

static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
{
	iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
		  priv->fifo_io + FIFO_RX_REG);
}

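/* Walk the RX ring in order: copy each received frame into a fresh skb, hand
 * it to the stack, clear the descriptor's recv flag and give the descriptor
 * back to the RX FIFO.  The extra 4 bytes subtracted from the reported length
 * are assumed to be the trailing frame check sequence.
 */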
static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
{
	struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];

	while (ec_bhf_pkt_received(desc)) {
		int pkt_size = (le16_to_cpu(desc->header.len) &
			       RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
		u8 *data = desc->data;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
		if (skb) {
			skb_put_data(skb, data, pkt_size);
			skb->protocol = eth_type_trans(skb, priv->net_dev);
			priv->stat_rx_bytes += pkt_size;

			netif_rx(skb);
		} else {
			dev_err_ratelimited(PRIV_TO_DEV(priv),
					    "Couldn't allocate an sk_buff for a packet of size %u\n",
					    pkt_size);
		}

		desc->header.recv = 0;

		ec_bhf_add_rx_desc(priv, desc);

		priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
		desc = &priv->rx_descs[priv->rx_dnext];
	}
}

static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
{
	struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
						hrtimer);
	ec_bhf_process_rx(priv);
	ec_bhf_process_tx(priv);

	if (!netif_running(priv->net_dev))
		return HRTIMER_NORESTART;

	hrtimer_forward_now(timer, polling_frequency);
	return HRTIMER_RESTART;
}

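/* BAR 0 starts with a table of INFO_BLOCK_SIZE-byte function descriptors.
 * Scan it for the EtherCAT master block, then resolve the MII, FIFO and MAC
 * register windows from the offsets stored relative to that block.
 */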
static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
	struct device *dev = PRIV_TO_DEV(priv);
	unsigned block_count, i;
	void __iomem *ec_info;

	block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
	for (i = 0; i < block_count; i++) {
		u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
				    INFO_BLOCK_TYPE);
		if (type == ETHERCAT_MASTER_ID)
			break;
	}
	if (i == block_count) {
		dev_err(dev, "EtherCAT master with DMA block not found\n");
		return -ENODEV;
	}

	ec_info = priv->io + i * INFO_BLOCK_SIZE;

	priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
	priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);

	priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
	priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
	priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
	priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);

	return 0;
}

static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct tx_desc *desc;
	unsigned len;

	desc = &priv->tx_descs[priv->tx_dnext];

	skb_copy_and_csum_dev(skb, desc->data);
	len = skb->len;

	memset(&desc->header, 0, sizeof(desc->header));
	desc->header.len = cpu_to_le16(len);
	desc->header.port = TX_HDR_PORT_0;

	ec_bhf_send_packet(priv, desc);

	priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;

	if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
		/* Make sure that updates to tx_dnext are perceived
		 * by timer routine.
		 */
		smp_wmb();

		netif_stop_queue(net_dev);
	}

	priv->stat_tx_bytes += len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

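/* Set up one DMA translation window.  Writing all-ones to the channel's
 * address register and reading it back reveals which address bits the
 * channel decodes; the usable window is therefore ~mask + 1 bytes and the
 * buffer handed to the hardware must be aligned to it.
 */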
static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
				struct bhf_dma *buf,
				int channel,
				int size)
{
	int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
	struct device *dev = PRIV_TO_DEV(priv);
	u32 mask;

	iowrite32(0xffffffff, priv->dma_io + offset);

	mask = ioread32(priv->dma_io + offset);
	mask &= DMA_WINDOW_SIZE_MASK;

	/* We want a buffer that is:
	 * - aligned to the mask we just read
	 * - at most the size of the DMA window (~mask + 1 bytes)
	 * To guarantee that an aligned region of that size exists,
	 * allocate twice as much and carve the buffer out of it.
	 */
	buf->len = min_t(int, ~mask + 1, size);
	buf->alloc_len = 2 * buf->len;

	buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
					GFP_KERNEL);
	if (buf->alloc == NULL) {
		dev_err(dev, "Failed to allocate buffer\n");
		return -ENOMEM;
	}

	buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
	buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);

	iowrite32(0, priv->dma_io + offset + 4);
	iowrite32(buf->buf_phys, priv->dma_io + offset);

	return 0;
}

static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
{
	int i = 0;

	priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
	priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf;
	priv->tx_dnext = 0;

	for (i = 0; i < priv->tx_dcount; i++)
		priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
}

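/* Chain the RX descriptors: each 'next' field holds the byte offset of the
 * following descriptor within the RX DMA buffer (the last one wraps back to
 * offset 0), tagged with RXHDR_NEXT_VALID, and every descriptor is pushed
 * into the RX FIFO.
 */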
static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
{
	int i;

	priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
	priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf;
	priv->rx_dnext = 0;

	for (i = 0; i < priv->rx_dcount; i++) {
		struct rx_desc *desc = &priv->rx_descs[i];
		u32 next;

		if (i != priv->rx_dcount - 1)
			next = (u8 *)(desc + 1) - priv->rx_buf.buf;
		else
			next = 0;
		next |= RXHDR_NEXT_VALID;
		desc->header.next = cpu_to_le32(next);
		desc->header.recv = 0;
		ec_bhf_add_rx_desc(priv, desc);
	}
}

static int ec_bhf_open(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);
	int err = 0;

	ec_bhf_reset(priv);

	err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
				   FIFO_SIZE * sizeof(struct rx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate rx buffer\n");
		goto out;
	}
	ec_bhf_setup_rx_descs(priv);

	err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
				   FIFO_SIZE * sizeof(struct tx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate tx buffer\n");
		goto error_rx_free;
	}
	iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
	ec_bhf_setup_tx_descs(priv);

	netif_start_queue(net_dev);

	hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->hrtimer.function = ec_bhf_timer_fun;
	hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);

	return 0;

error_rx_free:
	dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
			  priv->rx_buf.alloc_phys);
out:
	return err;
}

static int ec_bhf_stop(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);

	hrtimer_cancel(&priv->hrtimer);

	ec_bhf_reset(priv);

	netif_tx_disable(net_dev);

	dma_free_coherent(dev, priv->tx_buf.alloc_len,
			  priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
	dma_free_coherent(dev, priv->rx_buf.alloc_len,
			  priv->rx_buf.alloc, priv->rx_buf.alloc_phys);

	return 0;
}

static void
ec_bhf_get_stats(struct net_device *net_dev,
		 struct rtnl_link_stats64 *stats)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);

	stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
				ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
				ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
	stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
	stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
	stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);

	stats->tx_bytes = priv->stat_tx_bytes;
	stats->rx_bytes = priv->stat_rx_bytes;
}

static const struct net_device_ops ec_bhf_netdev_ops = {
	.ndo_start_xmit		= ec_bhf_start_xmit,
	.ndo_open		= ec_bhf_open,
	.ndo_stop		= ec_bhf_stop,
	.ndo_get_stats64	= ec_bhf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr
};

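/* BAR 0 holds the CCAT function blocks (including the EtherCAT master
 * registers); BAR 2 holds the DMA channel configuration registers.
 */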
static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct net_device *net_dev;
	struct ec_bhf_priv *priv;
	void __iomem *dma_io;
	u8 addr[ETH_ALEN];
	void __iomem *io;
	int err = 0;

	err = pci_enable_device(dev);
	if (err)
		return err;

	pci_set_master(dev);

	err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		goto err_disable_dev;
	}

	err = pci_request_regions(dev, "ec_bhf");
	if (err) {
		dev_err(&dev->dev, "Failed to request pci memory regions\n");
		goto err_disable_dev;
	}

	io = pci_iomap(dev, 0, 0);
	if (!io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 0");
		err = -EIO;
		goto err_release_regions;
	}

	dma_io = pci_iomap(dev, 2, 0);
	if (!dma_io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 2");
		err = -EIO;
		goto err_unmap;
	}

	net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto err_unmap_dma_io;
	}

	pci_set_drvdata(dev, net_dev);
	SET_NETDEV_DEV(net_dev, &dev->dev);

	net_dev->features = 0;
	net_dev->flags |= IFF_NOARP;

	net_dev->netdev_ops = &ec_bhf_netdev_ops;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->io = io;
	priv->dma_io = dma_io;
	priv->dev = dev;

	err = ec_bhf_setup_offsets(priv);
	if (err < 0)
		goto err_free_net_dev;

	memcpy_fromio(addr, priv->mii_io + MII_MAC_ADDR, ETH_ALEN);
	eth_hw_addr_set(net_dev, addr);

	err = register_netdev(net_dev);
	if (err < 0)
		goto err_free_net_dev;

	return 0;

err_free_net_dev:
	free_netdev(net_dev);
err_unmap_dma_io:
	pci_iounmap(dev, dma_io);
err_unmap:
	pci_iounmap(dev, io);
err_release_regions:
	pci_release_regions(dev);
err_disable_dev:
	pci_disable_device(dev);

	return err;
}

static void ec_bhf_remove(struct pci_dev *dev)
{
	struct net_device *net_dev = pci_get_drvdata(dev);
	struct ec_bhf_priv *priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);

	pci_iounmap(dev, priv->dma_io);
	pci_iounmap(dev, priv->io);

	free_netdev(net_dev);

	pci_release_regions(dev);
	pci_disable_device(dev);
}

static struct pci_driver pci_driver = {
	.name		= "ec_bhf",
	.id_table	= ids,
	.probe		= ec_bhf_probe,
	.remove		= ec_bhf_remove,
};
module_pci_driver(pci_driver);

module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling timer interval in ns");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");