// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>

/* Number of buffer descriptors in each DMA ring */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */

#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */

#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */

/* Bits in the per-descriptor control word */
#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */

/* Interrupt-coalescing fields in the channel control registers */
#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */

#define XAXIDMA_DELAY_SHIFT		24
#define XAXIDMA_COALESCE_SHIFT		16

#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

/* Bits in the per-descriptor status word (written back by the DMA engine) */
#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */

/* NIXGE control register block, offset from the ctrl_regs base */
#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

/* MDIO operation encoding for the NIXGE_REG_MDIO_* registers */
#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)
#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

/* Packet size info */
#define NIXGE_HDR_SIZE		14 /* Size of Ethernet header */
#define NIXGE_TRL_SIZE		4 /* Size of Ethernet trailer (FCS) */
#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)

/* Hardware scatter-gather buffer descriptor.  The field order and sizes
 * mirror the AXI DMA descriptor layout that the engine reads from memory,
 * so they must not be reordered.  sw_id_offset is a software-only field
 * the driver uses to stash the owning skb pointer.
 */
struct nixge_hw_dma_bd {
	u32 next;	/* bus address of next descriptor in the ring */
	u32 reserved1;
	u32 phys;	/* bus address of the data buffer */
	u32 reserved2;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;	/* length + SOF/EOF control bits */
	u32 status;	/* completion/error status written by the engine */
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	/* NOTE(review): skb pointers are cast into this u32 field below —
	 * this only holds on 32-bit platforms; verify before enabling on
	 * a 64-bit build.
	 */
	u32 sw_id_offset;
	u32 reserved5;
	u32 reserved6;
};

/* Per-TX-descriptor bookkeeping so completed buffers can be unmapped */
struct nixge_tx_skb {
	struct sk_buff *skb;		/* non-NULL only on the last BD of a frame */
	dma_addr_t mapping;		/* DMA handle for this BD's buffer */
	size_t size;			/* mapped length */
	bool mapped_as_page;		/* dma_unmap_page vs dma_unmap_single */
};

/* Driver private state, stored in netdev_priv() */
struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	/* last link state reported to the log, see nixge_handle_link_change() */
	int link;
	unsigned int speed;
	unsigned int duplex;

	/* MDIO bus data */
	struct mii_bus *mii_bus;	/* MII bus reference */

	/* IO registers, dma functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;
	u32 last_link;

	/* Buffer descriptors */
	struct nixge_hw_dma_bd *tx_bd_v;	/* TX ring, CPU view */
	struct nixge_tx_skb *tx_skb;		/* parallel TX bookkeeping array */
	dma_addr_t tx_bd_p;			/* TX ring, bus address */

	struct nixge_hw_dma_bd *rx_bd_v;	/* RX ring, CPU view */
	dma_addr_t rx_bd_p;			/* RX ring, bus address */
	u32 tx_bd_ci;				/* TX completion index */
	u32 tx_bd_tail;				/* next free TX descriptor */
	u32 rx_bd_ci;				/* next RX descriptor to reap */

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

/* Write a 32-bit value to a DMA engine register */
static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

/* Read a 32-bit value from a DMA engine register */
static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

/* Write a 32-bit value to a NIXGE control register */
static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

/* Read a 32-bit value from a NIXGE control register */
static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

/* Poll a ctrl/dma register until (cond) holds or timeout_us elapses */
#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

/* Tear down both DMA descriptor rings: unmap and free every RX buffer,
 * then release the coherent ring memory and the TX bookkeeping array.
 *
 * NOTE(review): the RX loop dereferences priv->rx_bd_v *before* the NULL
 * check below it.  nixge_hw_dma_bd_init() jumps here on its early
 * allocation failures (e.g. tx_bd_v alloc failing before rx_bd_v is
 * allocated), in which case rx_bd_v is NULL and this loop oopses —
 * verify and guard the loop on rx_bd_v.
 */
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < RX_BD_NUM; i++) {
		dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys,
				 NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (priv->rx_bd_v[i].sw_id_offset));
	}

	if (priv->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}

/* Allocate and link both descriptor rings, pre-fill the RX ring with
 * jumbo-sized skbs, program the interrupt-coalescing parameters, and
 * start both DMA channels.  Returns 0 on success or -ENOMEM after
 * releasing whatever was allocated.
 */
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					    &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kzalloc(ndev->dev.parent,
				    sizeof(*priv->tx_skb) *
				    TX_BD_NUM,
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					    &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	/* Link each TX descriptor's "next" to the following one,
	 * wrapping the last back to the first (circular ring).
	 */
	for (i = 0; i < TX_BD_NUM; i++) {
		priv->tx_bd_v[i].next = priv->tx_bd_p +
					sizeof(*priv->tx_bd_v) *
					((i + 1) % TX_BD_NUM);
	}

	/* Same circular linking for RX, plus a receive buffer per BD */
	for (i = 0; i < RX_BD_NUM; i++) {
		priv->rx_bd_v[i].next = priv->rx_bd_p +
					sizeof(*priv->rx_bd_v) *
					((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		/* NOTE(review): pointer truncated into a u32 descriptor
		 * field — 32-bit platforms only, see struct comment.
		 */
		priv->rx_bd_v[i].sw_id_offset = (u32)skb;
		/* NOTE(review): dma_map_single() result is not checked
		 * with dma_mapping_error() here — confirm acceptable.
		 */
		priv->rx_bd_v[i].phys =
			dma_map_single(ndev->dev.parent,
				       skb->data,
				       NIXGE_MAX_JUMBO_FRAME_SIZE,
				       DMA_FROM_DEVICE);
		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
			    (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

/* Reset one AXI DMA channel (TX or RX, selected by @offset) and busy-wait
 * until the engine clears the reset bit, logging on timeout.
 */
static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	/* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

/* Full device reset: reset both DMA channels, rebuild the descriptor
 * rings, and refresh the transmit-watchdog timestamp.
 */
static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

/* phylib adjust_link callback: cache the new link/speed/duplex and log
 * the change only when something actually changed.
 */
static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

/* Undo the DMA mapping for one TX bookkeeping entry and free its skb
 * (if this entry was the last BD of a frame).  Safe to call on entries
 * that were never mapped: both fields are checked before use.
 */
static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

/* TX completion: walk the ring from tx_bd_ci, reclaim every descriptor
 * the engine marked complete, update stats, and wake the queue if any
 * work was done.
 */
static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		/* clear so the slot reads as free for nixge_check_tx_bd_space() */
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

/* Return NETDEV_TX_BUSY if the descriptor num_frag slots ahead of the
 * tail still carries status bits (i.e. the ring lacks room for a frame
 * with that many fragments), 0 otherwise.
 */
static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

/* ndo_start_xmit: map the skb head and each page fragment onto
 * consecutive TX descriptors (SOF on the first, EOF on the last), then
 * bump the hardware tail pointer to kick the engine.  On a mapping
 * failure mid-frame, unwinds every descriptor mapped so far.
 */
static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		/* NOTE(review): returns NETDEV_TX_OK without consuming or
		 * freeing the skb after stopping the queue — confirm the
		 * intended contract (NETDEV_TX_BUSY would requeue).
		 */
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_OK;
	}

	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
		goto drop;

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_p->phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
			goto frag_err;

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_p->phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
	/* Start the transfer */
	nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
frag_err:
	/* Unwind the fragments mapped so far, walking the tail backwards */
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	/* NOTE(review): after the loop, tx_bd_tail/tx_skb should be back at
	 * the head descriptor, whose linear mapping is unmapped here; but
	 * nixge_tx_skb_unmap() in the loop already cleared mappings, so
	 * tx_skb->mapping's value here deserves a re-check.
	 */
	dma_unmap_single(priv->ndev->dev.parent,
			 tx_skb->mapping,
			 tx_skb->size, DMA_TO_DEVICE);
drop:
	/* NOTE(review): skb is not freed on the drop path — verify whether
	 * the caller or this function owns it here.
	 */
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
537*492caffaSMoritz Fischer 538*492caffaSMoritz Fischer static int nixge_recv(struct net_device *ndev, int budget) 539*492caffaSMoritz Fischer { 540*492caffaSMoritz Fischer struct nixge_priv *priv = netdev_priv(ndev); 541*492caffaSMoritz Fischer struct sk_buff *skb, *new_skb; 542*492caffaSMoritz Fischer struct nixge_hw_dma_bd *cur_p; 543*492caffaSMoritz Fischer dma_addr_t tail_p = 0; 544*492caffaSMoritz Fischer u32 packets = 0; 545*492caffaSMoritz Fischer u32 length = 0; 546*492caffaSMoritz Fischer u32 size = 0; 547*492caffaSMoritz Fischer 548*492caffaSMoritz Fischer cur_p = &priv->rx_bd_v[priv->rx_bd_ci]; 549*492caffaSMoritz Fischer 550*492caffaSMoritz Fischer while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK && 551*492caffaSMoritz Fischer budget > packets)) { 552*492caffaSMoritz Fischer tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) * 553*492caffaSMoritz Fischer priv->rx_bd_ci; 554*492caffaSMoritz Fischer 555*492caffaSMoritz Fischer skb = (struct sk_buff *)(cur_p->sw_id_offset); 556*492caffaSMoritz Fischer 557*492caffaSMoritz Fischer length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 558*492caffaSMoritz Fischer if (length > NIXGE_MAX_JUMBO_FRAME_SIZE) 559*492caffaSMoritz Fischer length = NIXGE_MAX_JUMBO_FRAME_SIZE; 560*492caffaSMoritz Fischer 561*492caffaSMoritz Fischer dma_unmap_single(ndev->dev.parent, cur_p->phys, 562*492caffaSMoritz Fischer NIXGE_MAX_JUMBO_FRAME_SIZE, 563*492caffaSMoritz Fischer DMA_FROM_DEVICE); 564*492caffaSMoritz Fischer 565*492caffaSMoritz Fischer skb_put(skb, length); 566*492caffaSMoritz Fischer 567*492caffaSMoritz Fischer skb->protocol = eth_type_trans(skb, ndev); 568*492caffaSMoritz Fischer skb_checksum_none_assert(skb); 569*492caffaSMoritz Fischer 570*492caffaSMoritz Fischer /* For now mark them as CHECKSUM_NONE since 571*492caffaSMoritz Fischer * we don't have offload capabilities 572*492caffaSMoritz Fischer */ 573*492caffaSMoritz Fischer skb->ip_summed = CHECKSUM_NONE; 574*492caffaSMoritz Fischer 575*492caffaSMoritz 
		/* Tail of nixge_recv(): hand the frame up via GRO, then re-arm
		 * this descriptor with a freshly allocated receive buffer.
		 */
		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     NIXGE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		/* NOTE(review): casting an skb pointer to u32 truncates on
		 * 64-bit platforms — confirm sw_id_offset is wide enough to
		 * hold a kernel pointer in the descriptor layout.
		 */
		cur_p->sw_id_offset = (u32)new_skb;

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

/* NAPI poll: drain up to @budget received frames. When under budget,
 * complete NAPI, then either reschedule (more RX status pending) or
 * re-enable RX completion/delay interrupts.
 */
static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = 0;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If there's more, reschedule, but clear */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* if not, turn on RX IRQs again ... */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

/* TX DMA interrupt: ack completions and reap sent buffers; on DMA
 * error, mask both channels' interrupts and defer recovery to the
 * error tasklet.
 */
static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->tx_bd_v[priv->tx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/* RX DMA interrupt: on completion/delay, mask RX interrupts and hand
 * work to NAPI; on DMA error, mask both channels and schedule the
 * error tasklet.
 */
static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Turn off IRQs because NAPI */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x\n",
			   (priv->rx_bd_v[priv->rx_bd_ci]).phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/* DMA error tasklet: reset both channels, unmap every in-flight TX
 * skb, scrub all descriptors, then reprogram default coalesce/delay
 * values and restart both engines.
 */
static void nixge_dma_err_handler(unsigned long data)
{
	struct nixge_priv *lp = (struct nixge_priv *)data;
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];
		nixge_tx_skb_unmap(lp, tx_skb);

		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	/* Rings are clean again; restart both rings from index 0. */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			    (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting
	 */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}

/* ndo_open: reset hardware, connect and start the PHY, arm the DMA
 * error tasklet, enable NAPI, then request both DMA interrupts before
 * starting the TX queue.
 */
static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
		     (unsigned long)priv);

	napi_enable(&priv->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}

/* ndo_stop: quiesce the TX queue and NAPI, detach the PHY, clear the
 * run/stop bit on both DMA channels, then release IRQs and descriptor
 * rings.
 */
static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}

/* ndo_change_mtu: only permitted while the interface is down; the new
 * MTU plus header/trailer overhead must fit a jumbo frame buffer.
 */
static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	    NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

/* Program ndev->dev_addr into the MAC registers: bytes 2-5 into the
 * LSB register, bytes 0-1 into the MSB register.
 */
static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2]) << 24 |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

/* ndo_set_mac_address: validate and store via eth_mac_addr(), then
 * push the new address to hardware on success.
 */
static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);
923*492caffaSMoritz Fischer 924*492caffaSMoritz Fischer return err; 925*492caffaSMoritz Fischer } 926*492caffaSMoritz Fischer 927*492caffaSMoritz Fischer static const struct net_device_ops nixge_netdev_ops = { 928*492caffaSMoritz Fischer .ndo_open = nixge_open, 929*492caffaSMoritz Fischer .ndo_stop = nixge_stop, 930*492caffaSMoritz Fischer .ndo_start_xmit = nixge_start_xmit, 931*492caffaSMoritz Fischer .ndo_change_mtu = nixge_change_mtu, 932*492caffaSMoritz Fischer .ndo_set_mac_address = nixge_net_set_mac_address, 933*492caffaSMoritz Fischer .ndo_validate_addr = eth_validate_addr, 934*492caffaSMoritz Fischer }; 935*492caffaSMoritz Fischer 936*492caffaSMoritz Fischer static void nixge_ethtools_get_drvinfo(struct net_device *ndev, 937*492caffaSMoritz Fischer struct ethtool_drvinfo *ed) 938*492caffaSMoritz Fischer { 939*492caffaSMoritz Fischer strlcpy(ed->driver, "nixge", sizeof(ed->driver)); 940*492caffaSMoritz Fischer strlcpy(ed->bus_info, "platform", sizeof(ed->driver)); 941*492caffaSMoritz Fischer } 942*492caffaSMoritz Fischer 943*492caffaSMoritz Fischer static int nixge_ethtools_get_coalesce(struct net_device *ndev, 944*492caffaSMoritz Fischer struct ethtool_coalesce *ecoalesce) 945*492caffaSMoritz Fischer { 946*492caffaSMoritz Fischer struct nixge_priv *priv = netdev_priv(ndev); 947*492caffaSMoritz Fischer u32 regval = 0; 948*492caffaSMoritz Fischer 949*492caffaSMoritz Fischer regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); 950*492caffaSMoritz Fischer ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 951*492caffaSMoritz Fischer >> XAXIDMA_COALESCE_SHIFT; 952*492caffaSMoritz Fischer regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); 953*492caffaSMoritz Fischer ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 954*492caffaSMoritz Fischer >> XAXIDMA_COALESCE_SHIFT; 955*492caffaSMoritz Fischer return 0; 956*492caffaSMoritz Fischer } 957*492caffaSMoritz Fischer 958*492caffaSMoritz Fischer static int 
nixge_ethtools_set_coalesce(struct net_device *ndev, 959*492caffaSMoritz Fischer struct ethtool_coalesce *ecoalesce) 960*492caffaSMoritz Fischer { 961*492caffaSMoritz Fischer struct nixge_priv *priv = netdev_priv(ndev); 962*492caffaSMoritz Fischer 963*492caffaSMoritz Fischer if (netif_running(ndev)) { 964*492caffaSMoritz Fischer netdev_err(ndev, 965*492caffaSMoritz Fischer "Please stop netif before applying configuration\n"); 966*492caffaSMoritz Fischer return -EBUSY; 967*492caffaSMoritz Fischer } 968*492caffaSMoritz Fischer 969*492caffaSMoritz Fischer if (ecoalesce->rx_coalesce_usecs || 970*492caffaSMoritz Fischer ecoalesce->rx_coalesce_usecs_irq || 971*492caffaSMoritz Fischer ecoalesce->rx_max_coalesced_frames_irq || 972*492caffaSMoritz Fischer ecoalesce->tx_coalesce_usecs || 973*492caffaSMoritz Fischer ecoalesce->tx_coalesce_usecs_irq || 974*492caffaSMoritz Fischer ecoalesce->tx_max_coalesced_frames_irq || 975*492caffaSMoritz Fischer ecoalesce->stats_block_coalesce_usecs || 976*492caffaSMoritz Fischer ecoalesce->use_adaptive_rx_coalesce || 977*492caffaSMoritz Fischer ecoalesce->use_adaptive_tx_coalesce || 978*492caffaSMoritz Fischer ecoalesce->pkt_rate_low || 979*492caffaSMoritz Fischer ecoalesce->rx_coalesce_usecs_low || 980*492caffaSMoritz Fischer ecoalesce->rx_max_coalesced_frames_low || 981*492caffaSMoritz Fischer ecoalesce->tx_coalesce_usecs_low || 982*492caffaSMoritz Fischer ecoalesce->tx_max_coalesced_frames_low || 983*492caffaSMoritz Fischer ecoalesce->pkt_rate_high || 984*492caffaSMoritz Fischer ecoalesce->rx_coalesce_usecs_high || 985*492caffaSMoritz Fischer ecoalesce->rx_max_coalesced_frames_high || 986*492caffaSMoritz Fischer ecoalesce->tx_coalesce_usecs_high || 987*492caffaSMoritz Fischer ecoalesce->tx_max_coalesced_frames_high || 988*492caffaSMoritz Fischer ecoalesce->rate_sample_interval) 989*492caffaSMoritz Fischer return -EOPNOTSUPP; 990*492caffaSMoritz Fischer if (ecoalesce->rx_max_coalesced_frames) 991*492caffaSMoritz Fischer 
priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 992*492caffaSMoritz Fischer if (ecoalesce->tx_max_coalesced_frames) 993*492caffaSMoritz Fischer priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 994*492caffaSMoritz Fischer 995*492caffaSMoritz Fischer return 0; 996*492caffaSMoritz Fischer } 997*492caffaSMoritz Fischer 998*492caffaSMoritz Fischer static int nixge_ethtools_set_phys_id(struct net_device *ndev, 999*492caffaSMoritz Fischer enum ethtool_phys_id_state state) 1000*492caffaSMoritz Fischer { 1001*492caffaSMoritz Fischer struct nixge_priv *priv = netdev_priv(ndev); 1002*492caffaSMoritz Fischer u32 ctrl; 1003*492caffaSMoritz Fischer 1004*492caffaSMoritz Fischer ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL); 1005*492caffaSMoritz Fischer switch (state) { 1006*492caffaSMoritz Fischer case ETHTOOL_ID_ACTIVE: 1007*492caffaSMoritz Fischer ctrl |= NIXGE_ID_LED_CTL_EN; 1008*492caffaSMoritz Fischer /* Enable identification LED override*/ 1009*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); 1010*492caffaSMoritz Fischer return 2; 1011*492caffaSMoritz Fischer 1012*492caffaSMoritz Fischer case ETHTOOL_ID_ON: 1013*492caffaSMoritz Fischer ctrl |= NIXGE_ID_LED_CTL_VAL; 1014*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); 1015*492caffaSMoritz Fischer break; 1016*492caffaSMoritz Fischer 1017*492caffaSMoritz Fischer case ETHTOOL_ID_OFF: 1018*492caffaSMoritz Fischer ctrl &= ~NIXGE_ID_LED_CTL_VAL; 1019*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); 1020*492caffaSMoritz Fischer break; 1021*492caffaSMoritz Fischer 1022*492caffaSMoritz Fischer case ETHTOOL_ID_INACTIVE: 1023*492caffaSMoritz Fischer /* Restore LED settings */ 1024*492caffaSMoritz Fischer ctrl &= ~NIXGE_ID_LED_CTL_EN; 1025*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); 1026*492caffaSMoritz Fischer break; 1027*492caffaSMoritz Fischer } 1028*492caffaSMoritz Fischer 

	return 0;
}

static const struct ethtool_ops nixge_ethtool_ops = {
	.get_drvinfo    = nixge_ethtools_get_drvinfo,
	.get_coalesce   = nixge_ethtools_get_coalesce,
	.set_coalesce   = nixge_ethtools_set_coalesce,
	.set_phys_id    = nixge_ethtools_set_phys_id,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
};

/* MDIO read. Clause 45 accesses (MII_ADDR_C45 set in @reg) first
 * latch the 16-bit register address via an ADDRESS cycle, then issue
 * the read; Clause 22 reads go straight out. Returns the register
 * value, or a negative errno on poll timeout.
 */
static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		/* CTRL reads back as 0 once the bus cycle completes. */
		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	}

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

/* MDIO write: Clause 45 latches the register address first, then
 * writes the data word; Clause 22 writes directly. Returns 0 or a
 * negative errno on poll timeout.
 */
static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); 1102*492caffaSMoritz Fischer 1103*492caffaSMoritz Fischer tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) 1104*492caffaSMoritz Fischer | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); 1105*492caffaSMoritz Fischer 1106*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); 1107*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); 1108*492caffaSMoritz Fischer 1109*492caffaSMoritz Fischer err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, 1110*492caffaSMoritz Fischer !status, 10, 1000); 1111*492caffaSMoritz Fischer if (err) { 1112*492caffaSMoritz Fischer dev_err(priv->dev, "timeout setting address"); 1113*492caffaSMoritz Fischer return err; 1114*492caffaSMoritz Fischer } 1115*492caffaSMoritz Fischer 1116*492caffaSMoritz Fischer tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) 1117*492caffaSMoritz Fischer | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); 1118*492caffaSMoritz Fischer 1119*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); 1120*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); 1121*492caffaSMoritz Fischer err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, 1122*492caffaSMoritz Fischer !status, 10, 1000); 1123*492caffaSMoritz Fischer if (err) 1124*492caffaSMoritz Fischer dev_err(priv->dev, "timeout setting write command"); 1125*492caffaSMoritz Fischer } else { 1126*492caffaSMoritz Fischer device = reg & 0x1f; 1127*492caffaSMoritz Fischer 1128*492caffaSMoritz Fischer tmp = NIXGE_MDIO_CLAUSE22 | 1129*492caffaSMoritz Fischer NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) | 1130*492caffaSMoritz Fischer NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); 1131*492caffaSMoritz Fischer 1132*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); 1133*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); 
1134*492caffaSMoritz Fischer nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); 1135*492caffaSMoritz Fischer 1136*492caffaSMoritz Fischer err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, 1137*492caffaSMoritz Fischer !status, 10, 1000); 1138*492caffaSMoritz Fischer if (err) 1139*492caffaSMoritz Fischer dev_err(priv->dev, "timeout setting write command"); 1140*492caffaSMoritz Fischer } 1141*492caffaSMoritz Fischer 1142*492caffaSMoritz Fischer return err; 1143*492caffaSMoritz Fischer } 1144*492caffaSMoritz Fischer 1145*492caffaSMoritz Fischer static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np) 1146*492caffaSMoritz Fischer { 1147*492caffaSMoritz Fischer struct mii_bus *bus; 1148*492caffaSMoritz Fischer 1149*492caffaSMoritz Fischer bus = devm_mdiobus_alloc(priv->dev); 1150*492caffaSMoritz Fischer if (!bus) 1151*492caffaSMoritz Fischer return -ENOMEM; 1152*492caffaSMoritz Fischer 1153*492caffaSMoritz Fischer snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); 1154*492caffaSMoritz Fischer bus->priv = priv; 1155*492caffaSMoritz Fischer bus->name = "nixge_mii_bus"; 1156*492caffaSMoritz Fischer bus->read = nixge_mdio_read; 1157*492caffaSMoritz Fischer bus->write = nixge_mdio_write; 1158*492caffaSMoritz Fischer bus->parent = priv->dev; 1159*492caffaSMoritz Fischer 1160*492caffaSMoritz Fischer priv->mii_bus = bus; 1161*492caffaSMoritz Fischer 1162*492caffaSMoritz Fischer return of_mdiobus_register(bus, np); 1163*492caffaSMoritz Fischer } 1164*492caffaSMoritz Fischer 1165*492caffaSMoritz Fischer static void *nixge_get_nvmem_address(struct device *dev) 1166*492caffaSMoritz Fischer { 1167*492caffaSMoritz Fischer struct nvmem_cell *cell; 1168*492caffaSMoritz Fischer size_t cell_size; 1169*492caffaSMoritz Fischer char *mac; 1170*492caffaSMoritz Fischer 1171*492caffaSMoritz Fischer cell = nvmem_cell_get(dev, "address"); 1172*492caffaSMoritz Fischer if (IS_ERR(cell)) 1173*492caffaSMoritz Fischer return cell; 
1174*492caffaSMoritz Fischer 1175*492caffaSMoritz Fischer mac = nvmem_cell_read(cell, &cell_size); 1176*492caffaSMoritz Fischer nvmem_cell_put(cell); 1177*492caffaSMoritz Fischer 1178*492caffaSMoritz Fischer return mac; 1179*492caffaSMoritz Fischer } 1180*492caffaSMoritz Fischer 1181*492caffaSMoritz Fischer static int nixge_probe(struct platform_device *pdev) 1182*492caffaSMoritz Fischer { 1183*492caffaSMoritz Fischer struct nixge_priv *priv; 1184*492caffaSMoritz Fischer struct net_device *ndev; 1185*492caffaSMoritz Fischer struct resource *dmares; 1186*492caffaSMoritz Fischer const char *mac_addr; 1187*492caffaSMoritz Fischer int err; 1188*492caffaSMoritz Fischer 1189*492caffaSMoritz Fischer ndev = alloc_etherdev(sizeof(*priv)); 1190*492caffaSMoritz Fischer if (!ndev) 1191*492caffaSMoritz Fischer return -ENOMEM; 1192*492caffaSMoritz Fischer 1193*492caffaSMoritz Fischer platform_set_drvdata(pdev, ndev); 1194*492caffaSMoritz Fischer SET_NETDEV_DEV(ndev, &pdev->dev); 1195*492caffaSMoritz Fischer 1196*492caffaSMoritz Fischer ndev->features = NETIF_F_SG; 1197*492caffaSMoritz Fischer ndev->netdev_ops = &nixge_netdev_ops; 1198*492caffaSMoritz Fischer ndev->ethtool_ops = &nixge_ethtool_ops; 1199*492caffaSMoritz Fischer 1200*492caffaSMoritz Fischer /* MTU range: 64 - 9000 */ 1201*492caffaSMoritz Fischer ndev->min_mtu = 64; 1202*492caffaSMoritz Fischer ndev->max_mtu = NIXGE_JUMBO_MTU; 1203*492caffaSMoritz Fischer 1204*492caffaSMoritz Fischer mac_addr = nixge_get_nvmem_address(&pdev->dev); 1205*492caffaSMoritz Fischer if (mac_addr && is_valid_ether_addr(mac_addr)) 1206*492caffaSMoritz Fischer ether_addr_copy(ndev->dev_addr, mac_addr); 1207*492caffaSMoritz Fischer else 1208*492caffaSMoritz Fischer eth_hw_addr_random(ndev); 1209*492caffaSMoritz Fischer 1210*492caffaSMoritz Fischer priv = netdev_priv(ndev); 1211*492caffaSMoritz Fischer priv->ndev = ndev; 1212*492caffaSMoritz Fischer priv->dev = &pdev->dev; 1213*492caffaSMoritz Fischer 1214*492caffaSMoritz Fischer 
netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT); 1215*492caffaSMoritz Fischer 1216*492caffaSMoritz Fischer dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1217*492caffaSMoritz Fischer priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares); 1218*492caffaSMoritz Fischer if (IS_ERR(priv->dma_regs)) { 1219*492caffaSMoritz Fischer netdev_err(ndev, "failed to map dma regs\n"); 1220*492caffaSMoritz Fischer return PTR_ERR(priv->dma_regs); 1221*492caffaSMoritz Fischer } 1222*492caffaSMoritz Fischer priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET; 1223*492caffaSMoritz Fischer __nixge_hw_set_mac_address(ndev); 1224*492caffaSMoritz Fischer 1225*492caffaSMoritz Fischer priv->tx_irq = platform_get_irq_byname(pdev, "tx"); 1226*492caffaSMoritz Fischer if (priv->tx_irq < 0) { 1227*492caffaSMoritz Fischer netdev_err(ndev, "could not find 'tx' irq"); 1228*492caffaSMoritz Fischer return priv->tx_irq; 1229*492caffaSMoritz Fischer } 1230*492caffaSMoritz Fischer 1231*492caffaSMoritz Fischer priv->rx_irq = platform_get_irq_byname(pdev, "rx"); 1232*492caffaSMoritz Fischer if (priv->rx_irq < 0) { 1233*492caffaSMoritz Fischer netdev_err(ndev, "could not find 'rx' irq"); 1234*492caffaSMoritz Fischer return priv->rx_irq; 1235*492caffaSMoritz Fischer } 1236*492caffaSMoritz Fischer 1237*492caffaSMoritz Fischer priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 1238*492caffaSMoritz Fischer priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 1239*492caffaSMoritz Fischer 1240*492caffaSMoritz Fischer err = nixge_mdio_setup(priv, pdev->dev.of_node); 1241*492caffaSMoritz Fischer if (err) { 1242*492caffaSMoritz Fischer netdev_err(ndev, "error registering mdio bus"); 1243*492caffaSMoritz Fischer goto free_netdev; 1244*492caffaSMoritz Fischer } 1245*492caffaSMoritz Fischer 1246*492caffaSMoritz Fischer priv->phy_mode = of_get_phy_mode(pdev->dev.of_node); 1247*492caffaSMoritz Fischer if (priv->phy_mode < 0) { 1248*492caffaSMoritz Fischer netdev_err(ndev, "not find 
\"phy-mode\" property\n"); 1249*492caffaSMoritz Fischer err = -EINVAL; 1250*492caffaSMoritz Fischer goto unregister_mdio; 1251*492caffaSMoritz Fischer } 1252*492caffaSMoritz Fischer 1253*492caffaSMoritz Fischer priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 1254*492caffaSMoritz Fischer if (!priv->phy_node) { 1255*492caffaSMoritz Fischer netdev_err(ndev, "not find \"phy-handle\" property\n"); 1256*492caffaSMoritz Fischer err = -EINVAL; 1257*492caffaSMoritz Fischer goto unregister_mdio; 1258*492caffaSMoritz Fischer } 1259*492caffaSMoritz Fischer 1260*492caffaSMoritz Fischer err = register_netdev(priv->ndev); 1261*492caffaSMoritz Fischer if (err) { 1262*492caffaSMoritz Fischer netdev_err(ndev, "register_netdev() error (%i)\n", err); 1263*492caffaSMoritz Fischer goto unregister_mdio; 1264*492caffaSMoritz Fischer } 1265*492caffaSMoritz Fischer 1266*492caffaSMoritz Fischer return 0; 1267*492caffaSMoritz Fischer 1268*492caffaSMoritz Fischer unregister_mdio: 1269*492caffaSMoritz Fischer mdiobus_unregister(priv->mii_bus); 1270*492caffaSMoritz Fischer 1271*492caffaSMoritz Fischer free_netdev: 1272*492caffaSMoritz Fischer free_netdev(ndev); 1273*492caffaSMoritz Fischer 1274*492caffaSMoritz Fischer return err; 1275*492caffaSMoritz Fischer } 1276*492caffaSMoritz Fischer 1277*492caffaSMoritz Fischer static int nixge_remove(struct platform_device *pdev) 1278*492caffaSMoritz Fischer { 1279*492caffaSMoritz Fischer struct net_device *ndev = platform_get_drvdata(pdev); 1280*492caffaSMoritz Fischer struct nixge_priv *priv = netdev_priv(ndev); 1281*492caffaSMoritz Fischer 1282*492caffaSMoritz Fischer unregister_netdev(ndev); 1283*492caffaSMoritz Fischer 1284*492caffaSMoritz Fischer mdiobus_unregister(priv->mii_bus); 1285*492caffaSMoritz Fischer 1286*492caffaSMoritz Fischer free_netdev(ndev); 1287*492caffaSMoritz Fischer 1288*492caffaSMoritz Fischer return 0; 1289*492caffaSMoritz Fischer } 1290*492caffaSMoritz Fischer 1291*492caffaSMoritz Fischer /* Match table 
for of_platform binding */ 1292*492caffaSMoritz Fischer static const struct of_device_id nixge_dt_ids[] = { 1293*492caffaSMoritz Fischer { .compatible = "ni,xge-enet-2.00", }, 1294*492caffaSMoritz Fischer {}, 1295*492caffaSMoritz Fischer }; 1296*492caffaSMoritz Fischer MODULE_DEVICE_TABLE(of, nixge_dt_ids); 1297*492caffaSMoritz Fischer 1298*492caffaSMoritz Fischer static struct platform_driver nixge_driver = { 1299*492caffaSMoritz Fischer .probe = nixge_probe, 1300*492caffaSMoritz Fischer .remove = nixge_remove, 1301*492caffaSMoritz Fischer .driver = { 1302*492caffaSMoritz Fischer .name = "nixge", 1303*492caffaSMoritz Fischer .of_match_table = of_match_ptr(nixge_dt_ids), 1304*492caffaSMoritz Fischer }, 1305*492caffaSMoritz Fischer }; 1306*492caffaSMoritz Fischer module_platform_driver(nixge_driver); 1307*492caffaSMoritz Fischer 1308*492caffaSMoritz Fischer MODULE_LICENSE("GPL v2"); 1309*492caffaSMoritz Fischer MODULE_DESCRIPTION("National Instruments XGE Management MAC"); 1310*492caffaSMoritz Fischer MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>"); 1311