1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2016-2017, National Instruments Corp. 3 * 4 * Author: Moritz Fischer <mdf@kernel.org> 5 */ 6 7 #include <linux/etherdevice.h> 8 #include <linux/module.h> 9 #include <linux/netdevice.h> 10 #include <linux/of_address.h> 11 #include <linux/of_mdio.h> 12 #include <linux/of_net.h> 13 #include <linux/of_platform.h> 14 #include <linux/of_irq.h> 15 #include <linux/skbuff.h> 16 #include <linux/phy.h> 17 #include <linux/mii.h> 18 #include <linux/nvmem-consumer.h> 19 #include <linux/ethtool.h> 20 #include <linux/iopoll.h> 21 22 #define TX_BD_NUM 64 23 #define RX_BD_NUM 128 24 25 /* Axi DMA Register definitions */ 26 #define XAXIDMA_TX_CR_OFFSET 0x00 /* Channel control */ 27 #define XAXIDMA_TX_SR_OFFSET 0x04 /* Status */ 28 #define XAXIDMA_TX_CDESC_OFFSET 0x08 /* Current descriptor pointer */ 29 #define XAXIDMA_TX_TDESC_OFFSET 0x10 /* Tail descriptor pointer */ 30 31 #define XAXIDMA_RX_CR_OFFSET 0x30 /* Channel control */ 32 #define XAXIDMA_RX_SR_OFFSET 0x34 /* Status */ 33 #define XAXIDMA_RX_CDESC_OFFSET 0x38 /* Current descriptor pointer */ 34 #define XAXIDMA_RX_TDESC_OFFSET 0x40 /* Tail descriptor pointer */ 35 36 #define XAXIDMA_CR_RUNSTOP_MASK 0x1 /* Start/stop DMA channel */ 37 #define XAXIDMA_CR_RESET_MASK 0x4 /* Reset DMA engine */ 38 39 #define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */ 40 #define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */ 41 #define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */ 42 #define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */ 43 44 #define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */ 45 #define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */ 46 47 #define XAXIDMA_DELAY_SHIFT 24 48 #define XAXIDMA_COALESCE_SHIFT 16 49 50 #define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */ 51 #define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */ 52 #define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */ 53 #define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */ 54 55 /* Default TX/RX Threshold and waitbound values for SGDMA mode */ 56 #define XAXIDMA_DFT_TX_THRESHOLD 24 57 #define XAXIDMA_DFT_TX_WAITBOUND 254 58 #define XAXIDMA_DFT_RX_THRESHOLD 24 59 #define XAXIDMA_DFT_RX_WAITBOUND 254 60 61 #define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */ 62 #define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */ 63 #define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */ 64 #define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */ 65 #define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */ 66 #define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */ 67 #define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */ 68 #define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */ 69 #define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */ 70 71 #define NIXGE_REG_CTRL_OFFSET 0x4000 72 #define NIXGE_REG_INFO 0x00 73 #define NIXGE_REG_MAC_CTL 0x04 74 #define NIXGE_REG_PHY_CTL 0x08 75 #define NIXGE_REG_LED_CTL 0x0c 76 #define NIXGE_REG_MDIO_DATA 0x10 77 #define NIXGE_REG_MDIO_ADDR 0x14 78 #define NIXGE_REG_MDIO_OP 0x18 79 #define NIXGE_REG_MDIO_CTRL 0x1c 80 81 #define NIXGE_ID_LED_CTL_EN BIT(0) 82 #define NIXGE_ID_LED_CTL_VAL BIT(1) 83 84 #define NIXGE_MDIO_CLAUSE45 BIT(12) 85 #define NIXGE_MDIO_CLAUSE22 0 86 #define NIXGE_MDIO_OP(n) (((n) & 0x3) << 10) 87 #define NIXGE_MDIO_OP_ADDRESS 0 88 #define NIXGE_MDIO_C45_WRITE BIT(0) 89 #define NIXGE_MDIO_C45_READ (BIT(1) | BIT(0)) 90 #define NIXGE_MDIO_C22_WRITE BIT(0) 91 #define NIXGE_MDIO_C22_READ BIT(1) 92 #define NIXGE_MDIO_ADDR(n) (((n) & 0x1f) << 5) 93 #define NIXGE_MDIO_MMD(n) (((n) & 0x1f) << 0) 94 95 #define NIXGE_REG_MAC_LSB 0x1000 96 #define NIXGE_REG_MAC_MSB 0x1004 97 98 /* Packet size info */ 99 #define NIXGE_HDR_SIZE 14 /* Size of Ethernet header */ 100 #define NIXGE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */ 101 #define NIXGE_MTU 1500 /* Max MTU of an Ethernet frame */ 102 #define NIXGE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */ 103 104 #define NIXGE_MAX_FRAME_SIZE (NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) 105 #define NIXGE_MAX_JUMBO_FRAME_SIZE \ 106 (NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) 107 108 struct nixge_hw_dma_bd { 109 u32 next_lo; 110 u32 next_hi; 111 u32 phys_lo; 112 u32 phys_hi; 113 u32 reserved3; 114 u32 reserved4; 115 u32 cntrl; 116 u32 status; 117 u32 app0; 118 u32 app1; 119 u32 app2; 120 u32 app3; 121 u32 app4; 122 u32 sw_id_offset_lo; 123 u32 sw_id_offset_hi; 124 u32 reserved6; 125 }; 126 127 #ifdef CONFIG_PHYS_ADDR_T_64BIT 128 #define nixge_hw_dma_bd_set_addr(bd, field, addr) \ 129 do { \ 130 (bd)->field##_lo = lower_32_bits((addr)); \ 131 (bd)->field##_hi = upper_32_bits((addr)); \ 132 } while (0) 133 #else 134 #define nixge_hw_dma_bd_set_addr(bd, field, addr) \ 135 ((bd)->field##_lo = lower_32_bits((addr))) 136 #endif 137 138 #define nixge_hw_dma_bd_set_phys(bd, addr) \ 139 nixge_hw_dma_bd_set_addr((bd), phys, (addr)) 140 141 #define nixge_hw_dma_bd_set_next(bd, addr) \ 142 nixge_hw_dma_bd_set_addr((bd), next, (addr)) 143 144 #define nixge_hw_dma_bd_set_offset(bd, addr) \ 145 nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr)) 146 147 #ifdef CONFIG_PHYS_ADDR_T_64BIT 148 #define nixge_hw_dma_bd_get_addr(bd, field) \ 149 (dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo)) 150 #else 151 #define nixge_hw_dma_bd_get_addr(bd, field) \ 152 (dma_addr_t)((bd)->field##_lo) 153 #endif 154 155 struct nixge_tx_skb { 156 struct sk_buff *skb; 157 dma_addr_t mapping; 158 size_t size; 159 bool mapped_as_page; 160 }; 161 162 struct nixge_priv { 163 struct net_device *ndev; 164 struct napi_struct napi; 165 struct device *dev; 166 167 /* Connection to PHY device */ 168 struct device_node *phy_node; 169 phy_interface_t phy_mode; 170 171 int link; 172 unsigned int speed; 173 unsigned int duplex; 174 175 /* MDIO bus data */ 176 struct mii_bus *mii_bus; /* MII bus reference */ 177 178 /* IO registers, dma functions and IRQs */ 179 void __iomem *ctrl_regs; 180 void __iomem *dma_regs; 181 182 struct tasklet_struct dma_err_tasklet; 183 184 int tx_irq; 185 int rx_irq; 186 187 /* Buffer descriptors */ 188 struct nixge_hw_dma_bd *tx_bd_v; 189 struct nixge_tx_skb *tx_skb; 190 dma_addr_t tx_bd_p; 191 192 struct nixge_hw_dma_bd *rx_bd_v; 193 dma_addr_t rx_bd_p; 194 u32 tx_bd_ci; 195 u32 tx_bd_tail; 196 u32 rx_bd_ci; 197 198 u32 coalesce_count_rx; 199 u32 coalesce_count_tx; 200 }; 201 202 static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val) 203 { 204 writel(val, priv->dma_regs + offset); 205 } 206 207 static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset, 208 dma_addr_t addr) 209 { 210 writel(lower_32_bits(addr), priv->dma_regs + offset); 211 #ifdef CONFIG_PHYS_ADDR_T_64BIT 212 writel(upper_32_bits(addr), priv->dma_regs + offset + 4); 213 #endif 214 } 215 216 static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset) 217 { 218 return readl(priv->dma_regs + offset); 219 } 220 221 static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val) 222 { 223 writel(val, priv->ctrl_regs + offset); 224 } 225 226 static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset) 227 { 228 return readl(priv->ctrl_regs + offset); 229 } 230 231 #define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \ 232 readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \ 233 (sleep_us), (timeout_us)) 234 235 #define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \ 236 readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \ 237 (sleep_us), (timeout_us)) 238 239 static void nixge_hw_dma_bd_release(struct net_device *ndev) 240 { 241 struct nixge_priv *priv = netdev_priv(ndev); 242 dma_addr_t phys_addr; 243 struct sk_buff *skb; 244 int i; 245 246 for (i = 0; i < RX_BD_NUM; i++) { 247 phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], 248 phys); 249 250 dma_unmap_single(ndev->dev.parent, phys_addr, 251 NIXGE_MAX_JUMBO_FRAME_SIZE, 252 DMA_FROM_DEVICE); 253 254 skb = (struct sk_buff *)(uintptr_t) 255 nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], 256 sw_id_offset); 257 dev_kfree_skb(skb); 258 } 259 260 if (priv->rx_bd_v) 261 dma_free_coherent(ndev->dev.parent, 262 sizeof(*priv->rx_bd_v) * RX_BD_NUM, 263 priv->rx_bd_v, 264 priv->rx_bd_p); 265 266 if (priv->tx_skb) 267 devm_kfree(ndev->dev.parent, priv->tx_skb); 268 269 if (priv->tx_bd_v) 270 dma_free_coherent(ndev->dev.parent, 271 sizeof(*priv->tx_bd_v) * TX_BD_NUM, 272 priv->tx_bd_v, 273 priv->tx_bd_p); 274 } 275 276 static int nixge_hw_dma_bd_init(struct net_device *ndev) 277 { 278 struct nixge_priv *priv = netdev_priv(ndev); 279 struct sk_buff *skb; 280 dma_addr_t phys; 281 u32 cr; 282 int i; 283 284 /* Reset the indexes which are used for accessing the BDs */ 285 priv->tx_bd_ci = 0; 286 priv->tx_bd_tail = 0; 287 priv->rx_bd_ci = 0; 288 289 /* Allocate the Tx and Rx buffer descriptors. */ 290 priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 291 sizeof(*priv->tx_bd_v) * TX_BD_NUM, 292 &priv->tx_bd_p, GFP_KERNEL); 293 if (!priv->tx_bd_v) 294 goto out; 295 296 priv->tx_skb = devm_kcalloc(ndev->dev.parent, 297 TX_BD_NUM, sizeof(*priv->tx_skb), 298 GFP_KERNEL); 299 if (!priv->tx_skb) 300 goto out; 301 302 priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 303 sizeof(*priv->rx_bd_v) * RX_BD_NUM, 304 &priv->rx_bd_p, GFP_KERNEL); 305 if (!priv->rx_bd_v) 306 goto out; 307 308 for (i = 0; i < TX_BD_NUM; i++) { 309 nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i], 310 priv->tx_bd_p + 311 sizeof(*priv->tx_bd_v) * 312 ((i + 1) % TX_BD_NUM)); 313 } 314 315 for (i = 0; i < RX_BD_NUM; i++) { 316 nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i], 317 priv->rx_bd_p 318 + sizeof(*priv->rx_bd_v) * 319 ((i + 1) % RX_BD_NUM)); 320 321 skb = netdev_alloc_skb_ip_align(ndev, 322 NIXGE_MAX_JUMBO_FRAME_SIZE); 323 if (!skb) 324 goto out; 325 326 nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb); 327 phys = dma_map_single(ndev->dev.parent, skb->data, 328 NIXGE_MAX_JUMBO_FRAME_SIZE, 329 DMA_FROM_DEVICE); 330 331 nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys); 332 333 priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; 334 } 335 336 /* Start updating the Rx channel control register */ 337 cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); 338 /* Update the interrupt coalesce count */ 339 cr = ((cr & ~XAXIDMA_COALESCE_MASK) | 340 ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT)); 341 /* Update the delay timer count */ 342 cr = ((cr & ~XAXIDMA_DELAY_MASK) | 343 (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 344 /* Enable coalesce, delay timer and error interrupts */ 345 cr |= XAXIDMA_IRQ_ALL_MASK; 346 /* Write to the Rx channel control register */ 347 nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); 348 349 /* Start updating the Tx channel control register */ 350 cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); 351 /* Update the interrupt coalesce count */ 352 cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | 353 ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT)); 354 /* Update the delay timer count */ 355 cr = (((cr & ~XAXIDMA_DELAY_MASK)) | 356 (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 357 /* Enable coalesce, delay timer and error interrupts */ 358 cr |= XAXIDMA_IRQ_ALL_MASK; 359 /* Write to the Tx channel control register */ 360 nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr); 361 362 /* Populate the tail pointer and bring the Rx Axi DMA engine out of 363 * halted state. This will make the Rx side ready for reception. 364 */ 365 nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p); 366 cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); 367 nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, 368 cr | XAXIDMA_CR_RUNSTOP_MASK); 369 nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p + 370 (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1))); 371 372 /* Write to the RS (Run-stop) bit in the Tx channel control register. 373 * Tx channel is now ready to run. But only after we write to the 374 * tail pointer register that the Tx channel will start transmitting. 375 */ 376 nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p); 377 cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); 378 nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, 379 cr | XAXIDMA_CR_RUNSTOP_MASK); 380 381 return 0; 382 out: 383 nixge_hw_dma_bd_release(ndev); 384 return -ENOMEM; 385 } 386 387 static void __nixge_device_reset(struct nixge_priv *priv, off_t offset) 388 { 389 u32 status; 390 int err; 391 392 /* Reset Axi DMA. This would reset NIXGE Ethernet core as well. 393 * The reset process of Axi DMA takes a while to complete as all 394 * pending commands/transfers will be flushed or completed during 395 * this reset process. 396 */ 397 nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK); 398 err = nixge_dma_poll_timeout(priv, offset, status, 399 !(status & XAXIDMA_CR_RESET_MASK), 10, 400 1000); 401 if (err) 402 netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__); 403 } 404 405 static void nixge_device_reset(struct net_device *ndev) 406 { 407 struct nixge_priv *priv = netdev_priv(ndev); 408 409 __nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET); 410 __nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET); 411 412 if (nixge_hw_dma_bd_init(ndev)) 413 netdev_err(ndev, "%s: descriptor allocation failed\n", 414 __func__); 415 416 netif_trans_update(ndev); 417 } 418 419 static void nixge_handle_link_change(struct net_device *ndev) 420 { 421 struct nixge_priv *priv = netdev_priv(ndev); 422 struct phy_device *phydev = ndev->phydev; 423 424 if (phydev->link != priv->link || phydev->speed != priv->speed || 425 phydev->duplex != priv->duplex) { 426 priv->link = phydev->link; 427 priv->speed = phydev->speed; 428 priv->duplex = phydev->duplex; 429 phy_print_status(phydev); 430 } 431 } 432 433 static void nixge_tx_skb_unmap(struct nixge_priv *priv, 434 struct nixge_tx_skb *tx_skb) 435 { 436 if (tx_skb->mapping) { 437 if (tx_skb->mapped_as_page) 438 dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping, 439 tx_skb->size, DMA_TO_DEVICE); 440 else 441 dma_unmap_single(priv->ndev->dev.parent, 442 tx_skb->mapping, 443 tx_skb->size, DMA_TO_DEVICE); 444 tx_skb->mapping = 0; 445 } 446 447 if (tx_skb->skb) { 448 dev_kfree_skb_any(tx_skb->skb); 449 tx_skb->skb = NULL; 450 } 451 } 452 453 static void nixge_start_xmit_done(struct net_device *ndev) 454 { 455 struct nixge_priv *priv = netdev_priv(ndev); 456 struct nixge_hw_dma_bd *cur_p; 457 struct nixge_tx_skb *tx_skb; 458 unsigned int status = 0; 459 u32 packets = 0; 460 u32 size = 0; 461 462 cur_p = &priv->tx_bd_v[priv->tx_bd_ci]; 463 tx_skb = &priv->tx_skb[priv->tx_bd_ci]; 464 465 status = cur_p->status; 466 467 while (status & XAXIDMA_BD_STS_COMPLETE_MASK) { 468 nixge_tx_skb_unmap(priv, tx_skb); 469 cur_p->status = 0; 470 471 size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 472 packets++; 473 474 ++priv->tx_bd_ci; 475 priv->tx_bd_ci %= TX_BD_NUM; 476 cur_p = &priv->tx_bd_v[priv->tx_bd_ci]; 477 tx_skb = &priv->tx_skb[priv->tx_bd_ci]; 478 status = cur_p->status; 479 } 480 481 ndev->stats.tx_packets += packets; 482 ndev->stats.tx_bytes += size; 483 484 if (packets) 485 netif_wake_queue(ndev); 486 } 487 488 static int nixge_check_tx_bd_space(struct nixge_priv *priv, 489 int num_frag) 490 { 491 struct nixge_hw_dma_bd *cur_p; 492 493 cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM]; 494 if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) 495 return NETDEV_TX_BUSY; 496 return 0; 497 } 498 499 static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) 500 { 501 struct nixge_priv *priv = netdev_priv(ndev); 502 struct nixge_hw_dma_bd *cur_p; 503 struct nixge_tx_skb *tx_skb; 504 dma_addr_t tail_p, cur_phys; 505 skb_frag_t *frag; 506 u32 num_frag; 507 u32 ii; 508 509 num_frag = skb_shinfo(skb)->nr_frags; 510 cur_p = &priv->tx_bd_v[priv->tx_bd_tail]; 511 tx_skb = &priv->tx_skb[priv->tx_bd_tail]; 512 513 if (nixge_check_tx_bd_space(priv, num_frag)) { 514 if (!netif_queue_stopped(ndev)) 515 netif_stop_queue(ndev); 516 return NETDEV_TX_OK; 517 } 518 519 cur_phys = dma_map_single(ndev->dev.parent, skb->data, 520 skb_headlen(skb), DMA_TO_DEVICE); 521 if (dma_mapping_error(ndev->dev.parent, cur_phys)) 522 goto drop; 523 nixge_hw_dma_bd_set_phys(cur_p, cur_phys); 524 525 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; 526 527 tx_skb->skb = NULL; 528 tx_skb->mapping = cur_phys; 529 tx_skb->size = skb_headlen(skb); 530 tx_skb->mapped_as_page = false; 531 532 for (ii = 0; ii < num_frag; ii++) { 533 ++priv->tx_bd_tail; 534 priv->tx_bd_tail %= TX_BD_NUM; 535 cur_p = &priv->tx_bd_v[priv->tx_bd_tail]; 536 tx_skb = &priv->tx_skb[priv->tx_bd_tail]; 537 frag = &skb_shinfo(skb)->frags[ii]; 538 539 cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, 540 skb_frag_size(frag), 541 DMA_TO_DEVICE); 542 if (dma_mapping_error(ndev->dev.parent, cur_phys)) 543 goto frag_err; 544 nixge_hw_dma_bd_set_phys(cur_p, cur_phys); 545 546 cur_p->cntrl = skb_frag_size(frag); 547 548 tx_skb->skb = NULL; 549 tx_skb->mapping = cur_phys; 550 tx_skb->size = skb_frag_size(frag); 551 tx_skb->mapped_as_page = true; 552 } 553 554 /* last buffer of the frame */ 555 tx_skb->skb = skb; 556 557 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; 558 559 tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail; 560 /* Start the transfer */ 561 nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p); 562 ++priv->tx_bd_tail; 563 priv->tx_bd_tail %= TX_BD_NUM; 564 565 return NETDEV_TX_OK; 566 frag_err: 567 for (; ii > 0; ii--) { 568 if (priv->tx_bd_tail) 569 priv->tx_bd_tail--; 570 else 571 priv->tx_bd_tail = TX_BD_NUM - 1; 572 573 tx_skb = &priv->tx_skb[priv->tx_bd_tail]; 574 nixge_tx_skb_unmap(priv, tx_skb); 575 576 cur_p = &priv->tx_bd_v[priv->tx_bd_tail]; 577 cur_p->status = 0; 578 } 579 dma_unmap_single(priv->ndev->dev.parent, 580 tx_skb->mapping, 581 tx_skb->size, DMA_TO_DEVICE); 582 drop: 583 ndev->stats.tx_dropped++; 584 return NETDEV_TX_OK; 585 } 586 587 static int nixge_recv(struct net_device *ndev, int budget) 588 { 589 struct nixge_priv *priv = netdev_priv(ndev); 590 struct sk_buff *skb, *new_skb; 591 struct nixge_hw_dma_bd *cur_p; 592 dma_addr_t tail_p = 0, cur_phys = 0; 593 u32 packets = 0; 594 u32 length = 0; 595 u32 size = 0; 596 597 cur_p = &priv->rx_bd_v[priv->rx_bd_ci]; 598 599 while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK && 600 budget > packets)) { 601 tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) * 602 priv->rx_bd_ci; 603 604 skb = (struct sk_buff *)(uintptr_t) 605 nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset); 606 607 length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 608 if (length > NIXGE_MAX_JUMBO_FRAME_SIZE) 609 length = NIXGE_MAX_JUMBO_FRAME_SIZE; 610 611 dma_unmap_single(ndev->dev.parent, 612 nixge_hw_dma_bd_get_addr(cur_p, phys), 613 NIXGE_MAX_JUMBO_FRAME_SIZE, 614 DMA_FROM_DEVICE); 615 616 skb_put(skb, length); 617 618 skb->protocol = eth_type_trans(skb, ndev); 619 skb_checksum_none_assert(skb); 620 621 /* For now mark them as CHECKSUM_NONE since 622 * we don't have offload capabilities 623 */ 624 skb->ip_summed = CHECKSUM_NONE; 625 626 napi_gro_receive(&priv->napi, skb); 627 628 size += length; 629 packets++; 630 631 new_skb = netdev_alloc_skb_ip_align(ndev, 632 NIXGE_MAX_JUMBO_FRAME_SIZE); 633 if (!new_skb) 634 return packets; 635 636 cur_phys = dma_map_single(ndev->dev.parent, new_skb->data, 637 NIXGE_MAX_JUMBO_FRAME_SIZE, 638 DMA_FROM_DEVICE); 639 if (dma_mapping_error(ndev->dev.parent, cur_phys)) { 640 /* FIXME: bail out and clean up */ 641 netdev_err(ndev, "Failed to map ...\n"); 642 } 643 nixge_hw_dma_bd_set_phys(cur_p, cur_phys); 644 cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; 645 cur_p->status = 0; 646 nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb); 647 648 ++priv->rx_bd_ci; 649 priv->rx_bd_ci %= RX_BD_NUM; 650 cur_p = &priv->rx_bd_v[priv->rx_bd_ci]; 651 } 652 653 ndev->stats.rx_packets += packets; 654 ndev->stats.rx_bytes += size; 655 656 if (tail_p) 657 nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p); 658 659 return packets; 660 } 661 662 static int nixge_poll(struct napi_struct *napi, int budget) 663 { 664 struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi); 665 int work_done; 666 u32 status, cr; 667 668 work_done = 0; 669 670 work_done = nixge_recv(priv->ndev, budget); 671 if (work_done < budget) { 672 napi_complete_done(napi, work_done); 673 status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET); 674 675 if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { 676 /* If there's more, reschedule, but clear */ 677 nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status); 678 napi_reschedule(napi); 679 } else { 680 /* if not, turn on RX IRQs again ... */ 681 cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); 682 cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); 683 nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); 684 } 685 } 686 687 return work_done; 688 } 689 690 static irqreturn_t nixge_tx_irq(int irq, void *_ndev) 691 { 692 struct nixge_priv *priv = netdev_priv(_ndev); 693 struct net_device *ndev = _ndev; 694 unsigned int status; 695 dma_addr_t phys; 696 u32 cr; 697 698 status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET); 699 if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { 700 nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status); 701 nixge_start_xmit_done(priv->ndev); 702 goto out; 703 } 704 if (!(status & XAXIDMA_IRQ_ALL_MASK)) { 705 netdev_err(ndev, "No interrupts asserted in Tx path\n"); 706 return IRQ_NONE; 707 } 708 if (status & XAXIDMA_IRQ_ERROR_MASK) { 709 phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci], 710 phys); 711 712 netdev_err(ndev, "DMA Tx error 0x%x\n", status); 713 netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); 714 715 cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); 716 /* Disable coalesce, delay timer and error interrupts */ 717 cr &= (~XAXIDMA_IRQ_ALL_MASK); 718 /* Write to the Tx channel control register */ 719 nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr); 720 721 cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); 722 /* Disable coalesce, delay timer and error interrupts */ 723 cr &= (~XAXIDMA_IRQ_ALL_MASK); 724 /* Write to the Rx channel control register */ 725 nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); 726 727 tasklet_schedule(&priv->dma_err_tasklet); 728 nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status); 729 } 730 out: 731 return IRQ_HANDLED; 732 } 733 734 static irqreturn_t nixge_rx_irq(int irq, void *_ndev) 735 { 736 struct nixge_priv *priv = netdev_priv(_ndev); 737 struct net_device *ndev = _ndev; 738 unsigned int status; 739 dma_addr_t phys; 740 u32 cr; 741 742 status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET); 743 if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { 744 /* Turn of IRQs because NAPI */ 745 nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status); 746 cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); 747 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); 748 nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); 749 750 if (napi_schedule_prep(&priv->napi)) 751 __napi_schedule(&priv->napi); 752 goto out; 753 } 754 if (!(status & XAXIDMA_IRQ_ALL_MASK)) { 755 netdev_err(ndev, "No interrupts asserted in Rx path\n"); 756 return IRQ_NONE; 757 } 758 if (status & XAXIDMA_IRQ_ERROR_MASK) { 759 phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci], 760 phys); 761 netdev_err(ndev, "DMA Rx error 0x%x\n", status); 762 netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); 763 764 cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); 765 /* Disable coalesce, delay timer and error interrupts */ 766 cr &= (~XAXIDMA_IRQ_ALL_MASK); 767 /* Finally write to the Tx channel control register */ 768 nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr); 769 770 cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); 771 /* Disable coalesce, delay timer and error interrupts */ 772 cr &= (~XAXIDMA_IRQ_ALL_MASK); 773 /* write to the Rx channel control register */ 774 nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); 775 776 tasklet_schedule(&priv->dma_err_tasklet); 777 nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status); 778 } 779 out: 780 return IRQ_HANDLED; 781 } 782 783 static void nixge_dma_err_handler(unsigned long data) 784 { 785 struct nixge_priv *lp = (struct nixge_priv *)data; 786 struct nixge_hw_dma_bd *cur_p; 787 struct nixge_tx_skb *tx_skb; 788 u32 cr, i; 789 790 __nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET); 791 __nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET); 792 793 for (i = 0; i < TX_BD_NUM; i++) { 794 cur_p = &lp->tx_bd_v[i]; 795 tx_skb = &lp->tx_skb[i]; 796 nixge_tx_skb_unmap(lp, tx_skb); 797 798 nixge_hw_dma_bd_set_phys(cur_p, 0); 799 cur_p->cntrl = 0; 800 cur_p->status = 0; 801 nixge_hw_dma_bd_set_offset(cur_p, 0); 802 } 803 804 for (i = 0; i < RX_BD_NUM; i++) { 805 cur_p = &lp->rx_bd_v[i]; 806 cur_p->status = 0; 807 } 808 809 lp->tx_bd_ci = 0; 810 lp->tx_bd_tail = 0; 811 lp->rx_bd_ci = 0; 812 813 /* Start updating the Rx channel control register */ 814 cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET); 815 /* Update the interrupt coalesce count */ 816 cr = ((cr & ~XAXIDMA_COALESCE_MASK) | 817 (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); 818 /* Update the delay timer count */ 819 cr = ((cr & ~XAXIDMA_DELAY_MASK) | 820 (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 821 /* Enable coalesce, delay timer and error interrupts */ 822 cr |= XAXIDMA_IRQ_ALL_MASK; 823 /* Finally write to the Rx channel control register */ 824 nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr); 825 826 /* Start updating the Tx channel control register */ 827 cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET); 828 /* Update the interrupt coalesce count */ 829 cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | 830 (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); 831 /* Update the delay timer count */ 832 cr = (((cr & ~XAXIDMA_DELAY_MASK)) | 833 (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 834 /* Enable coalesce, delay timer and error interrupts */ 835 cr |= XAXIDMA_IRQ_ALL_MASK; 836 /* Finally write to the Tx channel control register */ 837 nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr); 838 839 /* Populate the tail pointer and bring the Rx Axi DMA engine out of 840 * halted state. This will make the Rx side ready for reception. 841 */ 842 nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); 843 cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET); 844 nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, 845 cr | XAXIDMA_CR_RUNSTOP_MASK); 846 nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + 847 (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 848 849 /* Write to the RS (Run-stop) bit in the Tx channel control register. 850 * Tx channel is now ready to run. But only after we write to the 851 * tail pointer register that the Tx channel will start transmitting 852 */ 853 nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); 854 cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET); 855 nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, 856 cr | XAXIDMA_CR_RUNSTOP_MASK); 857 } 858 859 static int nixge_open(struct net_device *ndev) 860 { 861 struct nixge_priv *priv = netdev_priv(ndev); 862 struct phy_device *phy; 863 int ret; 864 865 nixge_device_reset(ndev); 866 867 phy = of_phy_connect(ndev, priv->phy_node, 868 &nixge_handle_link_change, 0, priv->phy_mode); 869 if (!phy) 870 return -ENODEV; 871 872 phy_start(phy); 873 874 /* Enable tasklets for Axi DMA error handling */ 875 tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler, 876 (unsigned long)priv); 877 878 napi_enable(&priv->napi); 879 880 /* Enable interrupts for Axi DMA Tx */ 881 ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev); 882 if (ret) 883 goto err_tx_irq; 884 /* Enable interrupts for Axi DMA Rx */ 885 ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev); 886 if (ret) 887 goto err_rx_irq; 888 889 netif_start_queue(ndev); 890 891 return 0; 892 893 err_rx_irq: 894 free_irq(priv->tx_irq, ndev); 895 err_tx_irq: 896 phy_stop(phy); 897 phy_disconnect(phy); 898 tasklet_kill(&priv->dma_err_tasklet); 899 netdev_err(ndev, "request_irq() failed\n"); 900 return ret; 901 } 902 903 static int nixge_stop(struct net_device *ndev) 904 { 905 struct nixge_priv *priv = netdev_priv(ndev); 906 u32 cr; 907 908 netif_stop_queue(ndev); 909 napi_disable(&priv->napi); 910 911 if (ndev->phydev) { 912 phy_stop(ndev->phydev); 913 phy_disconnect(ndev->phydev); 914 } 915 916 cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); 917 nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, 918 cr & (~XAXIDMA_CR_RUNSTOP_MASK)); 919 cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); 920 nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, 921 cr & (~XAXIDMA_CR_RUNSTOP_MASK)); 922 923 tasklet_kill(&priv->dma_err_tasklet); 924 925 free_irq(priv->tx_irq, ndev); 926 free_irq(priv->rx_irq, ndev); 927 928 nixge_hw_dma_bd_release(ndev); 929 930 return 0; 931 } 932 933 static int nixge_change_mtu(struct net_device *ndev, int new_mtu) 934 { 935 if (netif_running(ndev)) 936 return -EBUSY; 937 938 if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) > 939 NIXGE_MAX_JUMBO_FRAME_SIZE) 940 return -EINVAL; 941 942 ndev->mtu = new_mtu; 943 944 return 0; 945 } 946 947 static s32 __nixge_hw_set_mac_address(struct net_device *ndev) 948 { 949 struct nixge_priv *priv = netdev_priv(ndev); 950 951 nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB, 952 (ndev->dev_addr[2]) << 24 | 953 (ndev->dev_addr[3] << 16) | 954 (ndev->dev_addr[4] << 8) | 955 (ndev->dev_addr[5] << 0)); 956 957 nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB, 958 (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8))); 959 960 return 0; 961 } 962 963 static int nixge_net_set_mac_address(struct net_device *ndev, void *p) 964 { 965 int err; 966 967 err = eth_mac_addr(ndev, p); 968 if (!err) 969 __nixge_hw_set_mac_address(ndev); 970 971 return err; 972 } 973 974 static const struct net_device_ops nixge_netdev_ops = { 975 .ndo_open = nixge_open, 976 .ndo_stop = nixge_stop, 977 .ndo_start_xmit = nixge_start_xmit, 978 .ndo_change_mtu = nixge_change_mtu, 979 .ndo_set_mac_address = nixge_net_set_mac_address, 980 .ndo_validate_addr = eth_validate_addr, 981 }; 982 983 static void nixge_ethtools_get_drvinfo(struct net_device *ndev, 984 struct ethtool_drvinfo *ed) 985 { 986 strlcpy(ed->driver, "nixge", sizeof(ed->driver)); 987 strlcpy(ed->bus_info, "platform", sizeof(ed->driver)); 988 } 989 990 static int nixge_ethtools_get_coalesce(struct net_device *ndev, 991 struct ethtool_coalesce *ecoalesce) 992 { 993 struct nixge_priv *priv = netdev_priv(ndev); 994 u32 regval = 0; 995 996 regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); 997 ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 998 >> XAXIDMA_COALESCE_SHIFT; 999 regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); 1000 ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 1001 >> XAXIDMA_COALESCE_SHIFT; 1002 return 0; 1003 } 1004 1005 static int nixge_ethtools_set_coalesce(struct net_device *ndev, 1006 struct ethtool_coalesce *ecoalesce) 1007 { 1008 struct nixge_priv *priv = netdev_priv(ndev); 1009 1010 if (netif_running(ndev)) { 1011 netdev_err(ndev, 1012 "Please stop netif before applying configuration\n"); 1013 return -EBUSY; 1014 } 1015 1016 if (ecoalesce->rx_coalesce_usecs || 1017 ecoalesce->rx_coalesce_usecs_irq || 1018 ecoalesce->rx_max_coalesced_frames_irq || 1019 ecoalesce->tx_coalesce_usecs || 1020 ecoalesce->tx_coalesce_usecs_irq || 1021 ecoalesce->tx_max_coalesced_frames_irq || 1022 ecoalesce->stats_block_coalesce_usecs || 1023 ecoalesce->use_adaptive_rx_coalesce || 1024 ecoalesce->use_adaptive_tx_coalesce || 1025 ecoalesce->pkt_rate_low || 1026 ecoalesce->rx_coalesce_usecs_low || 1027 ecoalesce->rx_max_coalesced_frames_low || 1028 ecoalesce->tx_coalesce_usecs_low || 1029 ecoalesce->tx_max_coalesced_frames_low || 1030 ecoalesce->pkt_rate_high || 1031 ecoalesce->rx_coalesce_usecs_high || 1032 ecoalesce->rx_max_coalesced_frames_high || 1033 ecoalesce->tx_coalesce_usecs_high || 1034 ecoalesce->tx_max_coalesced_frames_high || 1035 ecoalesce->rate_sample_interval) 1036 return -EOPNOTSUPP; 1037 if (ecoalesce->rx_max_coalesced_frames) 1038 priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 1039 if (ecoalesce->tx_max_coalesced_frames) 1040 priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 1041 1042 return 0; 1043 } 1044 1045 static int nixge_ethtools_set_phys_id(struct net_device *ndev, 1046 enum ethtool_phys_id_state state) 1047 { 1048 struct nixge_priv *priv = netdev_priv(ndev); 1049 u32 ctrl; 1050 1051 ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL); 1052 switch (state) { 1053 case ETHTOOL_ID_ACTIVE: 1054 ctrl |= NIXGE_ID_LED_CTL_EN; 1055 /* Enable identification LED override*/ 1056 nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); 1057 return 2; 1058 1059 case ETHTOOL_ID_ON: 1060 ctrl |= NIXGE_ID_LED_CTL_VAL; 1061 nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); 1062 break; 1063 1064 case ETHTOOL_ID_OFF: 1065 ctrl &= ~NIXGE_ID_LED_CTL_VAL; 1066 nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); 1067 break; 1068 1069 case ETHTOOL_ID_INACTIVE: 1070 /* Restore LED settings */ 1071 ctrl &= ~NIXGE_ID_LED_CTL_EN; 1072 nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); 1073 break; 1074 } 1075 1076 return 0; 1077 } 1078 1079 static const struct ethtool_ops nixge_ethtool_ops = { 1080 .get_drvinfo = nixge_ethtools_get_drvinfo, 1081 .get_coalesce = nixge_ethtools_get_coalesce, 1082 .set_coalesce = nixge_ethtools_set_coalesce, 1083 .set_phys_id = nixge_ethtools_set_phys_id, 1084 .get_link_ksettings = phy_ethtool_get_link_ksettings, 1085 .set_link_ksettings = phy_ethtool_set_link_ksettings, 1086 .get_link = ethtool_op_get_link, 1087 }; 1088 1089 static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg) 1090 { 1091 struct nixge_priv *priv = bus->priv; 1092 u32 status, tmp; 1093 int err; 1094 u16 device; 1095 1096 if (reg & MII_ADDR_C45) { 1097 device = (reg >> 16) & 0x1f; 1098 1099 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); 1100 1101 tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) 1102 | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); 1103 1104 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); 1105 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); 1106 1107 err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, 1108 !status, 10, 1000); 1109 if (err) { 1110 dev_err(priv->dev, "timeout setting address"); 1111 return err; 1112 } 1113 1114 tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) | 1115 NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); 1116 } else { 1117 device = reg & 0x1f; 1118 1119 tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) | 1120 NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); 1121 } 1122 1123 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); 1124 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); 1125 1126 err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, 1127 !status, 10, 1000); 1128 if (err) { 1129 dev_err(priv->dev, "timeout setting read command"); 1130 return err; 1131 } 1132 1133 status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA); 1134 1135 return status; 1136 } 1137 1138 static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) 1139 { 1140 struct nixge_priv *priv = bus->priv; 1141 u32 status, tmp; 1142 u16 device; 1143 int err; 1144 1145 if (reg & MII_ADDR_C45) { 1146 device = (reg >> 16) & 0x1f; 1147 1148 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); 1149 1150 tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) 1151 | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); 1152 1153 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); 1154 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); 1155 1156 err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, 1157 !status, 10, 1000); 1158 if (err) { 1159 dev_err(priv->dev, "timeout setting address"); 1160 return err; 1161 } 1162 1163 tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) 1164 | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); 1165 1166 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); 1167 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); 1168 err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, 1169 !status, 10, 1000); 1170 if (err) 1171 dev_err(priv->dev, "timeout setting write command"); 1172 } else { 1173 device = reg & 0x1f; 1174 1175 tmp = NIXGE_MDIO_CLAUSE22 | 1176 NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) | 1177 NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); 1178 1179 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); 1180 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); 1181 nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); 1182 1183 err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, 1184 !status, 10, 1000); 1185 if (err) 1186 dev_err(priv->dev, "timeout setting write command"); 1187 } 1188 1189 return err; 1190 } 1191 1192 static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np) 1193 { 1194 struct mii_bus *bus; 1195 1196 bus = devm_mdiobus_alloc(priv->dev); 1197 if (!bus) 1198 return -ENOMEM; 1199 1200 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); 1201 bus->priv = priv; 1202 bus->name = "nixge_mii_bus"; 1203 bus->read = nixge_mdio_read; 1204 bus->write = nixge_mdio_write; 1205 bus->parent = priv->dev; 1206 1207 priv->mii_bus = bus; 1208 1209 return of_mdiobus_register(bus, np); 1210 } 1211 1212 static void *nixge_get_nvmem_address(struct device *dev) 1213 { 1214 struct nvmem_cell *cell; 1215 size_t cell_size; 1216 char *mac; 1217 1218 cell = nvmem_cell_get(dev, "address"); 1219 if (IS_ERR(cell)) 1220 return NULL; 1221 1222 mac = nvmem_cell_read(cell, &cell_size); 1223 nvmem_cell_put(cell); 1224 1225 return mac; 1226 } 1227 1228 static int nixge_probe(struct platform_device *pdev) 1229 { 1230 struct nixge_priv *priv; 1231 struct net_device *ndev; 1232 struct resource *dmares; 1233 const u8 *mac_addr; 1234 int err; 1235 1236 ndev = alloc_etherdev(sizeof(*priv)); 1237 if (!ndev) 1238 return -ENOMEM; 1239 1240 platform_set_drvdata(pdev, ndev); 1241 SET_NETDEV_DEV(ndev, &pdev->dev); 1242 1243 ndev->features = NETIF_F_SG; 1244 ndev->netdev_ops = &nixge_netdev_ops; 1245 ndev->ethtool_ops = &nixge_ethtool_ops; 1246 1247 /* MTU range: 64 - 9000 */ 1248 ndev->min_mtu = 64; 1249 ndev->max_mtu = NIXGE_JUMBO_MTU; 1250 1251 mac_addr = nixge_get_nvmem_address(&pdev->dev); 1252 if (mac_addr && is_valid_ether_addr(mac_addr)) { 1253 ether_addr_copy(ndev->dev_addr, mac_addr); 1254 kfree(mac_addr); 1255 } else { 1256 eth_hw_addr_random(ndev); 1257 } 1258 1259 priv = netdev_priv(ndev); 1260 priv->ndev = ndev; 1261 priv->dev = &pdev->dev; 1262 1263 netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT); 1264 1265 dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1266 priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares); 1267 if (IS_ERR(priv->dma_regs)) { 1268 netdev_err(ndev, "failed to map dma regs\n"); 1269 return PTR_ERR(priv->dma_regs); 1270 } 1271 priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET; 1272 __nixge_hw_set_mac_address(ndev); 1273 1274 priv->tx_irq = platform_get_irq_byname(pdev, "tx"); 1275 if (priv->tx_irq < 0) { 1276 netdev_err(ndev, "could not find 'tx' irq"); 1277 return priv->tx_irq; 1278 } 1279 1280 priv->rx_irq = platform_get_irq_byname(pdev, "rx"); 1281 if (priv->rx_irq < 0) { 1282 netdev_err(ndev, "could not find 'rx' irq"); 1283 return priv->rx_irq; 1284 } 1285 1286 priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 1287 priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 1288 1289 err = nixge_mdio_setup(priv, pdev->dev.of_node); 1290 if (err) { 1291 netdev_err(ndev, "error registering mdio bus"); 1292 goto free_netdev; 1293 } 1294 1295 priv->phy_mode = of_get_phy_mode(pdev->dev.of_node); 1296 if (priv->phy_mode < 0) { 1297 netdev_err(ndev, "not find \"phy-mode\" property\n"); 1298 err = -EINVAL; 1299 goto unregister_mdio; 1300 } 1301 1302 priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 1303 if (!priv->phy_node) { 1304 netdev_err(ndev, "not find \"phy-handle\" property\n"); 1305 err = -EINVAL; 1306 goto unregister_mdio; 1307 } 1308 1309 err = register_netdev(priv->ndev); 1310 if (err) { 1311 netdev_err(ndev, "register_netdev() error (%i)\n", err); 1312 goto unregister_mdio; 1313 } 1314 1315 return 0; 1316 1317 unregister_mdio: 1318 mdiobus_unregister(priv->mii_bus); 1319 1320 free_netdev: 1321 free_netdev(ndev); 1322 1323 return err; 1324 } 1325 1326 static int nixge_remove(struct platform_device *pdev) 1327 { 1328 struct net_device *ndev = platform_get_drvdata(pdev); 1329 struct nixge_priv *priv = netdev_priv(ndev); 1330 1331 unregister_netdev(ndev); 1332 1333 mdiobus_unregister(priv->mii_bus); 1334 1335 free_netdev(ndev); 1336 1337 return 0; 1338 } 1339 1340 /* Match table for of_platform binding */ 1341 static const struct of_device_id nixge_dt_ids[] = { 1342 { .compatible = "ni,xge-enet-2.00", }, 1343 {}, 1344 }; 1345 MODULE_DEVICE_TABLE(of, nixge_dt_ids); 1346 1347 static struct platform_driver nixge_driver = { 1348 .probe = nixge_probe, 1349 .remove = nixge_remove, 1350 .driver = { 1351 .name = "nixge", 1352 .of_match_table = of_match_ptr(nixge_dt_ids), 1353 }, 1354 }; 1355 module_platform_driver(nixge_driver); 1356 1357 MODULE_LICENSE("GPL v2"); 1358 MODULE_DESCRIPTION("National Instruments XGE Management MAC"); 1359 MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>"); 1360