// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port		0x00		0x10		0x20
 * NPE			0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
 * physical PortId	2		0		1
 * TX queue		23		24		25
 * RX-free queue	26		27		28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#include <linux/soc/ixp4xx/cpu.h>
#include <linux/types.h>

#define IXP4XX_ETH_NPEA		0x00
#define IXP4XX_ETH_NPEB		0x10
#define IXP4XX_ETH_NPEC		0x20

#include "ixp46x_ts.h"

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64 /* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000
#define MAX_MRU			1536 /* 0x600 */
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31

#define PTP_SLAVE_MODE		1
#define PTP_MASTER_MODE		2
#define PORT2CHANNEL(p)		NPE_ID(p->id)

/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN


/* NPE message codes */
#define NPE_GETSTATUS			0x00
#define NPE_EDB_SETPORTADDRESS		0x01
#define NPE_EDB_GETMACADDRESSDATABASE	0x02
#define NPE_EDB_SETMACADDRESSSDATABASE	0x03
#define NPE_GETSTATS			0x04
#define NPE_RESETSTATS			0x05
#define NPE_SETMAXFRAMELENGTHS		0x06
#define NPE_VLAN_SETRXTAGMODE		0x07
#define NPE_VLAN_SETDEFAULTRXVID	0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
#define NPE_VLAN_SETRXQOSENTRY		0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE	0x0D
#define NPE_FW_SETFIREWALLMODE		0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE		0x11
#define NPE_SETLOOPBACK_MODE		0x12
#define NPE_PC_SETBSSIDTABLE		0x13
#define NPE_ADDRESS_FILTER_CONFIG	0x14
#define NPE_APPENDFCSCONFIG		0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
#define NPE_MAC_RECOVERY_START		0x17


#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

/* Information about built-in Ethernet MAC interfaces */
struct eth_plat_info {
	u8 phy;		/* MII PHY ID, 0 - 31 */
	u8 rxq;		/* configurable, currently 0 - 31 only */
	u8 txreadyq;
	u8 hwaddr[ETH_ALEN];
	u8 npe;		/* NPE instance used by this interface */
	bool has_mdio;	/* If this instance has an MDIO bus */
};

struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};

struct port {
	struct eth_regs __iomem *regs;
	struct ixp46x_ts_regs __iomem *timesync_regs;
	int phc_index;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	dma_addr_t desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
	u8 firmware[4];
	int hwts_tx_en;
	int hwts_rx_en;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};

/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};


#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

static DEFINE_SPINLOCK(mdio_lock);
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static struct device_node *mdio_bus_np;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;

static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid == ntohs(*id));
}

static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	val = __raw_readl(&regs->channel[ch].ch_event);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

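/*
 * Report the hardware TX timestamp of an outgoing PTP event frame: poll
 * the timesync channel until TX_SNAPSHOT_LOCKED is set, convert the
 * snapshot to nanoseconds and hand it to the stack via skb_tstamp_tx().
 */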
static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ret;
	int ch;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
	if (ret)
		return ret;

	ch = PORT2CHANNEL(port);
	regs = port->timesync_regs;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     &regs->channel[ch].ch_event);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct port *port = netdev_priv(netdev);

	cfg.flags = 0;
	cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;

	switch (port->hwts_rx_en) {
	case 0:
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case PTP_SLAVE_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case PTP_MASTER_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

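/*
 * Issue one read or write command on the built-in MDIO controller and
 * busy-wait until the GO bit clears or MAX_MDIO_RETRIES microseconds
 * have elapsed.  Callers serialize access and disable interrupts via
 * mdio_lock.
 */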
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}

static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	mdio_regs = regs;
	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");

	err = of_mdiobus_register(mdio_bus, mdio_bus_np);
	if (err)
		mdiobus_free(mdio_bus);
	return err;
}

static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}


static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	netdev_info(dev, "%s: link up, speed %u Mb/s, %s duplex\n",
		    dev->name, port->speed, port->duplex ? "full" : "half");
}


static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	netdev_debug(dev, "%s(%i) ", func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}


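/*
 * Undo the streaming DMA mapping of a TX buffer.  On little-endian builds
 * the frame was byte-swapped into a bounce buffer and the mapped address
 * and length were rounded to 32-bit boundaries, so recompute the original
 * mapping from the descriptor before unmapping.
 */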
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}


static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}

static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	netdev_debug(dev, "eth_poll\n");
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			netdev_debug(dev, "eth_poll napi_complete\n");
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_below_low_watermark(rxq) &&
			    napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
				netdev_debug(dev, "eth_poll napi_reschedule succeeded\n");
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			netdev_debug(dev, "eth_poll all done\n");
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	netdev_debug(dev, "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}


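/*
 * TX-done queue handler: reclaim descriptors completed by the NPEs, update
 * stats, free the transmitted buffers and return the descriptors to the
 * per-port TX-ready queue, waking the netdev queue if it was stopped.
 */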
static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}

static netdev_tx_t eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	netdev_debug(dev, "eth_xmit\n");
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		netdev_debug(dev, "eth_xmit queue full\n");
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			netdev_debug(dev, "eth_xmit ready again\n");
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	netdev_debug(dev, "eth_xmit end\n");
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}


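/*
 * Program the receive address filter.  The MAC has a single address/mask
 * pair, so for a multicast list we accept the bits common to all listed
 * addresses: bits that differ between entries are masked out, which may
 * let some unwanted multicast frames through.
 */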
static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	eth_zero_addr(diffs);

	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}


static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (cpu_is_ixp46x()) {
		if (cmd == SIOCSHWTSTAMP)
			return hwtstamp_set(dev, req);
		if (cmd == SIOCGHWTSTAMP)
			return hwtstamp_get(dev, req);
	}

	return phy_mii_ioctl(dev->phydev, req, cmd);
}

/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct port *port = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
		 port->firmware[0], port->firmware[1],
		 port->firmware[2], port->firmware[3]);
	strscpy(info->bus_info, "internal", sizeof(info->bus_info));
}

static int ixp4xx_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	struct port *port = netdev_priv(dev);

	if (port->phc_index < 0)
		ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);

	info->phc_index = port->phc_index;

	if (info->phc_index < 0) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		return 0;
	}
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
	return 0;
}

static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ixp4xx_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};


static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}

static int init_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL,
					 &port->desc_tab_phys);
	if (!port->desc_tab)
		return -ENOMEM;
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

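/*
 * Bring the port up: load and query the NPE firmware if it is not running
 * yet, configure the RX queue, MAC address and firewall mode through NPE
 * messages, program the MAC registers, fill the hardware queues with
 * descriptors and enable NAPI and the queue manager interrupts.
 */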
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			netdev_err(dev, "%s not responding\n", npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(dev->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}

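/*
 * Shut the port down.  The NPE keeps some descriptors internally, so the
 * port is put into loopback and dummy frames are injected until all RX
 * and TX descriptors have been drained back into the software queues.
 */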
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		netdev_crit(dev, "unable to enable loopback\n");

	i = 0;
	do {			/* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain RX queue, %i buffer(s)"
			    " left in NPE\n", buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_debug(dev, "draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain TX queue, %i buffer(s) "
			    "left in NPE\n", buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_debug(dev, "draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		netdev_crit(dev, "unable to disable loopback\n");

	phy_stop(dev->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}

static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_set_mcast_list,
	.ndo_eth_ioctl = eth_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args queue_spec;
	struct of_phandle_args npe_spec;
	struct device_node *mdio_np;
	struct eth_plat_info *plat;
	u8 mac[ETH_ALEN];
	int ret;

	plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return NULL;

	ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0,
					       &npe_spec);
	if (ret) {
		dev_err(dev, "no NPE engine specified\n");
		return NULL;
	}
	/* NPE ID 0x00, 0x10, 0x20... */
	plat->npe = (npe_spec.args[0] << 4);

	/* Check if this device has an MDIO bus */
	mdio_np = of_get_child_by_name(np, "mdio");
	if (mdio_np) {
		plat->has_mdio = true;
		mdio_bus_np = mdio_np;
		/* DO NOT put the mdio_np, it will be used */
	}

	/* Get the rx queue as a resource from queue manager */
	ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
					       &queue_spec);
	if (ret) {
		dev_err(dev, "no rx queue phandle\n");
		return NULL;
	}
	plat->rxq = queue_spec.args[0];

	/* Get the txready queue as resource from queue manager */
	ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
					       &queue_spec);
	if (ret) {
		dev_err(dev, "no txready queue phandle\n");
		return NULL;
	}
	plat->txreadyq = queue_spec.args[0];

	ret = of_get_mac_address(np, mac);
	if (!ret) {
		dev_info(dev, "Setting macaddr from DT %pM\n", mac);
		memcpy(plat->hwaddr, mac, ETH_ALEN);
	}

	return plat;
}

static int ixp4xx_eth_probe(struct platform_device *pdev)
{
	struct phy_device *phydev = NULL;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct eth_plat_info *plat;
	struct net_device *ndev;
	struct port *port;
	int err;

	plat = ixp4xx_of_get_platdata(dev);
	if (!plat)
		return -ENODEV;

	if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, dev);
	port = netdev_priv(ndev);
	port->netdev = ndev;
	port->id = plat->npe;
	port->phc_index = -1;

	/* Get the port resource and remap */
	port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(port->regs))
		return PTR_ERR(port->regs);

	/* Register the MDIO bus if we have it */
	if (plat->has_mdio) {
		err = ixp4xx_mdio_register(port->regs);
		if (err) {
			dev_err(dev, "failed to register MDIO bus\n");
			return err;
		}
	}
	/* If the instance with the MDIO bus has not yet appeared,
	 * defer probing until it gets probed.
	 */
	if (!mdio_bus)
		return -EPROBE_DEFER;

	ndev->netdev_ops = &ixp4xx_netdev_ops;
	ndev->ethtool_ops = &ixp4xx_ethtool_ops;
	ndev->tx_queue_len = 100;
	/* Inherit the DMA masks from the platform device */
	ndev->dev.dma_mask = dev->dma_mask;
	ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;

	netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id))))
		return -EIO;

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	if (is_valid_ether_addr(plat->hwaddr))
		eth_hw_addr_set(ndev, plat->hwaddr);
	else
		eth_hw_addr_random(ndev);

	platform_set_drvdata(pdev, ndev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	phydev = of_phy_get_and_connect(ndev, np, ixp4xx_adjust_link);
	if (!phydev) {
		err = -ENODEV;
		dev_err(dev, "no phydev\n");
		goto err_free_mem;
	}

	phydev->irq = PHY_POLL;

	if ((err = register_netdev(ndev)))
		goto err_phy_dis;

	netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy,
		    npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	return err;
}

static int ixp4xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct phy_device *phydev = ndev->phydev;
	struct port *port = netdev_priv(ndev);

	unregister_netdev(ndev);
	phy_disconnect(phydev);
	ixp4xx_mdio_remove();
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	return 0;
}

static const struct of_device_id ixp4xx_eth_of_match[] = {
	{
		.compatible = "intel,ixp4xx-ethernet",
	},
	{ },
};

static struct platform_driver ixp4xx_eth_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(ixp4xx_eth_of_match),
	},
	.probe = ixp4xx_eth_probe,
	.remove = ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");