// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port		0x00		0x10		0x20
 * NPE			0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
 * physical PortId	2		0		1
 * TX queue		23		24		25
 * RX-free queue	26		27		28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_data/eth_ixp4xx.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

#include "ixp46x_ts.h"

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64	/* also length of all RX queues */
#define TX_DESCS		16	/* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64	/* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000
#define MAX_MRU			1536	/* 0x600 */
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100	/* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT		1000	/* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31

#define PTP_SLAVE_MODE		1
#define PTP_MASTER_MODE		2
#define PORT2CHANNEL(p)		NPE_ID(p->id)

/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40	/* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F	/* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10	/* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN


/* NPE message codes */
#define NPE_GETSTATUS				0x00
#define NPE_EDB_SETPORTADDRESS			0x01
#define NPE_EDB_GETMACADDRESSDATABASE		0x02
#define NPE_EDB_SETMACADDRESSSDATABASE		0x03
#define NPE_GETSTATS				0x04
#define NPE_RESETSTATS				0x05
#define NPE_SETMAXFRAMELENGTHS			0x06
#define NPE_VLAN_SETRXTAGMODE			0x07
#define NPE_VLAN_SETDEFAULTRXVID		0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY		0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE		0x0A
#define NPE_VLAN_SETRXQOSENTRY			0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE	0x0C
#define NPE_STP_SETBLOCKINGSTATE		0x0D
#define NPE_FW_SETFIREWALLMODE			0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID	0x0F
#define NPE_PC_SETAPMACTABLE			0x11
#define NPE_SETLOOPBACK_MODE			0x12
#define NPE_PC_SETBSSIDTABLE			0x13
#define NPE_ADDRESS_FILTER_CONFIG		0x14
#define NPE_APPENDFCSCONFIG			0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE		0x16
#define NPE_MAC_RECOVERY_START			0x17


#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};

struct port {
	struct resource *mem_res;
	struct eth_regs __iomem *regs;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
	u8 firmware[4];
	int hwts_tx_en;
	int hwts_rx_en;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};

/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};

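/*
 * Each port's descriptor table is a single coherent DMA allocation holding
 * RX_DESCS RX descriptors followed by TX_DESCS TX descriptors (see
 * POOL_ALLOC_SIZE).  Queue entries carry the physical address of a
 * descriptor in bits 5..31 (see the header comment), so the helpers below
 * convert between a descriptor index and its physical/virtual address
 * within that table.
 */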
#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

static DEFINE_SPINLOCK(mdio_lock);
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;

static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid == ntohs(*id));
}

static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	val = __raw_readl(&regs->channel[ch].ch_event);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ch;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	ch = PORT2CHANNEL(port);
	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     &regs->channel[ch].ch_event);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct port *port = netdev_priv(netdev);

	cfg.flags = 0;
	cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;

	switch (port->hwts_rx_en) {
	case 0:
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case PTP_SLAVE_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case PTP_MASTER_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
		-EFAULT : 0;
}

static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}

static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	mdio_regs = regs;
	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");

	if ((err = mdiobus_register(mdio_bus)))
		mdiobus_free(mdio_bus);
	return err;
}

static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}


static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	netdev_info(dev, "%s: link up, speed %u Mb/s, %s duplex\n",
		    dev->name, port->speed, port->duplex ? "full" : "half");
}


static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	netdev_debug(dev, "%s(%i) ", func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}


static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}


static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}

static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	netdev_debug(dev, "eth_poll\n");
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			netdev_debug(dev, "eth_poll napi_complete\n");
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_below_low_watermark(rxq) &&
			    napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
				netdev_debug(dev, "eth_poll napi_reschedule succeeded\n");
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			netdev_debug(dev, "eth_poll all done\n");
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	netdev_debug(dev, "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}


static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}

static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	netdev_debug(dev, "eth_xmit\n");
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		netdev_debug(dev, "eth_xmit queue full\n");
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			netdev_debug(dev, "eth_xmit ready again\n");
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	netdev_debug(dev, "eth_xmit end\n");
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}


static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	eth_zero_addr(diffs);

	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}


static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (cpu_is_ixp46x()) {
		if (cmd == SIOCSHWTSTAMP)
			return hwtstamp_set(dev, req);
		if (cmd == SIOCGHWTSTAMP)
			return hwtstamp_get(dev, req);
	}

	return phy_mii_ioctl(dev->phydev, req, cmd);
}

/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct port *port = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
		 port->firmware[0], port->firmware[1],
		 port->firmware[2], port->firmware[3]);
	strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}

int ixp46x_phc_index = -1;
EXPORT_SYMBOL_GPL(ixp46x_phc_index);

static int ixp4xx_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	if (!cpu_is_ixp46x()) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = ixp46x_phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
	return 0;
}

static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ixp4xx_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};


static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}

static int init_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

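/*
 * Port bring-up: load the NPE firmware if the NPE is not yet running and
 * read back its version, point all eight 802.1D priorities at this port's
 * RX queue, program the MAC address into the NPE and the MAC registers,
 * then allocate the queues and buffers and prime the TX-ready and RX-free
 * queues before enabling RX/TX in the core.
 */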
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			netdev_err(dev, "%s not responding\n", npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(dev->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}

static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		netdev_crit(dev, "unable to enable loopback\n");

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain RX queue, %i buffer(s)"
			    " left in NPE\n", buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_debug(dev, "draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain TX queue, %i buffer(s) "
			    "left in NPE\n", buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_debug(dev, "draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		netdev_crit(dev, "unable to disable loopback\n");

	phy_stop(dev->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}

static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_set_mcast_list,
	.ndo_do_ioctl = eth_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static int ixp4xx_eth_probe(struct platform_device *pdev)
{
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;
	struct device *dev = &pdev->dev;
	struct eth_plat_info *plat;
	resource_size_t regs_phys;
	struct net_device *ndev;
	struct resource *res;
	struct port *port;
	int err;

	plat = dev_get_platdata(dev);

	if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, dev);
	port = netdev_priv(ndev);
	port->netdev = ndev;
	port->id = pdev->id;

	/* Get the port resource and remap */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	regs_phys = res->start;
	port->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(port->regs))
		return PTR_ERR(port->regs);

	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		/* If the MDIO bus is not up yet, defer probe */
		if (!mdio_bus)
			return -EPROBE_DEFER;
		break;
	case IXP4XX_ETH_NPEB:
		/*
		 * On all except IXP43x, NPE-B is used for the MDIO bus.
		 * If there is no NPE-B in the feature set, bail out, else
		 * register the MDIO bus.
		 */
		if (!cpu_is_ixp43x()) {
			if (!(ixp4xx_read_feature_bits() &
			      IXP4XX_FEATURE_NPEB_ETH0))
				return -ENODEV;
			/* Else register the MDIO bus on NPE-B */
			if ((err = ixp4xx_mdio_register(port->regs)))
				return err;
		}
		if (!mdio_bus)
			return -EPROBE_DEFER;
		break;
	case IXP4XX_ETH_NPEC:
		/*
		 * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access;
		 * if there is no NPE-C, no bus, nothing works, so bail out.
		 */
		if (cpu_is_ixp43x()) {
			if (!(ixp4xx_read_feature_bits() &
			      IXP4XX_FEATURE_NPEC_ETH))
				return -ENODEV;
			/* Else register the MDIO bus on NPE-C */
			if ((err = ixp4xx_mdio_register(port->regs)))
				return err;
		}
		if (!mdio_bus)
			return -EPROBE_DEFER;
		break;
	default:
		return -ENODEV;
	}

	ndev->netdev_ops = &ixp4xx_netdev_ops;
	ndev->ethtool_ops = &ixp4xx_ethtool_ops;
	ndev->tx_queue_len = 100;

	netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id))))
		return -EIO;

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, ndev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_npe_rel;
	}

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, ndev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
		 mdio_bus->id, plat->phy);
	phydev = phy_connect(ndev, phy_id, &ixp4xx_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		err = PTR_ERR(phydev);
		goto err_free_mem;
	}

	phydev->irq = PHY_POLL;

	if ((err = register_netdev(ndev)))
		goto err_phy_dis;

	netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy,
		    npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	release_resource(port->mem_res);
err_npe_rel:
	npe_release(port->npe);
	return err;
}

static int ixp4xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct phy_device *phydev = ndev->phydev;
	struct port *port = netdev_priv(ndev);

	unregister_netdev(ndev);
	phy_disconnect(phydev);
	ixp4xx_mdio_remove();
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	release_resource(port->mem_res);
	return 0;
}

static struct platform_driver ixp4xx_eth_driver = {
	.driver.name	= DRV_NAME,
	.probe		= ixp4xx_eth_probe,
	.remove		= ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");