// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port		0x00		0x10		0x20
 * NPE			0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
 * physical PortId	2		0		1
 * TX queue		23		24		25
 * RX-free queue	26		27		28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_data/eth_ixp4xx.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

#include "ixp46x_ts.h"

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64 /* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000
#define MAX_MRU			1536 /* 0x600 */
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31

#define PTP_SLAVE_MODE		1
#define PTP_MASTER_MODE		2
#define PORT2CHANNEL(p)		NPE_ID(p->id)
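
/*
 * Worked example of the macros above, matching the port config table in
 * the header comment: for logical port 0x10, NPE_ID() = 1 (NPE-B),
 * PHYSICAL_ID() = 0, TX_QUEUE() = 24 and RXFREE_QUEUE() = 27.
 * PORT2CHANNEL() reuses the NPE ID as the IXP46x timesync channel number.
 */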

/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN


/* NPE message codes */
#define NPE_GETSTATUS			0x00
#define NPE_EDB_SETPORTADDRESS		0x01
#define NPE_EDB_GETMACADDRESSDATABASE	0x02
#define NPE_EDB_SETMACADDRESSSDATABASE	0x03
#define NPE_GETSTATS			0x04
#define NPE_RESETSTATS			0x05
#define NPE_SETMAXFRAMELENGTHS		0x06
#define NPE_VLAN_SETRXTAGMODE		0x07
#define NPE_VLAN_SETDEFAULTRXVID	0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
#define NPE_VLAN_SETRXQOSENTRY		0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE	0x0D
#define NPE_FW_SETFIREWALLMODE		0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE		0x11
#define NPE_SETLOOPBACK_MODE		0x12
#define NPE_PC_SETBSSIDTABLE		0x13
#define NPE_ADDRESS_FILTER_CONFIG	0x14
#define NPE_APPENDFCSCONFIG		0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
#define NPE_MAC_RECOVERY_START		0x17


#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};

struct port {
	struct resource *mem_res;
	struct eth_regs __iomem *regs;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
	u8 firmware[4];
	int hwts_tx_en;
	int hwts_rx_en;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};

/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};

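/*
 * Each port owns a single coherent dma_pool allocation of POOL_ALLOC_SIZE
 * bytes: RX_DESCS RX descriptors followed by TX_DESCS TX descriptors.
 * The macros below convert a descriptor index to the bus address handed
 * to the queue manager and to a CPU pointer into that table.
 */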
#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;

static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid == ntohs(*id));
}

static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	val = __raw_readl(&regs->channel[ch].ch_event);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ch;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	ch = PORT2CHANNEL(port);
	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     &regs->channel[ch].ch_event);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct port *port = netdev_priv(netdev);

	cfg.flags = 0;
	cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;

	switch (port->hwts_rx_en) {
	case 0:
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case PTP_SLAVE_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case PTP_MASTER_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

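/*
 * MDIO goes through the byte-wide mdio_command[]/mdio_status[] registers
 * of the MAC that has CORE_MDC_EN set; one such bus is shared by all
 * ports.  The command bytes (data low/high, PHY/register address) are
 * written into mdio_command[0..3], bit 7 of mdio_command[3] is the GO
 * bit polled for completion, and read data comes back in mdio_status[0..1].
 */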
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}

static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	mdio_regs = regs;
	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
	spin_lock_init(&mdio_lock);
	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");

	if ((err = mdiobus_register(mdio_bus)))
		mdiobus_free(mdio_bus);
	return err;
}

static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}


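/*
 * phylib link-change callback: only the duplex setting is programmed
 * into the MAC (TX_CNTRL0_HALFDUPLEX); the negotiated speed is just
 * cached in port->speed for the link-up message.
 */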
static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	netdev_info(dev, "%s: link up, speed %u Mb/s, %s duplex\n",
		    dev->name, port->speed, port->duplex ? "full" : "half");
}


static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	netdev_dbg(dev, "%s(%i) ", func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}


static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}


static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}

static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	netdev_dbg(dev, "eth_poll\n");
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			netdev_dbg(dev, "eth_poll napi_complete\n");
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_below_low_watermark(rxq) &&
			    napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
				netdev_dbg(dev, "eth_poll napi_reschedule succeeded\n");
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			netdev_dbg(dev, "eth_poll all done\n");
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	netdev_dbg(dev, "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}


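/*
 * TXDONE_QUEUE is shared by all ports: per the header comment, the two
 * low bits of each entry carry the NPE ID, which eth_txdone_irq() uses
 * to find the owning port in npe_port_tab[] before recycling the
 * descriptor onto that port's TX-ready queue.
 */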
static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}

static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	netdev_dbg(dev, "eth_xmit\n");
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		netdev_dbg(dev, "eth_xmit queue full\n");
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			netdev_dbg(dev, "eth_xmit ready again\n");
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	netdev_dbg(dev, "eth_xmit end\n");
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}


static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	eth_zero_addr(diffs);

	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}


static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (cpu_is_ixp46x()) {
		if (cmd == SIOCSHWTSTAMP)
			return hwtstamp_set(dev, req);
		if (cmd == SIOCGHWTSTAMP)
			return hwtstamp_get(dev, req);
	}

	return phy_mii_ioctl(dev->phydev, req, cmd);
}

/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct port *port = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
		 port->firmware[0], port->firmware[1],
		 port->firmware[2], port->firmware[3]);
	strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}

int ixp46x_phc_index = -1;
EXPORT_SYMBOL_GPL(ixp46x_phc_index);

static int ixp4xx_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	if (!cpu_is_ixp46x()) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = ixp46x_phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
	return 0;
}

static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ixp4xx_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};


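/*
 * Each port requests its own RX-free, RX, TX and TX-ready hardware
 * queues; the shared TX-done queue is requested by the first port to
 * open and released when the last one closes (tracked via ports_open).
 */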
static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}

static int init_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

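/*
 * Open sequence: load the NPE firmware if it is not already running and
 * read back its version, point all eight RX QoS table entries at this
 * port's RX queue, set the MAC address and firewall mode via NPE
 * messages, program the MAC registers, prime the TX-ready and RX-free
 * queues with descriptors, then enable NAPI and the queue IRQs.
 */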
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			netdev_err(dev, "%s not responding\n", npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(dev->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}

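/*
 * Closing has to reclaim RX buffers still held by the NPE: the port is
 * put into internal loopback and, whenever the TX queue runs dry, a tiny
 * dummy frame is injected so the NPE keeps returning descriptors, until
 * every RX buffer is back on a software-visible queue or MAX_CLOSE_WAIT
 * iterations have elapsed.
 */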
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		netdev_crit(dev, "unable to enable loopback\n");

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain RX queue, %i buffer(s)"
			    " left in NPE\n", buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_dbg(dev, "draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain TX queue, %i buffer(s) "
			    "left in NPE\n", buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_dbg(dev, "draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		netdev_crit(dev, "unable to disable loopback\n");

	phy_stop(dev->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}

static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_set_mcast_list,
	.ndo_do_ioctl = eth_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static int ixp4xx_eth_probe(struct platform_device *pdev)
{
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;
	struct device *dev = &pdev->dev;
	struct eth_plat_info *plat;
	resource_size_t regs_phys;
	struct net_device *ndev;
	struct resource *res;
	struct port *port;
	int err;

	plat = dev_get_platdata(dev);

	if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, dev);
	port = netdev_priv(ndev);
	port->netdev = ndev;
	port->id = pdev->id;

	/* Get the port resource and remap */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	regs_phys = res->start;
	port->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(port->regs))
		return PTR_ERR(port->regs);

	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		/* If the MDIO bus is not up yet, defer probe */
		if (!mdio_bus)
			return -EPROBE_DEFER;
		break;
	case IXP4XX_ETH_NPEB:
		/*
		 * On all except IXP43x, NPE-B is used for the MDIO bus.
		 * If there is no NPE-B in the feature set, bail out, else
		 * register the MDIO bus.
		 */
		if (!cpu_is_ixp43x()) {
			if (!(ixp4xx_read_feature_bits() &
			      IXP4XX_FEATURE_NPEB_ETH0))
				return -ENODEV;
			/* Else register the MDIO bus on NPE-B */
			if ((err = ixp4xx_mdio_register(port->regs)))
				return err;
		}
		if (!mdio_bus)
			return -EPROBE_DEFER;
		break;
	case IXP4XX_ETH_NPEC:
		/*
		 * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access;
		 * if there is no NPE-C, no bus, nothing works, so bail out.
		 */
		if (cpu_is_ixp43x()) {
			if (!(ixp4xx_read_feature_bits() &
			      IXP4XX_FEATURE_NPEC_ETH))
				return -ENODEV;
			/* Else register the MDIO bus on NPE-C */
			if ((err = ixp4xx_mdio_register(port->regs)))
				return err;
		}
		if (!mdio_bus)
			return -EPROBE_DEFER;
		break;
	default:
		return -ENODEV;
	}

	ndev->netdev_ops = &ixp4xx_netdev_ops;
	ndev->ethtool_ops = &ixp4xx_ethtool_ops;
	ndev->tx_queue_len = 100;

	netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id))))
		return -EIO;

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, ndev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_npe_rel;
	}

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, ndev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
		 mdio_bus->id, plat->phy);
	phydev = phy_connect(ndev, phy_id, &ixp4xx_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		err = PTR_ERR(phydev);
		goto err_free_mem;
	}

	phydev->irq = PHY_POLL;

	if ((err = register_netdev(ndev)))
		goto err_phy_dis;

	netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy,
		    npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	release_resource(port->mem_res);
err_npe_rel:
	npe_release(port->npe);
	return err;
}

static int ixp4xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct phy_device *phydev = ndev->phydev;
	struct port *port = netdev_priv(ndev);

	unregister_netdev(ndev);
	phy_disconnect(phydev);
	ixp4xx_mdio_remove();
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	release_resource(port->mem_res);
	return 0;
}

static struct platform_driver ixp4xx_eth_driver = {
	.driver.name	= DRV_NAME,
	.probe		= ixp4xx_eth_probe,
	.remove		= ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");