1 /* 2 * Copyright (C) 2006, 2007 Eugene Konev 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 19 #include <linux/module.h> 20 #include <linux/interrupt.h> 21 #include <linux/moduleparam.h> 22 23 #include <linux/sched.h> 24 #include <linux/kernel.h> 25 #include <linux/slab.h> 26 #include <linux/errno.h> 27 #include <linux/types.h> 28 #include <linux/delay.h> 29 30 #include <linux/netdevice.h> 31 #include <linux/if_vlan.h> 32 #include <linux/etherdevice.h> 33 #include <linux/ethtool.h> 34 #include <linux/skbuff.h> 35 #include <linux/mii.h> 36 #include <linux/phy.h> 37 #include <linux/phy_fixed.h> 38 #include <linux/platform_device.h> 39 #include <linux/dma-mapping.h> 40 #include <linux/clk.h> 41 #include <linux/gpio.h> 42 #include <linux/atomic.h> 43 44 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 45 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); 46 MODULE_LICENSE("GPL"); 47 MODULE_ALIAS("platform:cpmac"); 48 49 static int debug_level = 8; 50 static int dumb_switch; 51 52 /* Next 2 are only used in cpmac_probe, so it's pointless to change them */ 53 module_param(debug_level, int, 0444); 54 module_param(dumb_switch, int, 0444); 55 56 MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); 57 MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); 58 59 #define CPMAC_VERSION "0.5.2" 60 /* frame size + 802.1q tag + FCS size */ 61 #define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) 62 #define CPMAC_QUEUES 8 63 64 /* Ethernet registers */ 65 #define CPMAC_TX_CONTROL 0x0004 66 #define CPMAC_TX_TEARDOWN 0x0008 67 #define CPMAC_RX_CONTROL 0x0014 68 #define CPMAC_RX_TEARDOWN 0x0018 69 #define CPMAC_MBP 0x0100 70 #define MBP_RXPASSCRC 0x40000000 71 #define MBP_RXQOS 0x20000000 72 #define MBP_RXNOCHAIN 0x10000000 73 #define MBP_RXCMF 0x01000000 74 #define MBP_RXSHORT 0x00800000 75 #define MBP_RXCEF 0x00400000 76 #define MBP_RXPROMISC 0x00200000 77 #define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) 78 #define MBP_RXBCAST 0x00002000 79 #define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) 80 #define MBP_RXMCAST 0x00000020 81 #define MBP_MCASTCHAN(channel) ((channel) & 0x7) 82 #define CPMAC_UNICAST_ENABLE 0x0104 83 #define CPMAC_UNICAST_CLEAR 0x0108 84 #define CPMAC_MAX_LENGTH 0x010c 85 #define CPMAC_BUFFER_OFFSET 0x0110 86 #define CPMAC_MAC_CONTROL 0x0160 87 #define MAC_TXPTYPE 0x00000200 88 #define MAC_TXPACE 0x00000040 89 #define MAC_MII 0x00000020 90 #define MAC_TXFLOW 0x00000010 91 #define MAC_RXFLOW 0x00000008 92 #define MAC_MTEST 0x00000004 93 #define MAC_LOOPBACK 0x00000002 94 #define MAC_FDX 0x00000001 95 #define CPMAC_MAC_STATUS 0x0164 96 #define MAC_STATUS_QOS 0x00000004 97 #define MAC_STATUS_RXFLOW 0x00000002 98 #define MAC_STATUS_TXFLOW 0x00000001 99 #define CPMAC_TX_INT_ENABLE 0x0178 100 #define CPMAC_TX_INT_CLEAR 0x017c 101 #define CPMAC_MAC_INT_VECTOR 0x0180 102 #define MAC_INT_STATUS 0x00080000 103 #define MAC_INT_HOST 0x00040000 104 #define MAC_INT_RX 0x00020000 105 #define MAC_INT_TX 0x00010000 106 #define CPMAC_MAC_EOI_VECTOR 0x0184 107 #define CPMAC_RX_INT_ENABLE 0x0198 108 #define CPMAC_RX_INT_CLEAR 0x019c 109 #define CPMAC_MAC_INT_ENABLE 0x01a8 110 #define CPMAC_MAC_INT_CLEAR 0x01ac 111 #define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) 112 #define CPMAC_MAC_ADDR_MID 0x01d0 113 #define CPMAC_MAC_ADDR_HI 0x01d4 114 #define CPMAC_MAC_HASH_LO 0x01d8 115 #define CPMAC_MAC_HASH_HI 0x01dc 116 #define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4) 117 #define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4) 118 #define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4) 119 #define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4) 120 #define CPMAC_REG_END 0x0680 121 122 /* Rx/Tx statistics 123 * TODO: use some of them to fill stats in cpmac_stats() 124 */ 125 #define CPMAC_STATS_RX_GOOD 0x0200 126 #define CPMAC_STATS_RX_BCAST 0x0204 127 #define CPMAC_STATS_RX_MCAST 0x0208 128 #define CPMAC_STATS_RX_PAUSE 0x020c 129 #define CPMAC_STATS_RX_CRC 0x0210 130 #define CPMAC_STATS_RX_ALIGN 0x0214 131 #define CPMAC_STATS_RX_OVER 0x0218 132 #define CPMAC_STATS_RX_JABBER 0x021c 133 #define CPMAC_STATS_RX_UNDER 0x0220 134 #define CPMAC_STATS_RX_FRAG 0x0224 135 #define CPMAC_STATS_RX_FILTER 0x0228 136 #define CPMAC_STATS_RX_QOSFILTER 0x022c 137 #define CPMAC_STATS_RX_OCTETS 0x0230 138 139 #define CPMAC_STATS_TX_GOOD 0x0234 140 #define CPMAC_STATS_TX_BCAST 0x0238 141 #define CPMAC_STATS_TX_MCAST 0x023c 142 #define CPMAC_STATS_TX_PAUSE 0x0240 143 #define CPMAC_STATS_TX_DEFER 0x0244 144 #define CPMAC_STATS_TX_COLLISION 0x0248 145 #define CPMAC_STATS_TX_SINGLECOLL 0x024c 146 #define CPMAC_STATS_TX_MULTICOLL 0x0250 147 #define CPMAC_STATS_TX_EXCESSCOLL 0x0254 148 #define CPMAC_STATS_TX_LATECOLL 0x0258 149 #define CPMAC_STATS_TX_UNDERRUN 0x025c 150 #define CPMAC_STATS_TX_CARRIERSENSE 0x0260 151 #define CPMAC_STATS_TX_OCTETS 0x0264 152 153 #define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg))) 154 #define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \ 155 (reg))) 156 157 /* MDIO bus */ 158 #define CPMAC_MDIO_VERSION 0x0000 159 #define CPMAC_MDIO_CONTROL 0x0004 160 #define MDIOC_IDLE 0x80000000 161 #define MDIOC_ENABLE 0x40000000 162 #define MDIOC_PREAMBLE 0x00100000 163 #define MDIOC_FAULT 0x00080000 164 #define MDIOC_FAULTDETECT 0x00040000 165 #define MDIOC_INTTEST 0x00020000 166 #define MDIOC_CLKDIV(div) ((div) & 0xff) 167 #define CPMAC_MDIO_ALIVE 0x0008 168 #define CPMAC_MDIO_LINK 0x000c 169 #define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8) 170 #define MDIO_BUSY 0x80000000 171 #define MDIO_WRITE 0x40000000 172 #define MDIO_REG(reg) (((reg) & 0x1f) << 21) 173 #define MDIO_PHY(phy) (((phy) & 0x1f) << 16) 174 #define MDIO_DATA(data) ((data) & 0xffff) 175 #define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8) 176 #define PHYSEL_LINKSEL 0x00000040 177 #define PHYSEL_LINKINT 0x00000020 178 179 struct cpmac_desc { 180 u32 hw_next; 181 u32 hw_data; 182 u16 buflen; 183 u16 bufflags; 184 u16 datalen; 185 u16 dataflags; 186 #define CPMAC_SOP 0x8000 187 #define CPMAC_EOP 0x4000 188 #define CPMAC_OWN 0x2000 189 #define CPMAC_EOQ 0x1000 190 struct sk_buff *skb; 191 struct cpmac_desc *next; 192 struct cpmac_desc *prev; 193 dma_addr_t mapping; 194 dma_addr_t data_mapping; 195 }; 196 197 struct cpmac_priv { 198 spinlock_t lock; 199 spinlock_t rx_lock; 200 struct cpmac_desc *rx_head; 201 int ring_size; 202 struct cpmac_desc *desc_ring; 203 dma_addr_t dma_ring; 204 void __iomem *regs; 205 struct mii_bus *mii_bus; 206 struct phy_device *phy; 207 char phy_name[MII_BUS_ID_SIZE + 3]; 208 int oldlink, oldspeed, oldduplex; 209 u32 msg_enable; 210 struct net_device *dev; 211 struct work_struct reset_work; 212 struct platform_device *pdev; 213 struct napi_struct napi; 214 atomic_t reset_pending; 215 }; 216 217 static irqreturn_t cpmac_irq(int, void *); 218 static void cpmac_hw_start(struct net_device *dev); 219 static void cpmac_hw_stop(struct net_device *dev); 220 static int cpmac_stop(struct net_device *dev); 221 static int cpmac_open(struct net_device *dev); 222 223 static void cpmac_dump_regs(struct net_device *dev) 224 { 225 int i; 226 struct cpmac_priv *priv = netdev_priv(dev); 227 228 for (i = 0; i < CPMAC_REG_END; i += 4) { 229 if (i % 16 == 0) { 230 if (i) 231 printk("\n"); 232 printk("%s: reg[%p]:", dev->name, priv->regs + i); 233 } 234 printk(" %08x", cpmac_read(priv->regs, i)); 235 } 236 printk("\n"); 237 } 238 239 static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) 240 { 241 int i; 242 243 printk("%s: desc[%p]:", dev->name, desc); 244 for (i = 0; i < sizeof(*desc) / 4; i++) 245 printk(" %08x", ((u32 *)desc)[i]); 246 printk("\n"); 247 } 248 249 static void cpmac_dump_all_desc(struct net_device *dev) 250 { 251 struct cpmac_priv *priv = netdev_priv(dev); 252 struct cpmac_desc *dump = priv->rx_head; 253 254 do { 255 cpmac_dump_desc(dev, dump); 256 dump = dump->next; 257 } while (dump != priv->rx_head); 258 } 259 260 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) 261 { 262 int i; 263 264 printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len); 265 for (i = 0; i < skb->len; i++) { 266 if (i % 16 == 0) { 267 if (i) 268 printk("\n"); 269 printk("%s: data[%p]:", dev->name, skb->data + i); 270 } 271 printk(" %02x", ((u8 *)skb->data)[i]); 272 } 273 printk("\n"); 274 } 275 276 static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) 277 { 278 u32 val; 279 280 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) 281 cpu_relax(); 282 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | 283 MDIO_PHY(phy_id)); 284 while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) 285 cpu_relax(); 286 287 return MDIO_DATA(val); 288 } 289 290 static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, 291 int reg, u16 val) 292 { 293 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) 294 cpu_relax(); 295 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | 296 MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); 297 298 return 0; 299 } 300 301 static int cpmac_mdio_reset(struct mii_bus *bus) 302 { 303 struct clk *cpmac_clk; 304 305 cpmac_clk = clk_get(&bus->dev, "cpmac"); 306 if (IS_ERR(cpmac_clk)) { 307 pr_err("unable to get cpmac clock\n"); 308 return -1; 309 } 310 ar7_device_reset(AR7_RESET_BIT_MDIO); 311 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | 312 MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1)); 313 314 return 0; 315 } 316 317 static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; 318 319 static struct mii_bus *cpmac_mii; 320 321 static void cpmac_set_multicast_list(struct net_device *dev) 322 { 323 struct netdev_hw_addr *ha; 324 u8 tmp; 325 u32 mbp, bit, hash[2] = { 0, }; 326 struct cpmac_priv *priv = netdev_priv(dev); 327 328 mbp = cpmac_read(priv->regs, CPMAC_MBP); 329 if (dev->flags & IFF_PROMISC) { 330 cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | 331 MBP_RXPROMISC); 332 } else { 333 cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); 334 if (dev->flags & IFF_ALLMULTI) { 335 /* enable all multicast mode */ 336 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); 337 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); 338 } else { 339 /* cpmac uses some strange mac address hashing 340 * (not crc32) 341 */ 342 netdev_for_each_mc_addr(ha, dev) { 343 bit = 0; 344 tmp = ha->addr[0]; 345 bit ^= (tmp >> 2) ^ (tmp << 4); 346 tmp = ha->addr[1]; 347 bit ^= (tmp >> 4) ^ (tmp << 2); 348 tmp = ha->addr[2]; 349 bit ^= (tmp >> 6) ^ tmp; 350 tmp = ha->addr[3]; 351 bit ^= (tmp >> 2) ^ (tmp << 4); 352 tmp = ha->addr[4]; 353 bit ^= (tmp >> 4) ^ (tmp << 2); 354 tmp = ha->addr[5]; 355 bit ^= (tmp >> 6) ^ tmp; 356 bit &= 0x3f; 357 hash[bit / 32] |= 1 << (bit % 32); 358 } 359 360 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); 361 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); 362 } 363 } 364 } 365 366 static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, 367 struct cpmac_desc *desc) 368 { 369 struct sk_buff *skb, *result = NULL; 370 371 if (unlikely(netif_msg_hw(priv))) 372 cpmac_dump_desc(priv->dev, desc); 373 cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); 374 if (unlikely(!desc->datalen)) { 375 if (netif_msg_rx_err(priv) && net_ratelimit()) 376 netdev_warn(priv->dev, "rx: spurious interrupt\n"); 377 378 return NULL; 379 } 380 381 skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE); 382 if (likely(skb)) { 383 skb_put(desc->skb, desc->datalen); 384 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); 385 skb_checksum_none_assert(desc->skb); 386 priv->dev->stats.rx_packets++; 387 priv->dev->stats.rx_bytes += desc->datalen; 388 result = desc->skb; 389 dma_unmap_single(&priv->dev->dev, desc->data_mapping, 390 CPMAC_SKB_SIZE, DMA_FROM_DEVICE); 391 desc->skb = skb; 392 desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, 393 CPMAC_SKB_SIZE, 394 DMA_FROM_DEVICE); 395 desc->hw_data = (u32)desc->data_mapping; 396 if (unlikely(netif_msg_pktdata(priv))) { 397 netdev_dbg(priv->dev, "received packet:\n"); 398 cpmac_dump_skb(priv->dev, result); 399 } 400 } else { 401 if (netif_msg_rx_err(priv) && net_ratelimit()) 402 netdev_warn(priv->dev, 403 "low on skbs, dropping packet\n"); 404 405 priv->dev->stats.rx_dropped++; 406 } 407 408 desc->buflen = CPMAC_SKB_SIZE; 409 desc->dataflags = CPMAC_OWN; 410 411 return result; 412 } 413 414 static int cpmac_poll(struct napi_struct *napi, int budget) 415 { 416 struct sk_buff *skb; 417 struct cpmac_desc *desc, *restart; 418 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); 419 int received = 0, processed = 0; 420 421 spin_lock(&priv->rx_lock); 422 if (unlikely(!priv->rx_head)) { 423 if (netif_msg_rx_err(priv) && net_ratelimit()) 424 netdev_warn(priv->dev, "rx: polling, but no queue\n"); 425 426 spin_unlock(&priv->rx_lock); 427 napi_complete(napi); 428 return 0; 429 } 430 431 desc = priv->rx_head; 432 restart = NULL; 433 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { 434 processed++; 435 436 if ((desc->dataflags & CPMAC_EOQ) != 0) { 437 /* The last update to eoq->hw_next didn't happen 438 * soon enough, and the receiver stopped here. 439 * Remember this descriptor so we can restart 440 * the receiver after freeing some space. 441 */ 442 if (unlikely(restart)) { 443 if (netif_msg_rx_err(priv)) 444 netdev_err(priv->dev, "poll found a" 445 " duplicate EOQ: %p and %p\n", 446 restart, desc); 447 goto fatal_error; 448 } 449 450 restart = desc->next; 451 } 452 453 skb = cpmac_rx_one(priv, desc); 454 if (likely(skb)) { 455 netif_receive_skb(skb); 456 received++; 457 } 458 desc = desc->next; 459 } 460 461 if (desc != priv->rx_head) { 462 /* We freed some buffers, but not the whole ring, 463 * add what we did free to the rx list 464 */ 465 desc->prev->hw_next = (u32)0; 466 priv->rx_head->prev->hw_next = priv->rx_head->mapping; 467 } 468 469 /* Optimization: If we did not actually process an EOQ (perhaps because 470 * of quota limits), check to see if the tail of the queue has EOQ set. 471 * We should immediately restart in that case so that the receiver can 472 * restart and run in parallel with more packet processing. 473 * This lets us handle slightly larger bursts before running 474 * out of ring space (assuming dev->weight < ring_size) 475 */ 476 477 if (!restart && 478 (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) 479 == CPMAC_EOQ && 480 (priv->rx_head->dataflags & CPMAC_OWN) != 0) { 481 /* reset EOQ so the poll loop (above) doesn't try to 482 * restart this when it eventually gets to this descriptor. 483 */ 484 priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; 485 restart = priv->rx_head; 486 } 487 488 if (restart) { 489 priv->dev->stats.rx_errors++; 490 priv->dev->stats.rx_fifo_errors++; 491 if (netif_msg_rx_err(priv) && net_ratelimit()) 492 netdev_warn(priv->dev, "rx dma ring overrun\n"); 493 494 if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { 495 if (netif_msg_drv(priv)) 496 netdev_err(priv->dev, "cpmac_poll is trying " 497 "to restart rx from a descriptor " 498 "that's not free: %p\n", restart); 499 goto fatal_error; 500 } 501 502 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); 503 } 504 505 priv->rx_head = desc; 506 spin_unlock(&priv->rx_lock); 507 if (unlikely(netif_msg_rx_status(priv))) 508 netdev_dbg(priv->dev, "poll processed %d packets\n", received); 509 510 if (processed == 0) { 511 /* we ran out of packets to read, 512 * revert to interrupt-driven mode 513 */ 514 napi_complete(napi); 515 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 516 return 0; 517 } 518 519 return 1; 520 521 fatal_error: 522 /* Something went horribly wrong. 523 * Reset hardware to try to recover rather than wedging. 524 */ 525 if (netif_msg_drv(priv)) { 526 netdev_err(priv->dev, "cpmac_poll is confused. " 527 "Resetting hardware\n"); 528 cpmac_dump_all_desc(priv->dev); 529 netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", 530 cpmac_read(priv->regs, CPMAC_RX_PTR(0)), 531 cpmac_read(priv->regs, CPMAC_RX_ACK(0))); 532 } 533 534 spin_unlock(&priv->rx_lock); 535 napi_complete(napi); 536 netif_tx_stop_all_queues(priv->dev); 537 napi_disable(&priv->napi); 538 539 atomic_inc(&priv->reset_pending); 540 cpmac_hw_stop(priv->dev); 541 if (!schedule_work(&priv->reset_work)) 542 atomic_dec(&priv->reset_pending); 543 544 return 0; 545 546 } 547 548 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) 549 { 550 int queue, len; 551 struct cpmac_desc *desc; 552 struct cpmac_priv *priv = netdev_priv(dev); 553 554 if (unlikely(atomic_read(&priv->reset_pending))) 555 return NETDEV_TX_BUSY; 556 557 if (unlikely(skb_padto(skb, ETH_ZLEN))) 558 return NETDEV_TX_OK; 559 560 len = max(skb->len, ETH_ZLEN); 561 queue = skb_get_queue_mapping(skb); 562 netif_stop_subqueue(dev, queue); 563 564 desc = &priv->desc_ring[queue]; 565 if (unlikely(desc->dataflags & CPMAC_OWN)) { 566 if (netif_msg_tx_err(priv) && net_ratelimit()) 567 netdev_warn(dev, "tx dma ring full\n"); 568 569 return NETDEV_TX_BUSY; 570 } 571 572 spin_lock(&priv->lock); 573 spin_unlock(&priv->lock); 574 desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; 575 desc->skb = skb; 576 desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, 577 DMA_TO_DEVICE); 578 desc->hw_data = (u32)desc->data_mapping; 579 desc->datalen = len; 580 desc->buflen = len; 581 if (unlikely(netif_msg_tx_queued(priv))) 582 netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len); 583 if (unlikely(netif_msg_hw(priv))) 584 cpmac_dump_desc(dev, desc); 585 if (unlikely(netif_msg_pktdata(priv))) 586 cpmac_dump_skb(dev, skb); 587 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); 588 589 return NETDEV_TX_OK; 590 } 591 592 static void cpmac_end_xmit(struct net_device *dev, int queue) 593 { 594 struct cpmac_desc *desc; 595 struct cpmac_priv *priv = netdev_priv(dev); 596 597 desc = &priv->desc_ring[queue]; 598 cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); 599 if (likely(desc->skb)) { 600 spin_lock(&priv->lock); 601 dev->stats.tx_packets++; 602 dev->stats.tx_bytes += desc->skb->len; 603 spin_unlock(&priv->lock); 604 dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, 605 DMA_TO_DEVICE); 606 607 if (unlikely(netif_msg_tx_done(priv))) 608 netdev_dbg(dev, "sent 0x%p, len=%d\n", 609 desc->skb, desc->skb->len); 610 611 dev_kfree_skb_irq(desc->skb); 612 desc->skb = NULL; 613 if (__netif_subqueue_stopped(dev, queue)) 614 netif_wake_subqueue(dev, queue); 615 } else { 616 if (netif_msg_tx_err(priv) && net_ratelimit()) 617 netdev_warn(dev, "end_xmit: spurious interrupt\n"); 618 if (__netif_subqueue_stopped(dev, queue)) 619 netif_wake_subqueue(dev, queue); 620 } 621 } 622 623 static void cpmac_hw_stop(struct net_device *dev) 624 { 625 int i; 626 struct cpmac_priv *priv = netdev_priv(dev); 627 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); 628 629 ar7_device_reset(pdata->reset_bit); 630 cpmac_write(priv->regs, CPMAC_RX_CONTROL, 631 cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); 632 cpmac_write(priv->regs, CPMAC_TX_CONTROL, 633 cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); 634 for (i = 0; i < 8; i++) { 635 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); 636 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); 637 } 638 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); 639 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); 640 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); 641 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); 642 cpmac_write(priv->regs, CPMAC_MAC_CONTROL, 643 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); 644 } 645 646 static void cpmac_hw_start(struct net_device *dev) 647 { 648 int i; 649 struct cpmac_priv *priv = netdev_priv(dev); 650 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); 651 652 ar7_device_reset(pdata->reset_bit); 653 for (i = 0; i < 8; i++) { 654 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); 655 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); 656 } 657 cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); 658 659 cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | 660 MBP_RXMCAST); 661 cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); 662 for (i = 0; i < 8; i++) 663 cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); 664 cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); 665 cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | 666 (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) | 667 (dev->dev_addr[3] << 24)); 668 cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); 669 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); 670 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); 671 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); 672 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); 673 cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1); 674 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 675 cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); 676 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); 677 678 cpmac_write(priv->regs, CPMAC_RX_CONTROL, 679 cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); 680 cpmac_write(priv->regs, CPMAC_TX_CONTROL, 681 cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); 682 cpmac_write(priv->regs, CPMAC_MAC_CONTROL, 683 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | 684 MAC_FDX); 685 } 686 687 static void cpmac_clear_rx(struct net_device *dev) 688 { 689 struct cpmac_priv *priv = netdev_priv(dev); 690 struct cpmac_desc *desc; 691 int i; 692 693 if (unlikely(!priv->rx_head)) 694 return; 695 desc = priv->rx_head; 696 for (i = 0; i < priv->ring_size; i++) { 697 if ((desc->dataflags & CPMAC_OWN) == 0) { 698 if (netif_msg_rx_err(priv) && net_ratelimit()) 699 netdev_warn(dev, "packet dropped\n"); 700 if (unlikely(netif_msg_hw(priv))) 701 cpmac_dump_desc(dev, desc); 702 desc->dataflags = CPMAC_OWN; 703 dev->stats.rx_dropped++; 704 } 705 desc->hw_next = desc->next->mapping; 706 desc = desc->next; 707 } 708 priv->rx_head->prev->hw_next = 0; 709 } 710 711 static void cpmac_clear_tx(struct net_device *dev) 712 { 713 struct cpmac_priv *priv = netdev_priv(dev); 714 int i; 715 716 if (unlikely(!priv->desc_ring)) 717 return; 718 for (i = 0; i < CPMAC_QUEUES; i++) { 719 priv->desc_ring[i].dataflags = 0; 720 if (priv->desc_ring[i].skb) { 721 dev_kfree_skb_any(priv->desc_ring[i].skb); 722 priv->desc_ring[i].skb = NULL; 723 } 724 } 725 } 726 727 static void cpmac_hw_error(struct work_struct *work) 728 { 729 struct cpmac_priv *priv = 730 container_of(work, struct cpmac_priv, reset_work); 731 732 spin_lock(&priv->rx_lock); 733 cpmac_clear_rx(priv->dev); 734 spin_unlock(&priv->rx_lock); 735 cpmac_clear_tx(priv->dev); 736 cpmac_hw_start(priv->dev); 737 barrier(); 738 atomic_dec(&priv->reset_pending); 739 740 netif_tx_wake_all_queues(priv->dev); 741 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); 742 } 743 744 static void cpmac_check_status(struct net_device *dev) 745 { 746 struct cpmac_priv *priv = netdev_priv(dev); 747 748 u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); 749 int rx_channel = (macstatus >> 8) & 7; 750 int rx_code = (macstatus >> 12) & 15; 751 int tx_channel = (macstatus >> 16) & 7; 752 int tx_code = (macstatus >> 20) & 15; 753 754 if (rx_code || tx_code) { 755 if (netif_msg_drv(priv) && net_ratelimit()) { 756 /* Can't find any documentation on what these 757 * error codes actually are. So just log them and hope.. 758 */ 759 if (rx_code) 760 netdev_warn(dev, "host error %d on rx " 761 "channel %d (macstatus %08x), resetting\n", 762 rx_code, rx_channel, macstatus); 763 if (tx_code) 764 netdev_warn(dev, "host error %d on tx " 765 "channel %d (macstatus %08x), resetting\n", 766 tx_code, tx_channel, macstatus); 767 } 768 769 netif_tx_stop_all_queues(dev); 770 cpmac_hw_stop(dev); 771 if (schedule_work(&priv->reset_work)) 772 atomic_inc(&priv->reset_pending); 773 if (unlikely(netif_msg_hw(priv))) 774 cpmac_dump_regs(dev); 775 } 776 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); 777 } 778 779 static irqreturn_t cpmac_irq(int irq, void *dev_id) 780 { 781 struct net_device *dev = dev_id; 782 struct cpmac_priv *priv; 783 int queue; 784 u32 status; 785 786 priv = netdev_priv(dev); 787 788 status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); 789 790 if (unlikely(netif_msg_intr(priv))) 791 netdev_dbg(dev, "interrupt status: 0x%08x\n", status); 792 793 if (status & MAC_INT_TX) 794 cpmac_end_xmit(dev, (status & 7)); 795 796 if (status & MAC_INT_RX) { 797 queue = (status >> 8) & 7; 798 if (napi_schedule_prep(&priv->napi)) { 799 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); 800 __napi_schedule(&priv->napi); 801 } 802 } 803 804 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); 805 806 if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) 807 cpmac_check_status(dev); 808 809 return IRQ_HANDLED; 810 } 811 812 static void cpmac_tx_timeout(struct net_device *dev) 813 { 814 struct cpmac_priv *priv = netdev_priv(dev); 815 816 spin_lock(&priv->lock); 817 dev->stats.tx_errors++; 818 spin_unlock(&priv->lock); 819 if (netif_msg_tx_err(priv) && net_ratelimit()) 820 netdev_warn(dev, "transmit timeout\n"); 821 822 atomic_inc(&priv->reset_pending); 823 barrier(); 824 cpmac_clear_tx(dev); 825 barrier(); 826 atomic_dec(&priv->reset_pending); 827 828 netif_tx_wake_all_queues(priv->dev); 829 } 830 831 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 832 { 833 struct cpmac_priv *priv = netdev_priv(dev); 834 835 if (!(netif_running(dev))) 836 return -EINVAL; 837 if (!priv->phy) 838 return -EINVAL; 839 840 return phy_mii_ioctl(priv->phy, ifr, cmd); 841 } 842 843 static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 844 { 845 struct cpmac_priv *priv = netdev_priv(dev); 846 847 if (priv->phy) 848 return phy_ethtool_gset(priv->phy, cmd); 849 850 return -EINVAL; 851 } 852 853 static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 854 { 855 struct cpmac_priv *priv = netdev_priv(dev); 856 857 if (!capable(CAP_NET_ADMIN)) 858 return -EPERM; 859 860 if (priv->phy) 861 return phy_ethtool_sset(priv->phy, cmd); 862 863 return -EINVAL; 864 } 865 866 static void cpmac_get_ringparam(struct net_device *dev, 867 struct ethtool_ringparam *ring) 868 { 869 struct cpmac_priv *priv = netdev_priv(dev); 870 871 ring->rx_max_pending = 1024; 872 ring->rx_mini_max_pending = 1; 873 ring->rx_jumbo_max_pending = 1; 874 ring->tx_max_pending = 1; 875 876 ring->rx_pending = priv->ring_size; 877 ring->rx_mini_pending = 1; 878 ring->rx_jumbo_pending = 1; 879 ring->tx_pending = 1; 880 } 881 882 static int cpmac_set_ringparam(struct net_device *dev, 883 struct ethtool_ringparam *ring) 884 { 885 struct cpmac_priv *priv = netdev_priv(dev); 886 887 if (netif_running(dev)) 888 return -EBUSY; 889 priv->ring_size = ring->rx_pending; 890 891 return 0; 892 } 893 894 static void cpmac_get_drvinfo(struct net_device *dev, 895 struct ethtool_drvinfo *info) 896 { 897 strlcpy(info->driver, "cpmac", sizeof(info->driver)); 898 strlcpy(info->version, CPMAC_VERSION, sizeof(info->version)); 899 snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac"); 900 info->regdump_len = 0; 901 } 902 903 static const struct ethtool_ops cpmac_ethtool_ops = { 904 .get_settings = cpmac_get_settings, 905 .set_settings = cpmac_set_settings, 906 .get_drvinfo = cpmac_get_drvinfo, 907 .get_link = ethtool_op_get_link, 908 .get_ringparam = cpmac_get_ringparam, 909 .set_ringparam = cpmac_set_ringparam, 910 }; 911 912 static void cpmac_adjust_link(struct net_device *dev) 913 { 914 struct cpmac_priv *priv = netdev_priv(dev); 915 int new_state = 0; 916 917 spin_lock(&priv->lock); 918 if (priv->phy->link) { 919 netif_tx_start_all_queues(dev); 920 if (priv->phy->duplex != priv->oldduplex) { 921 new_state = 1; 922 priv->oldduplex = priv->phy->duplex; 923 } 924 925 if (priv->phy->speed != priv->oldspeed) { 926 new_state = 1; 927 priv->oldspeed = priv->phy->speed; 928 } 929 930 if (!priv->oldlink) { 931 new_state = 1; 932 priv->oldlink = 1; 933 } 934 } else if (priv->oldlink) { 935 new_state = 1; 936 priv->oldlink = 0; 937 priv->oldspeed = 0; 938 priv->oldduplex = -1; 939 } 940 941 if (new_state && netif_msg_link(priv) && net_ratelimit()) 942 phy_print_status(priv->phy); 943 944 spin_unlock(&priv->lock); 945 } 946 947 static int cpmac_open(struct net_device *dev) 948 { 949 int i, size, res; 950 struct cpmac_priv *priv = netdev_priv(dev); 951 struct resource *mem; 952 struct cpmac_desc *desc; 953 struct sk_buff *skb; 954 955 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); 956 if (!request_mem_region(mem->start, resource_size(mem), dev->name)) { 957 if (netif_msg_drv(priv)) 958 netdev_err(dev, "failed to request registers\n"); 959 960 res = -ENXIO; 961 goto fail_reserve; 962 } 963 964 priv->regs = ioremap(mem->start, resource_size(mem)); 965 if (!priv->regs) { 966 if (netif_msg_drv(priv)) 967 netdev_err(dev, "failed to remap registers\n"); 968 969 res = -ENXIO; 970 goto fail_remap; 971 } 972 973 size = priv->ring_size + CPMAC_QUEUES; 974 priv->desc_ring = dma_alloc_coherent(&dev->dev, 975 sizeof(struct cpmac_desc) * size, 976 &priv->dma_ring, 977 GFP_KERNEL); 978 if (!priv->desc_ring) { 979 res = -ENOMEM; 980 goto fail_alloc; 981 } 982 983 for (i = 0; i < size; i++) 984 priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; 985 986 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; 987 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { 988 skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE); 989 if (unlikely(!skb)) { 990 res = -ENOMEM; 991 goto fail_desc; 992 } 993 desc->skb = skb; 994 desc->data_mapping = dma_map_single(&dev->dev, skb->data, 995 CPMAC_SKB_SIZE, 996 DMA_FROM_DEVICE); 997 desc->hw_data = (u32)desc->data_mapping; 998 desc->buflen = CPMAC_SKB_SIZE; 999 desc->dataflags = CPMAC_OWN; 1000 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; 1001 desc->next->prev = desc; 1002 desc->hw_next = (u32)desc->next->mapping; 1003 } 1004 1005 priv->rx_head->prev->hw_next = (u32)0; 1006 1007 res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev); 1008 if (res) { 1009 if (netif_msg_drv(priv)) 1010 netdev_err(dev, "failed to obtain irq\n"); 1011 1012 goto fail_irq; 1013 } 1014 1015 atomic_set(&priv->reset_pending, 0); 1016 INIT_WORK(&priv->reset_work, cpmac_hw_error); 1017 cpmac_hw_start(dev); 1018 1019 napi_enable(&priv->napi); 1020 priv->phy->state = PHY_CHANGELINK; 1021 phy_start(priv->phy); 1022 1023 return 0; 1024 1025 fail_irq: 1026 fail_desc: 1027 for (i = 0; i < priv->ring_size; i++) { 1028 if (priv->rx_head[i].skb) { 1029 dma_unmap_single(&dev->dev, 1030 priv->rx_head[i].data_mapping, 1031 CPMAC_SKB_SIZE, 1032 DMA_FROM_DEVICE); 1033 kfree_skb(priv->rx_head[i].skb); 1034 } 1035 } 1036 fail_alloc: 1037 kfree(priv->desc_ring); 1038 iounmap(priv->regs); 1039 1040 fail_remap: 1041 release_mem_region(mem->start, resource_size(mem)); 1042 1043 fail_reserve: 1044 return res; 1045 } 1046 1047 static int cpmac_stop(struct net_device *dev) 1048 { 1049 int i; 1050 struct cpmac_priv *priv = netdev_priv(dev); 1051 struct resource *mem; 1052 1053 netif_tx_stop_all_queues(dev); 1054 1055 cancel_work_sync(&priv->reset_work); 1056 napi_disable(&priv->napi); 1057 phy_stop(priv->phy); 1058 1059 cpmac_hw_stop(dev); 1060 1061 for (i = 0; i < 8; i++) 1062 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); 1063 cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); 1064 cpmac_write(priv->regs, CPMAC_MBP, 0); 1065 1066 free_irq(dev->irq, dev); 1067 iounmap(priv->regs); 1068 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); 1069 release_mem_region(mem->start, resource_size(mem)); 1070 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; 1071 for (i = 0; i < priv->ring_size; i++) { 1072 if (priv->rx_head[i].skb) { 1073 dma_unmap_single(&dev->dev, 1074 priv->rx_head[i].data_mapping, 1075 CPMAC_SKB_SIZE, 1076 DMA_FROM_DEVICE); 1077 kfree_skb(priv->rx_head[i].skb); 1078 } 1079 } 1080 1081 dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * 1082 (CPMAC_QUEUES + priv->ring_size), 1083 priv->desc_ring, priv->dma_ring); 1084 1085 return 0; 1086 } 1087 1088 static const struct net_device_ops cpmac_netdev_ops = { 1089 .ndo_open = cpmac_open, 1090 .ndo_stop = cpmac_stop, 1091 .ndo_start_xmit = cpmac_start_xmit, 1092 .ndo_tx_timeout = cpmac_tx_timeout, 1093 .ndo_set_rx_mode = cpmac_set_multicast_list, 1094 .ndo_do_ioctl = cpmac_ioctl, 1095 .ndo_change_mtu = eth_change_mtu, 1096 .ndo_validate_addr = eth_validate_addr, 1097 .ndo_set_mac_address = eth_mac_addr, 1098 }; 1099 1100 static int external_switch; 1101 1102 static int cpmac_probe(struct platform_device *pdev) 1103 { 1104 int rc, phy_id; 1105 char mdio_bus_id[MII_BUS_ID_SIZE]; 1106 struct resource *mem; 1107 struct cpmac_priv *priv; 1108 struct net_device *dev; 1109 struct plat_cpmac_data *pdata; 1110 1111 pdata = dev_get_platdata(&pdev->dev); 1112 1113 if (external_switch || dumb_switch) { 1114 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1115 phy_id = pdev->id; 1116 } else { 1117 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { 1118 if (!(pdata->phy_mask & (1 << phy_id))) 1119 continue; 1120 if (!cpmac_mii->phy_map[phy_id]) 1121 continue; 1122 strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE); 1123 break; 1124 } 1125 } 1126 1127 if (phy_id == PHY_MAX_ADDR) { 1128 dev_err(&pdev->dev, "no PHY present, falling back " 1129 "to switch on MDIO bus 0\n"); 1130 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1131 phy_id = pdev->id; 1132 } 1133 mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0'; 1134 1135 dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); 1136 if (!dev) 1137 return -ENOMEM; 1138 1139 platform_set_drvdata(pdev, dev); 1140 priv = netdev_priv(dev); 1141 1142 priv->pdev = pdev; 1143 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 1144 if (!mem) { 1145 rc = -ENODEV; 1146 goto out; 1147 } 1148 1149 dev->irq = platform_get_irq_byname(pdev, "irq"); 1150 1151 dev->netdev_ops = &cpmac_netdev_ops; 1152 dev->ethtool_ops = &cpmac_ethtool_ops; 1153 1154 netif_napi_add(dev, &priv->napi, cpmac_poll, 64); 1155 1156 spin_lock_init(&priv->lock); 1157 spin_lock_init(&priv->rx_lock); 1158 priv->dev = dev; 1159 priv->ring_size = 64; 1160 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1161 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr)); 1162 1163 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, 1164 mdio_bus_id, phy_id); 1165 1166 priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 1167 PHY_INTERFACE_MODE_MII); 1168 1169 if (IS_ERR(priv->phy)) { 1170 if (netif_msg_drv(priv)) 1171 dev_err(&pdev->dev, "Could not attach to PHY\n"); 1172 1173 rc = PTR_ERR(priv->phy); 1174 goto out; 1175 } 1176 1177 rc = register_netdev(dev); 1178 if (rc) { 1179 dev_err(&pdev->dev, "Could not register net device\n"); 1180 goto fail; 1181 } 1182 1183 if (netif_msg_probe(priv)) { 1184 dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, " 1185 "mac: %pM\n", (void *)mem->start, dev->irq, 1186 priv->phy_name, dev->dev_addr); 1187 } 1188 1189 return 0; 1190 1191 fail: 1192 free_netdev(dev); 1193 out: 1194 return rc; 1195 } 1196 1197 static int cpmac_remove(struct platform_device *pdev) 1198 { 1199 struct net_device *dev = platform_get_drvdata(pdev); 1200 1201 unregister_netdev(dev); 1202 free_netdev(dev); 1203 1204 return 0; 1205 } 1206 1207 static struct platform_driver cpmac_driver = { 1208 .driver = { 1209 .name = "cpmac", 1210 }, 1211 .probe = cpmac_probe, 1212 .remove = cpmac_remove, 1213 }; 1214 1215 int cpmac_init(void) 1216 { 1217 u32 mask; 1218 int i, res; 1219 1220 cpmac_mii = mdiobus_alloc(); 1221 if (cpmac_mii == NULL) 1222 return -ENOMEM; 1223 1224 cpmac_mii->name = "cpmac-mii"; 1225 cpmac_mii->read = cpmac_mdio_read; 1226 cpmac_mii->write = cpmac_mdio_write; 1227 cpmac_mii->reset = cpmac_mdio_reset; 1228 cpmac_mii->irq = mii_irqs; 1229 1230 cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); 1231 1232 if (!cpmac_mii->priv) { 1233 pr_err("Can't ioremap mdio registers\n"); 1234 res = -ENXIO; 1235 goto fail_alloc; 1236 } 1237 1238 #warning FIXME: unhardcode gpio&reset bits 1239 ar7_gpio_disable(26); 1240 ar7_gpio_disable(27); 1241 ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); 1242 ar7_device_reset(AR7_RESET_BIT_CPMAC_HI); 1243 ar7_device_reset(AR7_RESET_BIT_EPHY); 1244 1245 cpmac_mii->reset(cpmac_mii); 1246 1247 for (i = 0; i < 300; i++) { 1248 mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE); 1249 if (mask) 1250 break; 1251 else 1252 msleep(10); 1253 } 1254 1255 mask &= 0x7fffffff; 1256 if (mask & (mask - 1)) { 1257 external_switch = 1; 1258 mask = 0; 1259 } 1260 1261 cpmac_mii->phy_mask = ~(mask | 0x80000000); 1262 snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1"); 1263 1264 res = mdiobus_register(cpmac_mii); 1265 if (res) 1266 goto fail_mii; 1267 1268 res = platform_driver_register(&cpmac_driver); 1269 if (res) 1270 goto fail_cpmac; 1271 1272 return 0; 1273 1274 fail_cpmac: 1275 mdiobus_unregister(cpmac_mii); 1276 1277 fail_mii: 1278 iounmap(cpmac_mii->priv); 1279 1280 fail_alloc: 1281 mdiobus_free(cpmac_mii); 1282 1283 return res; 1284 } 1285 1286 void cpmac_exit(void) 1287 { 1288 platform_driver_unregister(&cpmac_driver); 1289 mdiobus_unregister(cpmac_mii); 1290 iounmap(cpmac_mii->priv); 1291 mdiobus_free(cpmac_mii); 1292 } 1293 1294 module_init(cpmac_init); 1295 module_exit(cpmac_exit); 1296