1 /* 2 * Copyright (C) 2006, 2007 Eugene Konev 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18 19 #include <linux/module.h> 20 #include <linux/interrupt.h> 21 #include <linux/moduleparam.h> 22 23 #include <linux/sched.h> 24 #include <linux/kernel.h> 25 #include <linux/slab.h> 26 #include <linux/errno.h> 27 #include <linux/types.h> 28 #include <linux/delay.h> 29 30 #include <linux/netdevice.h> 31 #include <linux/if_vlan.h> 32 #include <linux/etherdevice.h> 33 #include <linux/ethtool.h> 34 #include <linux/skbuff.h> 35 #include <linux/mii.h> 36 #include <linux/phy.h> 37 #include <linux/phy_fixed.h> 38 #include <linux/platform_device.h> 39 #include <linux/dma-mapping.h> 40 #include <linux/clk.h> 41 #include <linux/gpio.h> 42 #include <linux/atomic.h> 43 44 #include <asm/mach-ar7/ar7.h> 45 46 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 47 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); 48 MODULE_LICENSE("GPL"); 49 MODULE_ALIAS("platform:cpmac"); 50 51 static int debug_level = 8; 52 static int dumb_switch; 53 54 /* Next 2 are only used in cpmac_probe, so it's pointless to change them */ 55 module_param(debug_level, int, 0444); 56 module_param(dumb_switch, int, 0444); 57 58 MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); 59 MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); 60 61 #define CPMAC_VERSION "0.5.2" 62 /* frame size + 802.1q tag + FCS size */ 63 #define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) 64 #define CPMAC_QUEUES 8 65 66 /* Ethernet registers */ 67 #define CPMAC_TX_CONTROL 0x0004 68 #define CPMAC_TX_TEARDOWN 0x0008 69 #define CPMAC_RX_CONTROL 0x0014 70 #define CPMAC_RX_TEARDOWN 0x0018 71 #define CPMAC_MBP 0x0100 72 #define MBP_RXPASSCRC 0x40000000 73 #define MBP_RXQOS 0x20000000 74 #define MBP_RXNOCHAIN 0x10000000 75 #define MBP_RXCMF 0x01000000 76 #define MBP_RXSHORT 0x00800000 77 #define MBP_RXCEF 0x00400000 78 #define MBP_RXPROMISC 0x00200000 79 #define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) 80 #define MBP_RXBCAST 0x00002000 81 #define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) 82 #define MBP_RXMCAST 0x00000020 83 #define MBP_MCASTCHAN(channel) ((channel) & 0x7) 84 #define CPMAC_UNICAST_ENABLE 0x0104 85 #define CPMAC_UNICAST_CLEAR 0x0108 86 #define CPMAC_MAX_LENGTH 0x010c 87 #define CPMAC_BUFFER_OFFSET 0x0110 88 #define CPMAC_MAC_CONTROL 0x0160 89 #define MAC_TXPTYPE 0x00000200 90 #define MAC_TXPACE 0x00000040 91 #define MAC_MII 0x00000020 92 #define MAC_TXFLOW 0x00000010 93 #define MAC_RXFLOW 0x00000008 94 #define MAC_MTEST 0x00000004 95 #define MAC_LOOPBACK 0x00000002 96 #define MAC_FDX 0x00000001 97 #define CPMAC_MAC_STATUS 0x0164 98 #define MAC_STATUS_QOS 0x00000004 99 #define MAC_STATUS_RXFLOW 0x00000002 100 #define MAC_STATUS_TXFLOW 0x00000001 101 #define CPMAC_TX_INT_ENABLE 0x0178 102 #define CPMAC_TX_INT_CLEAR 0x017c 103 #define CPMAC_MAC_INT_VECTOR 0x0180 104 #define MAC_INT_STATUS 0x00080000 105 #define MAC_INT_HOST 0x00040000 106 #define MAC_INT_RX 0x00020000 107 #define MAC_INT_TX 0x00010000 108 #define CPMAC_MAC_EOI_VECTOR 0x0184 109 #define CPMAC_RX_INT_ENABLE 0x0198 110 #define CPMAC_RX_INT_CLEAR 0x019c 111 #define CPMAC_MAC_INT_ENABLE 0x01a8 112 #define CPMAC_MAC_INT_CLEAR 0x01ac 113 #define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) 114 #define CPMAC_MAC_ADDR_MID 0x01d0 115 #define CPMAC_MAC_ADDR_HI 0x01d4 116 #define CPMAC_MAC_HASH_LO 0x01d8 117 #define CPMAC_MAC_HASH_HI 0x01dc 118 #define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4) 119 #define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4) 120 #define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4) 121 #define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4) 122 #define CPMAC_REG_END 0x0680 123 124 /* Rx/Tx statistics 125 * TODO: use some of them to fill stats in cpmac_stats() 126 */ 127 #define CPMAC_STATS_RX_GOOD 0x0200 128 #define CPMAC_STATS_RX_BCAST 0x0204 129 #define CPMAC_STATS_RX_MCAST 0x0208 130 #define CPMAC_STATS_RX_PAUSE 0x020c 131 #define CPMAC_STATS_RX_CRC 0x0210 132 #define CPMAC_STATS_RX_ALIGN 0x0214 133 #define CPMAC_STATS_RX_OVER 0x0218 134 #define CPMAC_STATS_RX_JABBER 0x021c 135 #define CPMAC_STATS_RX_UNDER 0x0220 136 #define CPMAC_STATS_RX_FRAG 0x0224 137 #define CPMAC_STATS_RX_FILTER 0x0228 138 #define CPMAC_STATS_RX_QOSFILTER 0x022c 139 #define CPMAC_STATS_RX_OCTETS 0x0230 140 141 #define CPMAC_STATS_TX_GOOD 0x0234 142 #define CPMAC_STATS_TX_BCAST 0x0238 143 #define CPMAC_STATS_TX_MCAST 0x023c 144 #define CPMAC_STATS_TX_PAUSE 0x0240 145 #define CPMAC_STATS_TX_DEFER 0x0244 146 #define CPMAC_STATS_TX_COLLISION 0x0248 147 #define CPMAC_STATS_TX_SINGLECOLL 0x024c 148 #define CPMAC_STATS_TX_MULTICOLL 0x0250 149 #define CPMAC_STATS_TX_EXCESSCOLL 0x0254 150 #define CPMAC_STATS_TX_LATECOLL 0x0258 151 #define CPMAC_STATS_TX_UNDERRUN 0x025c 152 #define CPMAC_STATS_TX_CARRIERSENSE 0x0260 153 #define CPMAC_STATS_TX_OCTETS 0x0264 154 155 #define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg))) 156 #define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \ 157 (reg))) 158 159 /* MDIO bus */ 160 #define CPMAC_MDIO_VERSION 0x0000 161 #define CPMAC_MDIO_CONTROL 0x0004 162 #define MDIOC_IDLE 0x80000000 163 #define MDIOC_ENABLE 0x40000000 164 #define MDIOC_PREAMBLE 0x00100000 165 #define MDIOC_FAULT 0x00080000 166 #define MDIOC_FAULTDETECT 0x00040000 167 #define MDIOC_INTTEST 0x00020000 168 #define MDIOC_CLKDIV(div) ((div) & 0xff) 169 #define CPMAC_MDIO_ALIVE 0x0008 170 #define CPMAC_MDIO_LINK 0x000c 171 #define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8) 172 #define MDIO_BUSY 0x80000000 173 #define MDIO_WRITE 0x40000000 174 #define MDIO_REG(reg) (((reg) & 0x1f) << 21) 175 #define MDIO_PHY(phy) (((phy) & 0x1f) << 16) 176 #define MDIO_DATA(data) ((data) & 0xffff) 177 #define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8) 178 #define PHYSEL_LINKSEL 0x00000040 179 #define PHYSEL_LINKINT 0x00000020 180 181 struct cpmac_desc { 182 u32 hw_next; 183 u32 hw_data; 184 u16 buflen; 185 u16 bufflags; 186 u16 datalen; 187 u16 dataflags; 188 #define CPMAC_SOP 0x8000 189 #define CPMAC_EOP 0x4000 190 #define CPMAC_OWN 0x2000 191 #define CPMAC_EOQ 0x1000 192 struct sk_buff *skb; 193 struct cpmac_desc *next; 194 struct cpmac_desc *prev; 195 dma_addr_t mapping; 196 dma_addr_t data_mapping; 197 }; 198 199 struct cpmac_priv { 200 spinlock_t lock; 201 spinlock_t rx_lock; 202 struct cpmac_desc *rx_head; 203 int ring_size; 204 struct cpmac_desc *desc_ring; 205 dma_addr_t dma_ring; 206 void __iomem *regs; 207 struct mii_bus *mii_bus; 208 struct phy_device *phy; 209 char phy_name[MII_BUS_ID_SIZE + 3]; 210 int oldlink, oldspeed, oldduplex; 211 u32 msg_enable; 212 struct net_device *dev; 213 struct work_struct reset_work; 214 struct platform_device *pdev; 215 struct napi_struct napi; 216 atomic_t reset_pending; 217 }; 218 219 static irqreturn_t cpmac_irq(int, void *); 220 static void cpmac_hw_start(struct net_device *dev); 221 static void cpmac_hw_stop(struct net_device *dev); 222 static int cpmac_stop(struct net_device *dev); 223 static int cpmac_open(struct net_device *dev); 224 225 static void cpmac_dump_regs(struct net_device *dev) 226 { 227 int i; 228 struct cpmac_priv *priv = netdev_priv(dev); 229 230 for (i = 0; i < CPMAC_REG_END; i += 4) { 231 if (i % 16 == 0) { 232 if (i) 233 printk("\n"); 234 printk("%s: reg[%p]:", dev->name, priv->regs + i); 235 } 236 printk(" %08x", cpmac_read(priv->regs, i)); 237 } 238 printk("\n"); 239 } 240 241 static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) 242 { 243 int i; 244 245 printk("%s: desc[%p]:", dev->name, desc); 246 for (i = 0; i < sizeof(*desc) / 4; i++) 247 printk(" %08x", ((u32 *)desc)[i]); 248 printk("\n"); 249 } 250 251 static void cpmac_dump_all_desc(struct net_device *dev) 252 { 253 struct cpmac_priv *priv = netdev_priv(dev); 254 struct cpmac_desc *dump = priv->rx_head; 255 256 do { 257 cpmac_dump_desc(dev, dump); 258 dump = dump->next; 259 } while (dump != priv->rx_head); 260 } 261 262 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) 263 { 264 int i; 265 266 printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len); 267 for (i = 0; i < skb->len; i++) { 268 if (i % 16 == 0) { 269 if (i) 270 printk("\n"); 271 printk("%s: data[%p]:", dev->name, skb->data + i); 272 } 273 printk(" %02x", ((u8 *)skb->data)[i]); 274 } 275 printk("\n"); 276 } 277 278 static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) 279 { 280 u32 val; 281 282 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) 283 cpu_relax(); 284 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | 285 MDIO_PHY(phy_id)); 286 while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) 287 cpu_relax(); 288 289 return MDIO_DATA(val); 290 } 291 292 static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, 293 int reg, u16 val) 294 { 295 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) 296 cpu_relax(); 297 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | 298 MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); 299 300 return 0; 301 } 302 303 static int cpmac_mdio_reset(struct mii_bus *bus) 304 { 305 struct clk *cpmac_clk; 306 307 cpmac_clk = clk_get(&bus->dev, "cpmac"); 308 if (IS_ERR(cpmac_clk)) { 309 pr_err("unable to get cpmac clock\n"); 310 return -1; 311 } 312 ar7_device_reset(AR7_RESET_BIT_MDIO); 313 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | 314 MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1)); 315 316 return 0; 317 } 318 319 static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; 320 321 static struct mii_bus *cpmac_mii; 322 323 static void cpmac_set_multicast_list(struct net_device *dev) 324 { 325 struct netdev_hw_addr *ha; 326 u8 tmp; 327 u32 mbp, bit, hash[2] = { 0, }; 328 struct cpmac_priv *priv = netdev_priv(dev); 329 330 mbp = cpmac_read(priv->regs, CPMAC_MBP); 331 if (dev->flags & IFF_PROMISC) { 332 cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | 333 MBP_RXPROMISC); 334 } else { 335 cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); 336 if (dev->flags & IFF_ALLMULTI) { 337 /* enable all multicast mode */ 338 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); 339 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); 340 } else { 341 /* cpmac uses some strange mac address hashing 342 * (not crc32) 343 */ 344 netdev_for_each_mc_addr(ha, dev) { 345 bit = 0; 346 tmp = ha->addr[0]; 347 bit ^= (tmp >> 2) ^ (tmp << 4); 348 tmp = ha->addr[1]; 349 bit ^= (tmp >> 4) ^ (tmp << 2); 350 tmp = ha->addr[2]; 351 bit ^= (tmp >> 6) ^ tmp; 352 tmp = ha->addr[3]; 353 bit ^= (tmp >> 2) ^ (tmp << 4); 354 tmp = ha->addr[4]; 355 bit ^= (tmp >> 4) ^ (tmp << 2); 356 tmp = ha->addr[5]; 357 bit ^= (tmp >> 6) ^ tmp; 358 bit &= 0x3f; 359 hash[bit / 32] |= 1 << (bit % 32); 360 } 361 362 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); 363 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); 364 } 365 } 366 } 367 368 static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, 369 struct cpmac_desc *desc) 370 { 371 struct sk_buff *skb, *result = NULL; 372 373 if (unlikely(netif_msg_hw(priv))) 374 cpmac_dump_desc(priv->dev, desc); 375 cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); 376 if (unlikely(!desc->datalen)) { 377 if (netif_msg_rx_err(priv) && net_ratelimit()) 378 netdev_warn(priv->dev, "rx: spurious interrupt\n"); 379 380 return NULL; 381 } 382 383 skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE); 384 if (likely(skb)) { 385 skb_put(desc->skb, desc->datalen); 386 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); 387 skb_checksum_none_assert(desc->skb); 388 priv->dev->stats.rx_packets++; 389 priv->dev->stats.rx_bytes += desc->datalen; 390 result = desc->skb; 391 dma_unmap_single(&priv->dev->dev, desc->data_mapping, 392 CPMAC_SKB_SIZE, DMA_FROM_DEVICE); 393 desc->skb = skb; 394 desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, 395 CPMAC_SKB_SIZE, 396 DMA_FROM_DEVICE); 397 desc->hw_data = (u32)desc->data_mapping; 398 if (unlikely(netif_msg_pktdata(priv))) { 399 netdev_dbg(priv->dev, "received packet:\n"); 400 cpmac_dump_skb(priv->dev, result); 401 } 402 } else { 403 if (netif_msg_rx_err(priv) && net_ratelimit()) 404 netdev_warn(priv->dev, 405 "low on skbs, dropping packet\n"); 406 407 priv->dev->stats.rx_dropped++; 408 } 409 410 desc->buflen = CPMAC_SKB_SIZE; 411 desc->dataflags = CPMAC_OWN; 412 413 return result; 414 } 415 416 static int cpmac_poll(struct napi_struct *napi, int budget) 417 { 418 struct sk_buff *skb; 419 struct cpmac_desc *desc, *restart; 420 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); 421 int received = 0, processed = 0; 422 423 spin_lock(&priv->rx_lock); 424 if (unlikely(!priv->rx_head)) { 425 if (netif_msg_rx_err(priv) && net_ratelimit()) 426 netdev_warn(priv->dev, "rx: polling, but no queue\n"); 427 428 spin_unlock(&priv->rx_lock); 429 napi_complete(napi); 430 return 0; 431 } 432 433 desc = priv->rx_head; 434 restart = NULL; 435 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { 436 processed++; 437 438 if ((desc->dataflags & CPMAC_EOQ) != 0) { 439 /* The last update to eoq->hw_next didn't happen 440 * soon enough, and the receiver stopped here. 441 * Remember this descriptor so we can restart 442 * the receiver after freeing some space. 443 */ 444 if (unlikely(restart)) { 445 if (netif_msg_rx_err(priv)) 446 netdev_err(priv->dev, "poll found a" 447 " duplicate EOQ: %p and %p\n", 448 restart, desc); 449 goto fatal_error; 450 } 451 452 restart = desc->next; 453 } 454 455 skb = cpmac_rx_one(priv, desc); 456 if (likely(skb)) { 457 netif_receive_skb(skb); 458 received++; 459 } 460 desc = desc->next; 461 } 462 463 if (desc != priv->rx_head) { 464 /* We freed some buffers, but not the whole ring, 465 * add what we did free to the rx list 466 */ 467 desc->prev->hw_next = (u32)0; 468 priv->rx_head->prev->hw_next = priv->rx_head->mapping; 469 } 470 471 /* Optimization: If we did not actually process an EOQ (perhaps because 472 * of quota limits), check to see if the tail of the queue has EOQ set. 473 * We should immediately restart in that case so that the receiver can 474 * restart and run in parallel with more packet processing. 475 * This lets us handle slightly larger bursts before running 476 * out of ring space (assuming dev->weight < ring_size) 477 */ 478 479 if (!restart && 480 (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) 481 == CPMAC_EOQ && 482 (priv->rx_head->dataflags & CPMAC_OWN) != 0) { 483 /* reset EOQ so the poll loop (above) doesn't try to 484 * restart this when it eventually gets to this descriptor. 485 */ 486 priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; 487 restart = priv->rx_head; 488 } 489 490 if (restart) { 491 priv->dev->stats.rx_errors++; 492 priv->dev->stats.rx_fifo_errors++; 493 if (netif_msg_rx_err(priv) && net_ratelimit()) 494 netdev_warn(priv->dev, "rx dma ring overrun\n"); 495 496 if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { 497 if (netif_msg_drv(priv)) 498 netdev_err(priv->dev, "cpmac_poll is trying " 499 "to restart rx from a descriptor " 500 "that's not free: %p\n", restart); 501 goto fatal_error; 502 } 503 504 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); 505 } 506 507 priv->rx_head = desc; 508 spin_unlock(&priv->rx_lock); 509 if (unlikely(netif_msg_rx_status(priv))) 510 netdev_dbg(priv->dev, "poll processed %d packets\n", received); 511 512 if (processed == 0) { 513 /* we ran out of packets to read, 514 * revert to interrupt-driven mode 515 */ 516 napi_complete(napi); 517 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 518 return 0; 519 } 520 521 return 1; 522 523 fatal_error: 524 /* Something went horribly wrong. 525 * Reset hardware to try to recover rather than wedging. 526 */ 527 if (netif_msg_drv(priv)) { 528 netdev_err(priv->dev, "cpmac_poll is confused. " 529 "Resetting hardware\n"); 530 cpmac_dump_all_desc(priv->dev); 531 netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", 532 cpmac_read(priv->regs, CPMAC_RX_PTR(0)), 533 cpmac_read(priv->regs, CPMAC_RX_ACK(0))); 534 } 535 536 spin_unlock(&priv->rx_lock); 537 napi_complete(napi); 538 netif_tx_stop_all_queues(priv->dev); 539 napi_disable(&priv->napi); 540 541 atomic_inc(&priv->reset_pending); 542 cpmac_hw_stop(priv->dev); 543 if (!schedule_work(&priv->reset_work)) 544 atomic_dec(&priv->reset_pending); 545 546 return 0; 547 548 } 549 550 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) 551 { 552 int queue, len; 553 struct cpmac_desc *desc; 554 struct cpmac_priv *priv = netdev_priv(dev); 555 556 if (unlikely(atomic_read(&priv->reset_pending))) 557 return NETDEV_TX_BUSY; 558 559 if (unlikely(skb_padto(skb, ETH_ZLEN))) 560 return NETDEV_TX_OK; 561 562 len = max(skb->len, ETH_ZLEN); 563 queue = skb_get_queue_mapping(skb); 564 netif_stop_subqueue(dev, queue); 565 566 desc = &priv->desc_ring[queue]; 567 if (unlikely(desc->dataflags & CPMAC_OWN)) { 568 if (netif_msg_tx_err(priv) && net_ratelimit()) 569 netdev_warn(dev, "tx dma ring full\n"); 570 571 return NETDEV_TX_BUSY; 572 } 573 574 spin_lock(&priv->lock); 575 spin_unlock(&priv->lock); 576 desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; 577 desc->skb = skb; 578 desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, 579 DMA_TO_DEVICE); 580 desc->hw_data = (u32)desc->data_mapping; 581 desc->datalen = len; 582 desc->buflen = len; 583 if (unlikely(netif_msg_tx_queued(priv))) 584 netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len); 585 if (unlikely(netif_msg_hw(priv))) 586 cpmac_dump_desc(dev, desc); 587 if (unlikely(netif_msg_pktdata(priv))) 588 cpmac_dump_skb(dev, skb); 589 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); 590 591 return NETDEV_TX_OK; 592 } 593 594 static void cpmac_end_xmit(struct net_device *dev, int queue) 595 { 596 struct cpmac_desc *desc; 597 struct cpmac_priv *priv = netdev_priv(dev); 598 599 desc = &priv->desc_ring[queue]; 600 cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); 601 if (likely(desc->skb)) { 602 spin_lock(&priv->lock); 603 dev->stats.tx_packets++; 604 dev->stats.tx_bytes += desc->skb->len; 605 spin_unlock(&priv->lock); 606 dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, 607 DMA_TO_DEVICE); 608 609 if (unlikely(netif_msg_tx_done(priv))) 610 netdev_dbg(dev, "sent 0x%p, len=%d\n", 611 desc->skb, desc->skb->len); 612 613 dev_kfree_skb_irq(desc->skb); 614 desc->skb = NULL; 615 if (__netif_subqueue_stopped(dev, queue)) 616 netif_wake_subqueue(dev, queue); 617 } else { 618 if (netif_msg_tx_err(priv) && net_ratelimit()) 619 netdev_warn(dev, "end_xmit: spurious interrupt\n"); 620 if (__netif_subqueue_stopped(dev, queue)) 621 netif_wake_subqueue(dev, queue); 622 } 623 } 624 625 static void cpmac_hw_stop(struct net_device *dev) 626 { 627 int i; 628 struct cpmac_priv *priv = netdev_priv(dev); 629 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); 630 631 ar7_device_reset(pdata->reset_bit); 632 cpmac_write(priv->regs, CPMAC_RX_CONTROL, 633 cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); 634 cpmac_write(priv->regs, CPMAC_TX_CONTROL, 635 cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); 636 for (i = 0; i < 8; i++) { 637 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); 638 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); 639 } 640 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); 641 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); 642 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); 643 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); 644 cpmac_write(priv->regs, CPMAC_MAC_CONTROL, 645 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); 646 } 647 648 static void cpmac_hw_start(struct net_device *dev) 649 { 650 int i; 651 struct cpmac_priv *priv = netdev_priv(dev); 652 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); 653 654 ar7_device_reset(pdata->reset_bit); 655 for (i = 0; i < 8; i++) { 656 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); 657 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); 658 } 659 cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); 660 661 cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | 662 MBP_RXMCAST); 663 cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); 664 for (i = 0; i < 8; i++) 665 cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); 666 cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); 667 cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | 668 (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) | 669 (dev->dev_addr[3] << 24)); 670 cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); 671 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); 672 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); 673 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); 674 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); 675 cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1); 676 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 677 cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); 678 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); 679 680 cpmac_write(priv->regs, CPMAC_RX_CONTROL, 681 cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); 682 cpmac_write(priv->regs, CPMAC_TX_CONTROL, 683 cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); 684 cpmac_write(priv->regs, CPMAC_MAC_CONTROL, 685 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | 686 MAC_FDX); 687 } 688 689 static void cpmac_clear_rx(struct net_device *dev) 690 { 691 struct cpmac_priv *priv = netdev_priv(dev); 692 struct cpmac_desc *desc; 693 int i; 694 695 if (unlikely(!priv->rx_head)) 696 return; 697 desc = priv->rx_head; 698 for (i = 0; i < priv->ring_size; i++) { 699 if ((desc->dataflags & CPMAC_OWN) == 0) { 700 if (netif_msg_rx_err(priv) && net_ratelimit()) 701 netdev_warn(dev, "packet dropped\n"); 702 if (unlikely(netif_msg_hw(priv))) 703 cpmac_dump_desc(dev, desc); 704 desc->dataflags = CPMAC_OWN; 705 dev->stats.rx_dropped++; 706 } 707 desc->hw_next = desc->next->mapping; 708 desc = desc->next; 709 } 710 priv->rx_head->prev->hw_next = 0; 711 } 712 713 static void cpmac_clear_tx(struct net_device *dev) 714 { 715 struct cpmac_priv *priv = netdev_priv(dev); 716 int i; 717 718 if (unlikely(!priv->desc_ring)) 719 return; 720 for (i = 0; i < CPMAC_QUEUES; i++) { 721 priv->desc_ring[i].dataflags = 0; 722 if (priv->desc_ring[i].skb) { 723 dev_kfree_skb_any(priv->desc_ring[i].skb); 724 priv->desc_ring[i].skb = NULL; 725 } 726 } 727 } 728 729 static void cpmac_hw_error(struct work_struct *work) 730 { 731 struct cpmac_priv *priv = 732 container_of(work, struct cpmac_priv, reset_work); 733 734 spin_lock(&priv->rx_lock); 735 cpmac_clear_rx(priv->dev); 736 spin_unlock(&priv->rx_lock); 737 cpmac_clear_tx(priv->dev); 738 cpmac_hw_start(priv->dev); 739 barrier(); 740 atomic_dec(&priv->reset_pending); 741 742 netif_tx_wake_all_queues(priv->dev); 743 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); 744 } 745 746 static void cpmac_check_status(struct net_device *dev) 747 { 748 struct cpmac_priv *priv = netdev_priv(dev); 749 750 u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); 751 int rx_channel = (macstatus >> 8) & 7; 752 int rx_code = (macstatus >> 12) & 15; 753 int tx_channel = (macstatus >> 16) & 7; 754 int tx_code = (macstatus >> 20) & 15; 755 756 if (rx_code || tx_code) { 757 if (netif_msg_drv(priv) && net_ratelimit()) { 758 /* Can't find any documentation on what these 759 * error codes actually are. So just log them and hope.. 760 */ 761 if (rx_code) 762 netdev_warn(dev, "host error %d on rx " 763 "channel %d (macstatus %08x), resetting\n", 764 rx_code, rx_channel, macstatus); 765 if (tx_code) 766 netdev_warn(dev, "host error %d on tx " 767 "channel %d (macstatus %08x), resetting\n", 768 tx_code, tx_channel, macstatus); 769 } 770 771 netif_tx_stop_all_queues(dev); 772 cpmac_hw_stop(dev); 773 if (schedule_work(&priv->reset_work)) 774 atomic_inc(&priv->reset_pending); 775 if (unlikely(netif_msg_hw(priv))) 776 cpmac_dump_regs(dev); 777 } 778 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); 779 } 780 781 static irqreturn_t cpmac_irq(int irq, void *dev_id) 782 { 783 struct net_device *dev = dev_id; 784 struct cpmac_priv *priv; 785 int queue; 786 u32 status; 787 788 priv = netdev_priv(dev); 789 790 status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); 791 792 if (unlikely(netif_msg_intr(priv))) 793 netdev_dbg(dev, "interrupt status: 0x%08x\n", status); 794 795 if (status & MAC_INT_TX) 796 cpmac_end_xmit(dev, (status & 7)); 797 798 if (status & MAC_INT_RX) { 799 queue = (status >> 8) & 7; 800 if (napi_schedule_prep(&priv->napi)) { 801 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); 802 __napi_schedule(&priv->napi); 803 } 804 } 805 806 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); 807 808 if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) 809 cpmac_check_status(dev); 810 811 return IRQ_HANDLED; 812 } 813 814 static void cpmac_tx_timeout(struct net_device *dev) 815 { 816 struct cpmac_priv *priv = netdev_priv(dev); 817 818 spin_lock(&priv->lock); 819 dev->stats.tx_errors++; 820 spin_unlock(&priv->lock); 821 if (netif_msg_tx_err(priv) && net_ratelimit()) 822 netdev_warn(dev, "transmit timeout\n"); 823 824 atomic_inc(&priv->reset_pending); 825 barrier(); 826 cpmac_clear_tx(dev); 827 barrier(); 828 atomic_dec(&priv->reset_pending); 829 830 netif_tx_wake_all_queues(priv->dev); 831 } 832 833 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 834 { 835 struct cpmac_priv *priv = netdev_priv(dev); 836 837 if (!(netif_running(dev))) 838 return -EINVAL; 839 if (!priv->phy) 840 return -EINVAL; 841 842 return phy_mii_ioctl(priv->phy, ifr, cmd); 843 } 844 845 static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 846 { 847 struct cpmac_priv *priv = netdev_priv(dev); 848 849 if (priv->phy) 850 return phy_ethtool_gset(priv->phy, cmd); 851 852 return -EINVAL; 853 } 854 855 static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 856 { 857 struct cpmac_priv *priv = netdev_priv(dev); 858 859 if (!capable(CAP_NET_ADMIN)) 860 return -EPERM; 861 862 if (priv->phy) 863 return phy_ethtool_sset(priv->phy, cmd); 864 865 return -EINVAL; 866 } 867 868 static void cpmac_get_ringparam(struct net_device *dev, 869 struct ethtool_ringparam *ring) 870 { 871 struct cpmac_priv *priv = netdev_priv(dev); 872 873 ring->rx_max_pending = 1024; 874 ring->rx_mini_max_pending = 1; 875 ring->rx_jumbo_max_pending = 1; 876 ring->tx_max_pending = 1; 877 878 ring->rx_pending = priv->ring_size; 879 ring->rx_mini_pending = 1; 880 ring->rx_jumbo_pending = 1; 881 ring->tx_pending = 1; 882 } 883 884 static int cpmac_set_ringparam(struct net_device *dev, 885 struct ethtool_ringparam *ring) 886 { 887 struct cpmac_priv *priv = netdev_priv(dev); 888 889 if (netif_running(dev)) 890 return -EBUSY; 891 priv->ring_size = ring->rx_pending; 892 893 return 0; 894 } 895 896 static void cpmac_get_drvinfo(struct net_device *dev, 897 struct ethtool_drvinfo *info) 898 { 899 strlcpy(info->driver, "cpmac", sizeof(info->driver)); 900 strlcpy(info->version, CPMAC_VERSION, sizeof(info->version)); 901 snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac"); 902 info->regdump_len = 0; 903 } 904 905 static const struct ethtool_ops cpmac_ethtool_ops = { 906 .get_settings = cpmac_get_settings, 907 .set_settings = cpmac_set_settings, 908 .get_drvinfo = cpmac_get_drvinfo, 909 .get_link = ethtool_op_get_link, 910 .get_ringparam = cpmac_get_ringparam, 911 .set_ringparam = cpmac_set_ringparam, 912 }; 913 914 static void cpmac_adjust_link(struct net_device *dev) 915 { 916 struct cpmac_priv *priv = netdev_priv(dev); 917 int new_state = 0; 918 919 spin_lock(&priv->lock); 920 if (priv->phy->link) { 921 netif_tx_start_all_queues(dev); 922 if (priv->phy->duplex != priv->oldduplex) { 923 new_state = 1; 924 priv->oldduplex = priv->phy->duplex; 925 } 926 927 if (priv->phy->speed != priv->oldspeed) { 928 new_state = 1; 929 priv->oldspeed = priv->phy->speed; 930 } 931 932 if (!priv->oldlink) { 933 new_state = 1; 934 priv->oldlink = 1; 935 } 936 } else if (priv->oldlink) { 937 new_state = 1; 938 priv->oldlink = 0; 939 priv->oldspeed = 0; 940 priv->oldduplex = -1; 941 } 942 943 if (new_state && netif_msg_link(priv) && net_ratelimit()) 944 phy_print_status(priv->phy); 945 946 spin_unlock(&priv->lock); 947 } 948 949 static int cpmac_open(struct net_device *dev) 950 { 951 int i, size, res; 952 struct cpmac_priv *priv = netdev_priv(dev); 953 struct resource *mem; 954 struct cpmac_desc *desc; 955 struct sk_buff *skb; 956 957 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); 958 if (!request_mem_region(mem->start, resource_size(mem), dev->name)) { 959 if (netif_msg_drv(priv)) 960 netdev_err(dev, "failed to request registers\n"); 961 962 res = -ENXIO; 963 goto fail_reserve; 964 } 965 966 priv->regs = ioremap(mem->start, resource_size(mem)); 967 if (!priv->regs) { 968 if (netif_msg_drv(priv)) 969 netdev_err(dev, "failed to remap registers\n"); 970 971 res = -ENXIO; 972 goto fail_remap; 973 } 974 975 size = priv->ring_size + CPMAC_QUEUES; 976 priv->desc_ring = dma_alloc_coherent(&dev->dev, 977 sizeof(struct cpmac_desc) * size, 978 &priv->dma_ring, 979 GFP_KERNEL); 980 if (!priv->desc_ring) { 981 res = -ENOMEM; 982 goto fail_alloc; 983 } 984 985 for (i = 0; i < size; i++) 986 priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; 987 988 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; 989 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { 990 skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE); 991 if (unlikely(!skb)) { 992 res = -ENOMEM; 993 goto fail_desc; 994 } 995 desc->skb = skb; 996 desc->data_mapping = dma_map_single(&dev->dev, skb->data, 997 CPMAC_SKB_SIZE, 998 DMA_FROM_DEVICE); 999 desc->hw_data = (u32)desc->data_mapping; 1000 desc->buflen = CPMAC_SKB_SIZE; 1001 desc->dataflags = CPMAC_OWN; 1002 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; 1003 desc->next->prev = desc; 1004 desc->hw_next = (u32)desc->next->mapping; 1005 } 1006 1007 priv->rx_head->prev->hw_next = (u32)0; 1008 1009 res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev); 1010 if (res) { 1011 if (netif_msg_drv(priv)) 1012 netdev_err(dev, "failed to obtain irq\n"); 1013 1014 goto fail_irq; 1015 } 1016 1017 atomic_set(&priv->reset_pending, 0); 1018 INIT_WORK(&priv->reset_work, cpmac_hw_error); 1019 cpmac_hw_start(dev); 1020 1021 napi_enable(&priv->napi); 1022 priv->phy->state = PHY_CHANGELINK; 1023 phy_start(priv->phy); 1024 1025 return 0; 1026 1027 fail_irq: 1028 fail_desc: 1029 for (i = 0; i < priv->ring_size; i++) { 1030 if (priv->rx_head[i].skb) { 1031 dma_unmap_single(&dev->dev, 1032 priv->rx_head[i].data_mapping, 1033 CPMAC_SKB_SIZE, 1034 DMA_FROM_DEVICE); 1035 kfree_skb(priv->rx_head[i].skb); 1036 } 1037 } 1038 fail_alloc: 1039 kfree(priv->desc_ring); 1040 iounmap(priv->regs); 1041 1042 fail_remap: 1043 release_mem_region(mem->start, resource_size(mem)); 1044 1045 fail_reserve: 1046 return res; 1047 } 1048 1049 static int cpmac_stop(struct net_device *dev) 1050 { 1051 int i; 1052 struct cpmac_priv *priv = netdev_priv(dev); 1053 struct resource *mem; 1054 1055 netif_tx_stop_all_queues(dev); 1056 1057 cancel_work_sync(&priv->reset_work); 1058 napi_disable(&priv->napi); 1059 phy_stop(priv->phy); 1060 1061 cpmac_hw_stop(dev); 1062 1063 for (i = 0; i < 8; i++) 1064 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); 1065 cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); 1066 cpmac_write(priv->regs, CPMAC_MBP, 0); 1067 1068 free_irq(dev->irq, dev); 1069 iounmap(priv->regs); 1070 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); 1071 release_mem_region(mem->start, resource_size(mem)); 1072 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; 1073 for (i = 0; i < priv->ring_size; i++) { 1074 if (priv->rx_head[i].skb) { 1075 dma_unmap_single(&dev->dev, 1076 priv->rx_head[i].data_mapping, 1077 CPMAC_SKB_SIZE, 1078 DMA_FROM_DEVICE); 1079 kfree_skb(priv->rx_head[i].skb); 1080 } 1081 } 1082 1083 dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * 1084 (CPMAC_QUEUES + priv->ring_size), 1085 priv->desc_ring, priv->dma_ring); 1086 1087 return 0; 1088 } 1089 1090 static const struct net_device_ops cpmac_netdev_ops = { 1091 .ndo_open = cpmac_open, 1092 .ndo_stop = cpmac_stop, 1093 .ndo_start_xmit = cpmac_start_xmit, 1094 .ndo_tx_timeout = cpmac_tx_timeout, 1095 .ndo_set_rx_mode = cpmac_set_multicast_list, 1096 .ndo_do_ioctl = cpmac_ioctl, 1097 .ndo_change_mtu = eth_change_mtu, 1098 .ndo_validate_addr = eth_validate_addr, 1099 .ndo_set_mac_address = eth_mac_addr, 1100 }; 1101 1102 static int external_switch; 1103 1104 static int cpmac_probe(struct platform_device *pdev) 1105 { 1106 int rc, phy_id; 1107 char mdio_bus_id[MII_BUS_ID_SIZE]; 1108 struct resource *mem; 1109 struct cpmac_priv *priv; 1110 struct net_device *dev; 1111 struct plat_cpmac_data *pdata; 1112 1113 pdata = dev_get_platdata(&pdev->dev); 1114 1115 if (external_switch || dumb_switch) { 1116 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1117 phy_id = pdev->id; 1118 } else { 1119 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { 1120 if (!(pdata->phy_mask & (1 << phy_id))) 1121 continue; 1122 if (!cpmac_mii->phy_map[phy_id]) 1123 continue; 1124 strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE); 1125 break; 1126 } 1127 } 1128 1129 if (phy_id == PHY_MAX_ADDR) { 1130 dev_err(&pdev->dev, "no PHY present, falling back " 1131 "to switch on MDIO bus 0\n"); 1132 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1133 phy_id = pdev->id; 1134 } 1135 mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0'; 1136 1137 dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); 1138 if (!dev) 1139 return -ENOMEM; 1140 1141 platform_set_drvdata(pdev, dev); 1142 priv = netdev_priv(dev); 1143 1144 priv->pdev = pdev; 1145 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 1146 if (!mem) { 1147 rc = -ENODEV; 1148 goto out; 1149 } 1150 1151 dev->irq = platform_get_irq_byname(pdev, "irq"); 1152 1153 dev->netdev_ops = &cpmac_netdev_ops; 1154 dev->ethtool_ops = &cpmac_ethtool_ops; 1155 1156 netif_napi_add(dev, &priv->napi, cpmac_poll, 64); 1157 1158 spin_lock_init(&priv->lock); 1159 spin_lock_init(&priv->rx_lock); 1160 priv->dev = dev; 1161 priv->ring_size = 64; 1162 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1163 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr)); 1164 1165 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, 1166 mdio_bus_id, phy_id); 1167 1168 priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 1169 PHY_INTERFACE_MODE_MII); 1170 1171 if (IS_ERR(priv->phy)) { 1172 if (netif_msg_drv(priv)) 1173 dev_err(&pdev->dev, "Could not attach to PHY\n"); 1174 1175 rc = PTR_ERR(priv->phy); 1176 goto out; 1177 } 1178 1179 rc = register_netdev(dev); 1180 if (rc) { 1181 dev_err(&pdev->dev, "Could not register net device\n"); 1182 goto fail; 1183 } 1184 1185 if (netif_msg_probe(priv)) { 1186 dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, " 1187 "mac: %pM\n", (void *)mem->start, dev->irq, 1188 priv->phy_name, dev->dev_addr); 1189 } 1190 1191 return 0; 1192 1193 fail: 1194 free_netdev(dev); 1195 out: 1196 return rc; 1197 } 1198 1199 static int cpmac_remove(struct platform_device *pdev) 1200 { 1201 struct net_device *dev = platform_get_drvdata(pdev); 1202 1203 unregister_netdev(dev); 1204 free_netdev(dev); 1205 1206 return 0; 1207 } 1208 1209 static struct platform_driver cpmac_driver = { 1210 .driver = { 1211 .name = "cpmac", 1212 }, 1213 .probe = cpmac_probe, 1214 .remove = cpmac_remove, 1215 }; 1216 1217 int cpmac_init(void) 1218 { 1219 u32 mask; 1220 int i, res; 1221 1222 cpmac_mii = mdiobus_alloc(); 1223 if (cpmac_mii == NULL) 1224 return -ENOMEM; 1225 1226 cpmac_mii->name = "cpmac-mii"; 1227 cpmac_mii->read = cpmac_mdio_read; 1228 cpmac_mii->write = cpmac_mdio_write; 1229 cpmac_mii->reset = cpmac_mdio_reset; 1230 cpmac_mii->irq = mii_irqs; 1231 1232 cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); 1233 1234 if (!cpmac_mii->priv) { 1235 pr_err("Can't ioremap mdio registers\n"); 1236 res = -ENXIO; 1237 goto fail_alloc; 1238 } 1239 1240 #warning FIXME: unhardcode gpio&reset bits 1241 ar7_gpio_disable(26); 1242 ar7_gpio_disable(27); 1243 ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); 1244 ar7_device_reset(AR7_RESET_BIT_CPMAC_HI); 1245 ar7_device_reset(AR7_RESET_BIT_EPHY); 1246 1247 cpmac_mii->reset(cpmac_mii); 1248 1249 for (i = 0; i < 300; i++) { 1250 mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE); 1251 if (mask) 1252 break; 1253 else 1254 msleep(10); 1255 } 1256 1257 mask &= 0x7fffffff; 1258 if (mask & (mask - 1)) { 1259 external_switch = 1; 1260 mask = 0; 1261 } 1262 1263 cpmac_mii->phy_mask = ~(mask | 0x80000000); 1264 snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1"); 1265 1266 res = mdiobus_register(cpmac_mii); 1267 if (res) 1268 goto fail_mii; 1269 1270 res = platform_driver_register(&cpmac_driver); 1271 if (res) 1272 goto fail_cpmac; 1273 1274 return 0; 1275 1276 fail_cpmac: 1277 mdiobus_unregister(cpmac_mii); 1278 1279 fail_mii: 1280 iounmap(cpmac_mii->priv); 1281 1282 fail_alloc: 1283 mdiobus_free(cpmac_mii); 1284 1285 return res; 1286 } 1287 1288 void cpmac_exit(void) 1289 { 1290 platform_driver_unregister(&cpmac_driver); 1291 mdiobus_unregister(cpmac_mii); 1292 iounmap(cpmac_mii->priv); 1293 mdiobus_free(cpmac_mii); 1294 } 1295 1296 module_init(cpmac_init); 1297 module_exit(cpmac_exit); 1298