/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

#include <linux/platform_data/cpsw.h>

#include "cpsw_ale.h"
#include "davinci_cpdma.h"

#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)

#define cpsw_info(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)
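
/*
 * Example (illustrative, not from the original source): a call such as
 * cpsw_err(priv, tx_err, "desc submit failed\n") expands to a
 * rate-limited dev_err() that only fires when the NETIF_MSG_TX_ERR bit
 * is set in priv->msg_enable (seeded from the debug_level module
 * parameter below via netif_msg_init()).
 */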

#define CPSW_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
#define CPSW_MINOR_VERSION(reg)		((reg) & 0xff)
#define CPSW_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)

#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

#define cpsw_dma_regs(base, offset)		\
	(void __iomem *)((base) + (offset))
#define cpsw_dma_rxthresh(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXTHRESH)
#define cpsw_dma_rxfree(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXFREE)
#define cpsw_dma_txhdp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_TXHDP)
#define cpsw_dma_rxhdp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXHDP)
#define cpsw_dma_txcp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_TXCP)
#define cpsw_dma_rxcp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXCP)

#define CPSW_POLL_WEIGHT	64
#define CPSW_MIN_PACKET_SIZE	60
#define CPSW_MAX_PACKET_SIZE	(1500 + 14 + 4 + 4) /* data + eth hdr + VLAN + FCS */

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

#define cpsw_enable_irq(priv)					\
	do {							\
		u32 i;						\
		for (i = 0; i < priv->num_irqs; i++)		\
			enable_irq(priv->irqs_table[i]);	\
	} while (0)

#define cpsw_disable_irq(priv)					\
	do {							\
		u32 i;						\
		for (i = 0; i < priv->num_irqs; i++)		\
			disable_irq_nosync(priv->irqs_table[i]);\
	} while (0)

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

struct cpsw_ss_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;
	u32	tx_en;
	u32	misc_en;
};

struct cpsw_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
};

struct cpsw_slave_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	sa_lo;
	u32	sa_hi;
};

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

struct cpsw_slave {
	struct cpsw_slave_regs __iomem	*regs;
	struct cpsw_sliver_regs __iomem	*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
};
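
/*
 * Per-device driver state, kept in the private area of the net_device
 * allocated in cpsw_probe(). Note that all slave ports funnel through
 * the single host-port CPDMA rx/tx channel pair (txch/rxch) below.
 */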
struct cpsw_priv {
	spinlock_t			lock;
	struct platform_device		*pdev;
	struct net_device		*ndev;
	struct resource			*cpsw_res;
	struct resource			*cpsw_ss_res;
	struct napi_struct		napi;
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct cpsw_regs __iomem	*regs;
	struct cpsw_ss_regs __iomem	*ss_regs;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				msg_enable;
	struct net_device_stats		stats;
	int				rx_packet_max;
	int				host_port;
	struct clk			*clk;
	u8				mac_addr[ETH_ALEN];
	struct cpsw_slave		*slaves;
	struct cpdma_ctlr		*dma;
	struct cpdma_chan		*txch, *rxch;
	struct cpsw_ale			*ale;
	/* snapshot of IRQ numbers */
	u32				irqs_table[4];
	u32				num_irqs;
};

#define napi_to_priv(napi)	container_of(napi, struct cpsw_priv, napi)
#define for_each_slave(priv, func, arg...)			\
	do {							\
		int idx;					\
		for (idx = 0; idx < (priv)->data.slaves; idx++)	\
			(func)((priv)->slaves + idx, ##arg);	\
	} while (0)

static void cpsw_intr_enable(struct cpsw_priv *priv)
{
	__raw_writel(0xFF, &priv->ss_regs->tx_en);
	__raw_writel(0xFF, &priv->ss_regs->rx_en);

	cpdma_ctlr_int_ctrl(priv->dma, true);
}

static void cpsw_intr_disable(struct cpsw_priv *priv)
{
	__raw_writel(0, &priv->ss_regs->tx_en);
	__raw_writel(0, &priv->ss_regs->rx_en);

	cpdma_ctlr_int_ctrl(priv->dma, false);
}

void cpsw_tx_handler(void *token, int len, int status)
{
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_priv	*priv = netdev_priv(ndev);

	/* wake, not start: also kicks the qdisc to resubmit */
	if (unlikely(netif_queue_stopped(ndev)))
		netif_wake_queue(ndev);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

void cpsw_rx_handler(void *token, int len, int status)
{
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_priv	*priv = netdev_priv(ndev);
	int			ret = 0;

	/* free and bail if we are shutting down */
	if (unlikely(!netif_running(ndev)) ||
			unlikely(!netif_carrier_ok(ndev))) {
		dev_kfree_skb_any(skb);
		return;
	}
	if (likely(status >= 0)) {
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		priv->stats.rx_bytes += len;
		priv->stats.rx_packets++;
		skb = NULL;
	}

	if (unlikely(!netif_running(ndev))) {
		if (skb)
			dev_kfree_skb_any(skb);
		return;
	}

	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
		if (WARN_ON(!skb))
			return;

		/* runs in softirq context, so the submission must not sleep */
		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), GFP_ATOMIC);
	}
	WARN_ON(ret < 0);
}

static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
	struct cpsw_priv *priv = dev_id;

	if (likely(netif_running(priv->ndev))) {
		cpsw_intr_disable(priv);
		cpsw_disable_irq(priv);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}
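
/*
 * NAPI poll handler. cpsw_interrupt() masks the interrupts and
 * schedules this handler; tx completions are drained with a fixed
 * quota of 128 while rx is bounded by the NAPI budget. Once rx no
 * longer exhausts the budget, interrupts are unmasked and an EOI is
 * issued so the CPDMA can raise the next interrupt.
 */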
static int cpsw_poll(struct napi_struct *napi, int budget)
{
	struct cpsw_priv	*priv = napi_to_priv(napi);
	int			num_tx, num_rx;

	num_tx = cpdma_chan_process(priv->txch, 128);
	num_rx = cpdma_chan_process(priv->rxch, budget);

	if (num_rx || num_tx)
		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
			 num_rx, num_tx);

	if (num_rx < budget) {
		napi_complete(napi);
		cpsw_intr_enable(priv);
		cpdma_ctlr_eoi(priv->dma);
		cpsw_enable_irq(priv);
	}

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	__raw_writel(1, reg);
	do {
		cpu_relax();
	} while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));

	WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
}

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
}

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	if (phy->link) {
		mac_control = priv->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN */
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN */
		*link = true;
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		__raw_writel(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	bool			link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_wake_queue(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}
}

static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
	static char *leader = "........................................";

	if (!val)
		return 0;
	else
		return snprintf(buf, maxlen, "%s %s %10d\n", name,
				leader + strlen(name), val);
}
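
/*
 * Bring up one slave port: soft-reset the sliver, program the rx/tx
 * priority maps, rx_maxlen and the MAC address, let broadcast through
 * the ALE, then attach and start the PHY named by the platform data.
 */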
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	char name[32];
	u32 slave_port;

	sprintf(name, "slave-%d", slave->slave_num);

	soft_reset(name, &slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
	__raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			   1 << slave_port, 0, ALE_MCAST_FWD_2);

	slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
	if (IS_ERR(slave->phy)) {
		dev_err(priv->dev, "phy %s not found on slave %d\n",
			slave->data->phy_id, slave->slave_num);
		slave->phy = NULL;
	} else {
		dev_info(priv->dev, "phy found: id is 0x%x\n",
			 slave->phy->phy_id);
		phy_start(slave->phy);
	}
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &priv->regs->soft_reset);
	cpsw_ale_start(priv->ale);

	/* switch to vlan unaware mode */
	cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);

	/* setup host port priority mapping */
	__raw_writel(CPDMA_TX_PRIORITY_MAP,
		     &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			   1 << priv->host_port, 0, ALE_MCAST_FWD_2);
}

static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int i, ret;
	u32 reg;

	cpsw_intr_disable(priv);
	netif_carrier_off(ndev);

	ret = clk_enable(priv->clk);
	if (ret < 0) {
		dev_err(priv->dev, "unable to turn on device clock\n");
		return ret;
	}

	reg = __raw_readl(&priv->regs->id_ver);

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* initialize host and slave ports */
	cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* setup tx dma to fixed prio and zero offset */
	cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
	cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);

	/* disable priority elevation */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection on all ports */
	__raw_writel(0x7, &priv->regs->stat_port_en);

	if (WARN_ON(!priv->data.rx_descs))
		priv->data.rx_descs = 128;

	for (i = 0; i < priv->data.rx_descs; i++) {
		struct sk_buff *skb;

		ret = -ENOMEM;
		skb = netdev_alloc_skb_ip_align(priv->ndev,
						priv->rx_packet_max);
		if (!skb)
			break;
		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), GFP_KERNEL);
		if (WARN_ON(ret < 0))
			break;
	}
	/* continue even if we didn't manage to submit all receive descs */
	cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);

	cpdma_ctlr_start(priv->dma);
	cpsw_intr_enable(priv);
	napi_enable(&priv->napi);
	cpdma_ctlr_eoi(priv->dma);

	return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_ctlr_stop(priv->dma);
	netif_stop_queue(priv->ndev);
	napi_disable(&priv->napi);
	netif_carrier_off(priv->ndev);
	cpsw_ale_stop(priv->ale);
	for_each_slave(priv, cpsw_slave_stop, priv);
	clk_disable(priv->clk);
	return 0;
}
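
/*
 * Transmit path: frames shorter than CPSW_MIN_PACKET_SIZE are padded
 * before being queued on the tx CPDMA channel. If a descriptor cannot
 * be obtained, the queue is stopped and NETDEV_TX_BUSY returned;
 * cpsw_tx_handler() wakes the queue again on the next completion.
 */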
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ndev->trans_start = jiffies;

	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		priv->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* called with bottom halves disabled, so no sleeping allocation */
	ret = cpdma_chan_submit(priv->txch, skb, skb->data,
				skb->len, GFP_ATOMIC);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	return NETDEV_TX_OK;
fail:
	priv->stats.tx_dropped++;
	netif_stop_queue(ndev);
	return NETDEV_TX_BUSY;
}

static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
{
	/*
	 * The switch cannot operate in promiscuous mode without substantial
	 * headache. For promiscuous mode to work, we would need to put the
	 * ALE in bypass mode and route all traffic to the host port.
	 * Subsequently, the host will need to operate as a "bridge", learn,
	 * and flood as needed. For now, we simply complain here and
	 * do nothing about it :-)
	 */
	if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
		dev_err(&ndev->dev, "promiscuity ignored!\n");

	/*
	 * The switch cannot filter multicast traffic unless it is configured
	 * in "VLAN Aware" mode. Unfortunately, VLAN awareness requires a
	 * whole bunch of additional logic that this driver does not implement
	 * at present.
	 */
	if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
		dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
}

static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	priv->stats.tx_errors++;
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_chan_stop(priv->txch);
	cpdma_chan_start(priv->txch);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma);
}

static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return &priv->stats;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpsw_interrupt(ndev->irq, priv);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma);
}
#endif

static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_change_rx_flags	= cpsw_ndo_change_rx_flags,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_get_stats		= cpsw_ndo_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
};
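
/* ethtool support: driver identification, message level and link state */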
static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	strcpy(info->driver, "cpsw");
	strcpy(info->version, "1.0");
	strcpy(info->bus_info, priv->pdev->name);
}

static u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo	= cpsw_get_drvinfo,
	.get_msglevel	= cpsw_get_msglevel,
	.set_msglevel	= cpsw_set_msglevel,
	.get_link	= ethtool_op_get_link,
};

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	void __iomem		*regs = priv->regs;
	int			slave_num = slave->slave_num;
	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;

	slave->data	= data;
	slave->regs	= regs + data->slave_reg_ofs;
	slave->sliver	= regs + data->sliver_reg_ofs;
}

static int __devinit cpsw_probe(struct platform_device *pdev)
{
	struct cpsw_platform_data	*data = pdev->dev.platform_data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv;
	struct cpdma_params		dma_params;
	struct cpsw_ale_params		ale_params;
	void __iomem			*regs;
	struct resource			*res;
	int ret = 0, i, k = 0;

	if (!data) {
		pr_err("platform data missing\n");
		return -ENODEV;
	}

	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
	if (!ndev) {
		pr_err("error allocating net_device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	spin_lock_init(&priv->lock);
	priv->data = *data;
	priv->pdev = pdev;
	priv->ndev = ndev;
	priv->dev  = &ndev->dev;
	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
	priv->rx_packet_max = max(rx_packet_max, 128);

	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
		pr_info("Detected MACID = %pM\n", priv->mac_addr);
	} else {
		random_ether_addr(priv->mac_addr);
		pr_info("Random MACID = %pM\n", priv->mac_addr);
	}

	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);

	priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
			       GFP_KERNEL);
	if (!priv->slaves) {
		ret = -ENOMEM;
		goto clean_ndev_ret;
	}
	for (i = 0; i < data->slaves; i++)
		priv->slaves[i].slave_num = i;

	priv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(priv->dev, "failed to get device clock\n");
		ret = -EBUSY;
		goto clean_slave_ret;
	}

	priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!priv->cpsw_res) {
		dev_err(priv->dev, "error getting i/o resource\n");
		ret = -ENOENT;
		goto clean_clk_ret;
	}

	if (!request_mem_region(priv->cpsw_res->start,
				resource_size(priv->cpsw_res), ndev->name)) {
		dev_err(priv->dev, "failed request i/o region\n");
		ret = -ENXIO;
		goto clean_clk_ret;
	}

	regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
	if (!regs) {
		dev_err(priv->dev, "unable to map i/o region\n");
		ret = -ENOMEM;
		goto clean_cpsw_iores_ret;
	}
	priv->regs = regs;
	priv->host_port = data->host_port_num;
	priv->host_port_regs = regs + data->host_port_reg_ofs;

	priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!priv->cpsw_ss_res) {
		dev_err(priv->dev, "error getting i/o resource\n");
		ret = -ENOENT;
		goto clean_iomap_ret;
	}

	if (!request_mem_region(priv->cpsw_ss_res->start,
				resource_size(priv->cpsw_ss_res), ndev->name)) {
		dev_err(priv->dev, "failed request i/o region\n");
		ret = -ENXIO;
		goto clean_iomap_ret;
	}

	regs = ioremap(priv->cpsw_ss_res->start,
		       resource_size(priv->cpsw_ss_res));
	if (!regs) {
		dev_err(priv->dev, "unable to map i/o region\n");
		ret = -ENOMEM;
		goto clean_cpsw_ss_iores_ret;
	}
	priv->ss_regs = regs;

	for_each_slave(priv, cpsw_slave_init, priv);
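
	/*
	 * Hand the CPDMA register layout and descriptor memory over to the
	 * davinci_cpdma library. Descriptors normally live in the on-chip
	 * BD RAM (bd_ram_ofs into the first memory resource); with
	 * no_bd_ram set, desc_mem_phys stays zero and the cpdma library is
	 * expected to fall back to allocating its own DMA-coherent pool.
	 */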
	memset(&dma_params, 0, sizeof(dma_params));
	dma_params.dev			= &pdev->dev;
	dma_params.dmaregs		= cpsw_dma_regs((u32)priv->regs,
						data->cpdma_reg_ofs);
	dma_params.rxthresh		= cpsw_dma_rxthresh((u32)priv->regs,
						data->cpdma_reg_ofs);
	dma_params.rxfree		= cpsw_dma_rxfree((u32)priv->regs,
						data->cpdma_reg_ofs);
	dma_params.txhdp		= cpsw_dma_txhdp((u32)priv->regs,
						data->cpdma_sram_ofs);
	dma_params.rxhdp		= cpsw_dma_rxhdp((u32)priv->regs,
						data->cpdma_sram_ofs);
	dma_params.txcp			= cpsw_dma_txcp((u32)priv->regs,
						data->cpdma_sram_ofs);
	dma_params.rxcp			= cpsw_dma_rxcp((u32)priv->regs,
						data->cpdma_sram_ofs);

	dma_params.num_chan		= data->channels;
	dma_params.has_soft_reset	= true;
	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size	= data->bd_ram_size;
	dma_params.desc_align		= 16;
	dma_params.has_ext_regs		= true;
	dma_params.desc_mem_phys	= data->no_bd_ram ? 0 :
			(u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
	dma_params.desc_hw_addr		= data->hw_ram_addr ?
			data->hw_ram_addr : dma_params.desc_mem_phys;

	priv->dma = cpdma_ctlr_create(&dma_params);
	if (!priv->dma) {
		dev_err(priv->dev, "error initializing dma\n");
		ret = -ENOMEM;
		goto clean_ss_iomap_ret;
	}

	priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
				       cpsw_tx_handler);
	priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
				       cpsw_rx_handler);

	if (WARN_ON(!priv->txch || !priv->rxch)) {
		dev_err(priv->dev, "error initializing dma channels\n");
		ret = -ENOMEM;
		goto clean_dma_ret;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev		= &ndev->dev;
	ale_params.ale_regs	= (void *)((u32)priv->regs) +
					((u32)data->ale_reg_ofs);
	ale_params.ale_ageout	= ale_ageout;
	ale_params.ale_entries	= data->ale_entries;
	ale_params.ale_ports	= data->slaves;

	priv->ale = cpsw_ale_create(&ale_params);
	if (!priv->ale) {
		dev_err(priv->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto clean_dma_ret;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		dev_err(priv->dev, "error getting irq resource\n");
		ret = -ENOENT;
		goto clean_ale_ret;
	}

	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
		for (i = res->start; i <= res->end; i++) {
			if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
					dev_name(&pdev->dev), priv)) {
				dev_err(priv->dev, "error attaching irq\n");
				ret = -EBUSY;
				goto clean_irq_ret;
			}
			/* record and count every irq we attached */
			priv->irqs_table[priv->num_irqs++] = i;
		}
		k++;
	}

	ndev->flags |= IFF_ALLMULTI;	/* see cpsw_ndo_change_rx_flags() */

	ndev->netdev_ops = &cpsw_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
	netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);

	/* register the network device */
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "error registering net device\n");
		ret = -ENODEV;
		goto clean_irq_ret;
	}

	cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
		    priv->cpsw_res->start, ndev->irq);

	return 0;
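
	/*
	 * Error unwind: each label below releases exactly one resource and
	 * falls through to the next, so failure paths jump to the label
	 * that matches the last successful allocation.
	 */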
clean_irq_ret:
	for (i = 0; i < priv->num_irqs; i++)
		free_irq(priv->irqs_table[i], priv);
clean_ale_ret:
	cpsw_ale_destroy(priv->ale);
clean_dma_ret:
	cpdma_chan_destroy(priv->txch);
	cpdma_chan_destroy(priv->rxch);
	cpdma_ctlr_destroy(priv->dma);
clean_ss_iomap_ret:
	iounmap(priv->ss_regs);
clean_cpsw_ss_iores_ret:
	release_mem_region(priv->cpsw_ss_res->start,
			   resource_size(priv->cpsw_ss_res));
clean_iomap_ret:
	iounmap(priv->regs);
clean_cpsw_iores_ret:
	release_mem_region(priv->cpsw_res->start,
			   resource_size(priv->cpsw_res));
clean_clk_ret:
	clk_put(priv->clk);
clean_slave_ret:
	kfree(priv->slaves);
clean_ndev_ret:
	free_netdev(ndev);
	return ret;
}

static int __devexit cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_priv *priv = netdev_priv(ndev);
	int i;

	pr_info("removing device\n");
	platform_set_drvdata(pdev, NULL);

	unregister_netdev(ndev);
	for (i = 0; i < priv->num_irqs; i++)
		free_irq(priv->irqs_table[i], priv);
	cpsw_ale_destroy(priv->ale);
	cpdma_chan_destroy(priv->txch);
	cpdma_chan_destroy(priv->rxch);
	cpdma_ctlr_destroy(priv->dma);
	iounmap(priv->ss_regs);
	release_mem_region(priv->cpsw_ss_res->start,
			   resource_size(priv->cpsw_ss_res));
	iounmap(priv->regs);
	release_mem_region(priv->cpsw_res->start,
			   resource_size(priv->cpsw_res));
	clk_put(priv->clk);
	kfree(priv->slaves);
	free_netdev(ndev);

	return 0;
}

static int cpsw_suspend(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);

	if (netif_running(ndev))
		cpsw_ndo_stop(ndev);
	return 0;
}

static int cpsw_resume(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);

	if (netif_running(ndev))
		cpsw_ndo_open(ndev);
	return 0;
}

static const struct dev_pm_ops cpsw_pm_ops = {
	.suspend	= cpsw_suspend,
	.resume		= cpsw_resume,
};

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.owner	 = THIS_MODULE,
		.pm	 = &cpsw_pm_ops,
	},
	.probe = cpsw_probe,
	.remove = __devexit_p(cpsw_remove),
};

static int __init cpsw_init(void)
{
	return platform_driver_register(&cpsw_driver);
}
late_initcall(cpsw_init);

static void __exit cpsw_exit(void)
{
	platform_driver_unregister(&cpsw_driver);
}
module_exit(cpsw_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");