/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>

#include <linux/platform_data/cpsw.h>

#include "cpsw_ale.h"
#include "davinci_cpdma.h"

#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)

#define cpsw_info(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define CPSW_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
#define CPSW_MINOR_VERSION(reg)		((reg) & 0xff)
#define CPSW_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)
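/*
 * Offsets of the individual CPDMA register areas, applied on top of the
 * cpdma_reg_ofs/cpdma_sram_ofs values supplied through platform data.
 * The cpsw_dma_*() helpers below combine the ioremapped CPSW base with
 * those offsets to produce the __iomem pointers handed to the CPDMA
 * driver.
 */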
#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

#define cpsw_dma_regs(base, offset)		\
	(void __iomem *)((base) + (offset))
#define cpsw_dma_rxthresh(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXTHRESH)
#define cpsw_dma_rxfree(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXFREE)
#define cpsw_dma_txhdp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_TXHDP)
#define cpsw_dma_rxhdp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXHDP)
#define cpsw_dma_txcp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_TXCP)
#define cpsw_dma_rxcp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXCP)

#define CPSW_POLL_WEIGHT	64
#define CPSW_MIN_PACKET_SIZE	60
#define CPSW_MAX_PACKET_SIZE	(1500 + 14 + 4 + 4)

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

#define cpsw_enable_irq(priv)				\
	do {						\
		u32 i;					\
		for (i = 0; i < priv->num_irqs; i++)	\
			enable_irq(priv->irqs_table[i]);	\
	} while (0)
#define cpsw_disable_irq(priv)				\
	do {						\
		u32 i;					\
		for (i = 0; i < priv->num_irqs; i++)	\
			disable_irq_nosync(priv->irqs_table[i]);	\
	} while (0)

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

struct cpsw_ss_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;
	u32	tx_en;
	u32	misc_en;
};

struct cpsw_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
};

struct cpsw_slave_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	sa_lo;
	u32	sa_hi;
};

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

struct cpsw_slave {
	struct cpsw_slave_regs __iomem	*regs;
	struct cpsw_sliver_regs __iomem	*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
};
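/*
 * Per-device driver state: register mappings, DMA and ALE handles, the
 * slave array, and a snapshot of the IRQ numbers consumed by the
 * cpsw_{enable,disable}_irq() helpers above.
 */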
struct cpsw_priv {
	spinlock_t			lock;
	struct platform_device		*pdev;
	struct net_device		*ndev;
	struct resource			*cpsw_res;
	struct resource			*cpsw_ss_res;
	struct napi_struct		napi;
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct cpsw_regs __iomem	*regs;
	struct cpsw_ss_regs __iomem	*ss_regs;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				msg_enable;
	struct net_device_stats		stats;
	int				rx_packet_max;
	int				host_port;
	struct clk			*clk;
	u8				mac_addr[ETH_ALEN];
	struct cpsw_slave		*slaves;
	struct cpdma_ctlr		*dma;
	struct cpdma_chan		*txch, *rxch;
	struct cpsw_ale			*ale;
	/* snapshot of IRQ numbers */
	u32				irqs_table[4];
	u32				num_irqs;
};

#define napi_to_priv(napi)	container_of(napi, struct cpsw_priv, napi)
#define for_each_slave(priv, func, arg...)			\
	do {							\
		int idx;					\
		for (idx = 0; idx < (priv)->data.slaves; idx++)	\
			(func)((priv)->slaves + idx, ##arg);	\
	} while (0)

static void cpsw_intr_enable(struct cpsw_priv *priv)
{
	__raw_writel(0xFF, &priv->ss_regs->tx_en);
	__raw_writel(0xFF, &priv->ss_regs->rx_en);

	cpdma_ctlr_int_ctrl(priv->dma, true);
	return;
}

static void cpsw_intr_disable(struct cpsw_priv *priv)
{
	__raw_writel(0, &priv->ss_regs->tx_en);
	__raw_writel(0, &priv->ss_regs->rx_en);

	cpdma_ctlr_int_ctrl(priv->dma, false);
	return;
}

void cpsw_tx_handler(void *token, int len, int status)
{
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_priv	*priv = netdev_priv(ndev);

	if (unlikely(netif_queue_stopped(ndev)))
		netif_start_queue(ndev);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

void cpsw_rx_handler(void *token, int len, int status)
{
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_priv	*priv = netdev_priv(ndev);
	int			ret = 0;

	/* free and bail if we are shutting down */
	if (unlikely(!netif_running(ndev)) ||
			unlikely(!netif_carrier_ok(ndev))) {
		dev_kfree_skb_any(skb);
		return;
	}
	if (likely(status >= 0)) {
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		priv->stats.rx_bytes += len;
		priv->stats.rx_packets++;
		skb = NULL;
	}

	if (unlikely(!netif_running(ndev))) {
		if (skb)
			dev_kfree_skb_any(skb);
		return;
	}

	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
		if (WARN_ON(!skb))
			return;

		/* runs in NAPI (softirq) context, so no sleeping allocation */
		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), GFP_ATOMIC);
	}
	WARN_ON(ret < 0);
}

static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
	struct cpsw_priv *priv = dev_id;

	if (likely(netif_running(priv->ndev))) {
		cpsw_intr_disable(priv);
		cpsw_disable_irq(priv);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}
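/*
 * NAPI poll handler: drain up to 128 tx completions, then up to
 * "budget" rx packets.  Interrupts are re-enabled only when the rx
 * budget was not exhausted, per the usual NAPI contract.
 */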
static int cpsw_poll(struct napi_struct *napi, int budget)
{
	struct cpsw_priv	*priv = napi_to_priv(napi);
	int			num_tx, num_rx;

	num_tx = cpdma_chan_process(priv->txch, 128);
	num_rx = cpdma_chan_process(priv->rxch, budget);

	if (num_rx || num_tx)
		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
			 num_rx, num_tx);

	if (num_rx < budget) {
		napi_complete(napi);
		cpsw_intr_enable(priv);
		cpdma_ctlr_eoi(priv->dma);
		cpsw_enable_irq(priv);
	}

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	__raw_writel(1, reg);
	do {
		cpu_relax();
	} while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));

	WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
}

/*
 * Pack a MAC address for the sa_hi/sa_lo registers, e.g.
 * 00:01:02:03:04:05 -> sa_hi = 0x03020100, sa_lo = 0x0504.
 */
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
}

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	if (phy->link) {
		mac_control = priv->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN */
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN */

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);

		*link = true;
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		__raw_writel(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	bool			link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_wake_queue(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}
}

static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
	static char *leader = "........................................";

	if (!val)
		return 0;
	else
		return snprintf(buf, maxlen, "%s %s %10d\n", name,
				leader + strlen(name), val);
}
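/*
 * Bring up one slave port: soft-reset the sliver, program the priority
 * maps, rx_maxlen and the MAC address, add a broadcast ALE entry for
 * the port, then connect and start the PHY named in platform data.
 */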
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	char name[32];
	u32 slave_port;

	sprintf(name, "slave-%d", slave->slave_num);

	soft_reset(name, &slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
	__raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			   1 << slave_port, 0, ALE_MCAST_FWD_2);

	slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
	if (IS_ERR(slave->phy)) {
		dev_err(priv->dev, "phy %s not found on slave %d\n",
			slave->data->phy_id, slave->slave_num);
		slave->phy = NULL;
	} else {
		dev_info(priv->dev, "phy found: id 0x%x\n",
			 slave->phy->phy_id);
		phy_start(slave->phy);
	}
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &priv->regs->soft_reset);
	cpsw_ale_start(priv->ale);

	/* switch to vlan unaware mode */
	cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);

	/* setup host port priority mapping */
	__raw_writel(CPDMA_TX_PRIORITY_MAP,
		     &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			   1 << priv->host_port, 0, ALE_MCAST_FWD_2);
}
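/*
 * ndo_open: reset and configure the switch, open the slave ports, prime
 * the rx channel with rx_descs buffers, then start CPDMA and NAPI.
 */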
static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int i, ret;
	u32 reg;

	cpsw_intr_disable(priv);
	netif_carrier_off(ndev);

	pm_runtime_get_sync(&priv->pdev->dev);

	reg = __raw_readl(&priv->regs->id_ver);

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* initialize host and slave ports */
	cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* setup tx dma to fixed prio and zero offset */
	cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
	cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);

	/* disable priority elevation */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection on all ports */
	__raw_writel(0x7, &priv->regs->stat_port_en);

	if (WARN_ON(!priv->data.rx_descs))
		priv->data.rx_descs = 128;

	for (i = 0; i < priv->data.rx_descs; i++) {
		struct sk_buff *skb;

		ret = -ENOMEM;
		skb = netdev_alloc_skb_ip_align(priv->ndev,
						priv->rx_packet_max);
		if (!skb)
			break;
		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), GFP_KERNEL);
		if (WARN_ON(ret < 0))
			break;
	}
	/* continue even if we didn't manage to submit all receive descs */
	cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);

	cpdma_ctlr_start(priv->dma);
	cpsw_intr_enable(priv);
	napi_enable(&priv->napi);
	cpdma_ctlr_eoi(priv->dma);

	return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_ctlr_stop(priv->dma);
	netif_stop_queue(priv->ndev);
	napi_disable(&priv->napi);
	netif_carrier_off(priv->ndev);
	cpsw_ale_stop(priv->ale);
	for_each_slave(priv, cpsw_slave_stop, priv);
	pm_runtime_put_sync(&priv->pdev->dev);
	return 0;
}
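/*
 * ndo_start_xmit: pad short frames to the 60-byte minimum and hand the
 * skb to the tx CPDMA channel.  On a failed submit the queue is stopped
 * and NETDEV_TX_BUSY returned so the stack will retry the skb.
 */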
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ndev->trans_start = jiffies;

	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		priv->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* xmit runs with bottom halves disabled, so no sleeping allocation */
	ret = cpdma_chan_submit(priv->txch, skb, skb->data,
				skb->len, GFP_ATOMIC);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	return NETDEV_TX_OK;
fail:
	priv->stats.tx_dropped++;
	netif_stop_queue(ndev);
	return NETDEV_TX_BUSY;
}

static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
{
	/*
	 * The switch cannot operate in promiscuous mode without substantial
	 * headache.  For promiscuous mode to work, we would need to put the
	 * ALE in bypass mode and route all traffic to the host port.
	 * Subsequently, the host will need to operate as a "bridge", learn,
	 * and flood as needed.  For now, we simply complain here and
	 * do nothing about it :-)
	 */
	if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
		dev_err(&ndev->dev, "promiscuity ignored!\n");

	/*
	 * The switch cannot filter multicast traffic unless it is configured
	 * in "VLAN Aware" mode.  Unfortunately, VLAN awareness requires a
	 * whole bunch of additional logic that this driver does not implement
	 * at present.
	 */
	if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
		dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
}

static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	priv->stats.tx_errors++;
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_chan_stop(priv->txch);
	cpdma_chan_start(priv->txch);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma);
}

static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	return &priv->stats;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpsw_interrupt(ndev->irq, priv);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma);
}
#endif

static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_change_rx_flags	= cpsw_ndo_change_rx_flags,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_get_stats		= cpsw_ndo_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
};

static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	strcpy(info->driver, "TI CPSW Driver v1.0");
	strcpy(info->version, "1.0");
	strcpy(info->bus_info, priv->pdev->name);
}

static u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	return priv->msg_enable;
}

static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	priv->msg_enable = value;
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo	= cpsw_get_drvinfo,
	.get_msglevel	= cpsw_get_msglevel,
	.set_msglevel	= cpsw_set_msglevel,
	.get_link	= ethtool_op_get_link,
};

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	void __iomem		*regs = priv->regs;
	int			slave_num = slave->slave_num;
	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;

	slave->data	= data;
	slave->regs	= regs + data->slave_reg_ofs;
	slave->sliver	= regs + data->sliver_reg_ofs;
}
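/*
 * cpsw_probe_dt() fills struct cpsw_platform_data from the device tree.
 * An illustrative fragment of the bindings parsed below; every numeric
 * value here is a placeholder, not board-accurate data:
 *
 *	ethernet-switch {
 *		compatible = "ti,cpsw";
 *		slaves = <2>;
 *		cpdma_channels = <8>;
 *		host_port_no = <0>;
 *		cpdma_reg_ofs = <0x800>;
 *		cpdma_sram_ofs = <0xa00>;
 *		ale_reg_ofs = <0xd00>;
 *		ale_entries = <1024>;
 *		host_port_reg_ofs = <0x108>;
 *		hw_stats_reg_ofs = <0x900>;
 *		bd_ram_ofs = <0x2000>;
 *		bd_ram_size = <0x2000>;
 *		rx_descs = <64>;
 *		mac_control = <0x20>;
 *		slave@0 {
 *			phy_id = "davinci_mdio.16:00";
 *			slave_reg_ofs = <0x208>;
 *			sliver_reg_ofs = <0xd80>;
 *		};
 *	};
 *
 * Every property except no_bd_ram and the per-slave MAC address is
 * mandatory; a missing one fails the probe with -EINVAL.
 */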
static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		pr_err("Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
				   data->slaves, GFP_KERNEL);
	if (!data->slave_data) {
		pr_err("Could not allocate slave memory.\n");
		return -ENOMEM;
	}

	data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		pr_err("Missing cpdma_channels property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "host_port_no", &prop)) {
		pr_err("Missing host_port_no property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->host_port_num = prop;

	if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
		pr_err("Missing cpdma_reg_ofs property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->cpdma_reg_ofs = prop;

	if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
		pr_err("Missing cpdma_sram_ofs property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->cpdma_sram_ofs = prop;

	if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
		pr_err("Missing ale_reg_ofs property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->ale_reg_ofs = prop;

	if (of_property_read_u32(node, "ale_entries", &prop)) {
		pr_err("Missing ale_entries property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->ale_entries = prop;

	if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
		pr_err("Missing host_port_reg_ofs property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->host_port_reg_ofs = prop;

	if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
		pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->hw_stats_reg_ofs = prop;

	if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
		pr_err("Missing bd_ram_ofs property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->bd_ram_ofs = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		pr_err("Missing bd_ram_size property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "rx_descs", &prop)) {
		pr_err("Missing rx_descs property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->rx_descs = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		pr_err("Missing mac_control property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->mac_control = prop;

	for_each_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		const char *phy_id = NULL;
		const void *mac_addr = NULL;

		if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
			pr_err("Missing slave[%d] phy_id property\n", i);
			ret = -EINVAL;
			goto error_ret;
		}
		slave_data->phy_id = phy_id;

		if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
			pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
			ret = -EINVAL;
			goto error_ret;
		}
		slave_data->slave_reg_ofs = prop;

		if (of_property_read_u32(slave_node, "sliver_reg_ofs",
					 &prop)) {
			pr_err("Missing slave[%d] sliver_reg_ofs property\n",
			       i);
			ret = -EINVAL;
			goto error_ret;
		}
		slave_data->sliver_reg_ofs = prop;

		mac_addr = of_get_mac_address(slave_node);
		if (mac_addr)
			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);

		i++;
	}

	return 0;

error_ret:
	kfree(data->slave_data);
	return ret;
}
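/*
 * Probe sequence: allocate the net_device, parse the device tree, map
 * the CPSW and subsystem register regions, create the CPDMA controller
 * and channels, set up the ALE, attach interrupt handlers, and register
 * the net device.  The error labels unwind those steps in reverse.
 */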
static int __devinit cpsw_probe(struct platform_device *pdev)
{
	struct cpsw_platform_data	*data = pdev->dev.platform_data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv;
	struct cpdma_params		dma_params;
	struct cpsw_ale_params		ale_params;
	void __iomem			*regs;
	struct resource			*res;
	int ret = 0, i, k = 0;

	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
	if (!ndev) {
		pr_err("error allocating net_device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	spin_lock_init(&priv->lock);
	priv->pdev = pdev;
	priv->ndev = ndev;
	priv->dev  = &ndev->dev;
	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
	priv->rx_packet_max = max(rx_packet_max, 128);

	if (cpsw_probe_dt(&priv->data, pdev)) {
		pr_err("cpsw: platform data missing\n");
		ret = -ENODEV;
		goto clean_ndev_ret;
	}
	data = &priv->data;

	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
		pr_info("Detected MACID = %pM\n", priv->mac_addr);
	} else {
		eth_random_addr(priv->mac_addr);
		pr_info("Random MACID = %pM\n", priv->mac_addr);
	}

	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);

	priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
			       GFP_KERNEL);
	if (!priv->slaves) {
		ret = -ENOMEM;
		goto clean_ndev_ret;
	}
	for (i = 0; i < data->slaves; i++)
		priv->slaves[i].slave_num = i;

	pm_runtime_enable(&pdev->dev);
	priv->clk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "fck is not found\n");
		ret = -ENODEV;
		goto clean_slave_ret;
	}

	priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!priv->cpsw_res) {
		dev_err(priv->dev, "error getting i/o resource\n");
		ret = -ENOENT;
		goto clean_clk_ret;
	}

	if (!request_mem_region(priv->cpsw_res->start,
				resource_size(priv->cpsw_res), ndev->name)) {
		dev_err(priv->dev, "failed request i/o region\n");
		ret = -ENXIO;
		goto clean_clk_ret;
	}

	regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
	if (!regs) {
		dev_err(priv->dev, "unable to map i/o region\n");
		ret = -ENOMEM;
		goto clean_cpsw_iores_ret;
	}
	priv->regs = regs;
	priv->host_port = data->host_port_num;
	priv->host_port_regs = regs + data->host_port_reg_ofs;

	priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!priv->cpsw_ss_res) {
		dev_err(priv->dev, "error getting i/o resource\n");
		ret = -ENOENT;
		goto clean_iomap_ret;
	}

	if (!request_mem_region(priv->cpsw_ss_res->start,
				resource_size(priv->cpsw_ss_res), ndev->name)) {
		dev_err(priv->dev, "failed request i/o region\n");
		ret = -ENXIO;
		goto clean_iomap_ret;
	}

	regs = ioremap(priv->cpsw_ss_res->start,
		       resource_size(priv->cpsw_ss_res));
	if (!regs) {
		dev_err(priv->dev, "unable to map i/o region\n");
		ret = -ENOMEM;
		goto clean_cpsw_ss_iores_ret;
	}
	priv->ss_regs = regs;

	for_each_slave(priv, cpsw_slave_init, priv);

	memset(&dma_params, 0, sizeof(dma_params));
	dma_params.dev		= &pdev->dev;
	dma_params.dmaregs	= cpsw_dma_regs((u32)priv->regs,
						data->cpdma_reg_ofs);
	dma_params.rxthresh	= cpsw_dma_rxthresh((u32)priv->regs,
						    data->cpdma_reg_ofs);
	dma_params.rxfree	= cpsw_dma_rxfree((u32)priv->regs,
						  data->cpdma_reg_ofs);
	dma_params.txhdp	= cpsw_dma_txhdp((u32)priv->regs,
						 data->cpdma_sram_ofs);
	dma_params.rxhdp	= cpsw_dma_rxhdp((u32)priv->regs,
						 data->cpdma_sram_ofs);
	dma_params.txcp		= cpsw_dma_txcp((u32)priv->regs,
						data->cpdma_sram_ofs);
	dma_params.rxcp		= cpsw_dma_rxcp((u32)priv->regs,
						data->cpdma_sram_ofs);

	dma_params.num_chan		= data->channels;
	dma_params.has_soft_reset	= true;
	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size	= data->bd_ram_size;
	dma_params.desc_align		= 16;
	dma_params.has_ext_regs		= true;
	dma_params.desc_mem_phys	= data->no_bd_ram ? 0 :
			(u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
	dma_params.desc_hw_addr		= data->hw_ram_addr ?
			data->hw_ram_addr : dma_params.desc_mem_phys;

	priv->dma = cpdma_ctlr_create(&dma_params);
	if (!priv->dma) {
		dev_err(priv->dev, "error initializing dma\n");
		ret = -ENOMEM;
		goto clean_ss_iomap_ret;
	}

	priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
				       cpsw_tx_handler);
	priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
				       cpsw_rx_handler);

	if (WARN_ON(!priv->txch || !priv->rxch)) {
		dev_err(priv->dev, "error initializing dma channels\n");
		ret = -ENOMEM;
		goto clean_dma_ret;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev		= &ndev->dev;
	ale_params.ale_regs	= (void *)((u32)priv->regs) +
					((u32)data->ale_reg_ofs);
	ale_params.ale_ageout	= ale_ageout;
	ale_params.ale_entries	= data->ale_entries;
	ale_params.ale_ports	= data->slaves;

	priv->ale = cpsw_ale_create(&ale_params);
	if (!priv->ale) {
		dev_err(priv->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto clean_dma_ret;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		dev_err(priv->dev, "error getting irq resource\n");
		ret = -ENOENT;
		goto clean_ale_ret;
	}
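	/*
	 * Attach a handler to every IRQ resource the platform provides and
	 * record the numbers in irqs_table[]; cpsw_{enable,disable}_irq()
	 * work from this snapshot when masking interrupts around NAPI
	 * scheduling.
	 */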
	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
		for (i = res->start; i <= res->end; i++) {
			if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
					dev_name(&pdev->dev), priv)) {
				dev_err(priv->dev, "error attaching irq\n");
				goto clean_ale_ret;
			}
			priv->irqs_table[k] = i;
			priv->num_irqs = k + 1;
		}
		k++;
	}

	ndev->flags |= IFF_ALLMULTI;	/* see cpsw_ndo_change_rx_flags() */

	ndev->netdev_ops = &cpsw_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
	netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);

	/* register the network device */
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "error registering net device\n");
		ret = -ENODEV;
		goto clean_irq_ret;
	}

	cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
		    priv->cpsw_res->start, ndev->irq);

	return 0;

clean_irq_ret:
	free_irq(ndev->irq, priv);
clean_ale_ret:
	cpsw_ale_destroy(priv->ale);
clean_dma_ret:
	cpdma_chan_destroy(priv->txch);
	cpdma_chan_destroy(priv->rxch);
	cpdma_ctlr_destroy(priv->dma);
clean_ss_iomap_ret:
	iounmap(priv->ss_regs);
clean_cpsw_ss_iores_ret:
	release_mem_region(priv->cpsw_ss_res->start,
			   resource_size(priv->cpsw_ss_res));
clean_iomap_ret:
	iounmap(priv->regs);
clean_cpsw_iores_ret:
	release_mem_region(priv->cpsw_res->start,
			   resource_size(priv->cpsw_res));
clean_clk_ret:
	clk_put(priv->clk);
clean_slave_ret:
	pm_runtime_disable(&pdev->dev);
	kfree(priv->slaves);
clean_ndev_ret:
	free_netdev(ndev);
	return ret;
}

static int __devexit cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_priv *priv = netdev_priv(ndev);

	pr_info("removing device\n");
	platform_set_drvdata(pdev, NULL);

	free_irq(ndev->irq, priv);
	cpsw_ale_destroy(priv->ale);
	cpdma_chan_destroy(priv->txch);
	cpdma_chan_destroy(priv->rxch);
	cpdma_ctlr_destroy(priv->dma);
	iounmap(priv->ss_regs);
	release_mem_region(priv->cpsw_ss_res->start,
			   resource_size(priv->cpsw_ss_res));
	iounmap(priv->regs);
	release_mem_region(priv->cpsw_res->start,
			   resource_size(priv->cpsw_res));
	pm_runtime_disable(&pdev->dev);
	clk_put(priv->clk);
	kfree(priv->slaves);
	free_netdev(ndev);

	return 0;
}
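/*
 * The PM callbacks reuse the ndo_open/ndo_stop paths: suspend closes a
 * running interface and drops the runtime PM reference, resume takes
 * the reference back and reopens the interface if it was up.
 */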
static int cpsw_suspend(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);

	if (netif_running(ndev))
		cpsw_ndo_stop(ndev);
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}

static int cpsw_resume(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	if (netif_running(ndev))
		cpsw_ndo_open(ndev);
	return 0;
}

static const struct dev_pm_ops cpsw_pm_ops = {
	.suspend	= cpsw_suspend,
	.resume		= cpsw_resume,
};

static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.owner	 = THIS_MODULE,
		.pm	 = &cpsw_pm_ops,
		.of_match_table = of_match_ptr(cpsw_of_mtable),
	},
	.probe = cpsw_probe,
	.remove = __devexit_p(cpsw_remove),
};

static int __init cpsw_init(void)
{
	return platform_driver_register(&cpsw_driver);
}
late_initcall(cpsw_init);

static void __exit cpsw_exit(void)
{
	platform_driver_unregister(&cpsw_driver);
}
module_exit(cpsw_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");