/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>

#include <linux/platform_data/cpsw.h>

#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#define CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_WOL |		\
			 NETIF_MSG_DRV | NETIF_MSG_LINK |	\
			 NETIF_MSG_IFUP | NETIF_MSG_INTR |	\
			 NETIF_MSG_PROBE | NETIF_MSG_TIMER |	\
			 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |	\
			 NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE |	\
			 NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED |	\
			 NETIF_MSG_RX_STATUS)

#define cpsw_info(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define ALE_ALL_PORTS		0x7

#define CPSW_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg)		(reg & 0xff)
#define CPSW_RTL_VERSION(reg)		((reg >> 11) & 0x1f)

#define CPSW_VERSION_1		0x19010a
#define CPSW_VERSION_2		0x19010c

#define HOST_PORT_NUM		0
#define SLIVER_SIZE		0x40

#define CPSW1_HOST_PORT_OFFSET	0x028
#define CPSW1_SLAVE_OFFSET	0x050
#define CPSW1_SLAVE_SIZE	0x040
#define CPSW1_CPDMA_OFFSET	0x100
#define CPSW1_STATERAM_OFFSET	0x200
#define CPSW1_CPTS_OFFSET	0x500
#define CPSW1_ALE_OFFSET	0x600
#define CPSW1_SLIVER_OFFSET	0x700

#define CPSW2_HOST_PORT_OFFSET	0x108
#define CPSW2_SLAVE_OFFSET	0x200
#define CPSW2_SLAVE_SIZE	0x100
#define CPSW2_CPDMA_OFFSET	0x800
#define CPSW2_STATERAM_OFFSET	0xa00
#define CPSW2_CPTS_OFFSET	0xc00
#define CPSW2_ALE_OFFSET	0xd00
#define CPSW2_SLIVER_OFFSET	0xd80
#define CPSW2_BD_OFFSET		0x2000

#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

#define CPSW_POLL_WEIGHT	64
#define CPSW_MIN_PACKET_SIZE	60
#define CPSW_MAX_PACKET_SIZE	(1500 + 14 + 4 + 4)

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

#define CPSW_VLAN_AWARE		BIT(1)
#define CPSW_ALE_VLAN_AWARE	1

#define CPSW_FIFO_NORMAL_MODE		(0 << 15)
#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 15)
#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 15)

#define CPSW_INTPACEEN		(0x3f << 16)
#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
#define CPSW_CMINTMAX_CNT	63
#define CPSW_CMINTMIN_CNT	2
#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)

#define cpsw_enable_irq(priv)	\
	do {			\
		u32 i;		\
		for (i = 0; i < priv->num_irqs; i++)		\
			enable_irq(priv->irqs_table[i]);	\
	} while (0);
#define cpsw_disable_irq(priv)	\
	do {			\
		u32 i;		\
		for (i = 0; i < priv->num_irqs; i++)		\
			disable_irq_nosync(priv->irqs_table[i]);	\
	} while (0);

#define cpsw_slave_index(priv)				\
		((priv->data.dual_emac) ? priv->emac_port :	\
		priv->data.active_slave)

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

struct cpsw_wr_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;
	u32	tx_en;
	u32	misc_en;
	u32	mem_allign1[8];
	u32	rx_thresh_stat;
	u32	rx_stat;
	u32	tx_stat;
	u32	misc_stat;
	u32	mem_allign2[8];
	u32	rx_imax;
	u32	tx_imax;

};

struct cpsw_ss_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	vlan_ltype;
	u32	ts_ltype;
	u32	dlr_ltype;
};

/* CPSW_PORT_V1 */
#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */

/* CPSW_PORT_V2 */
#define CPSW2_CONTROL       0x00 /* Control Register */
#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */

/* CPSW_PORT_V1 and V2 */
#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */

/* CPSW_PORT_V2 only */
#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */

/* Bit definitions for the CPSW2_CONTROL register */
#define PASS_PRI_TAGGED     (1<<24) /* Pass Priority Tagged */
#define VLAN_LTYPE2_EN      (1<<21) /* VLAN LTYPE 2 enable */
#define VLAN_LTYPE1_EN      (1<<20) /* VLAN LTYPE 1 enable */
#define DSCP_PRI_EN         (1<<16) /* DSCP Priority Enable */
#define TS_320              (1<<14) /* Time Sync Dest Port 320 enable */
#define TS_319              (1<<13) /* Time Sync Dest Port 319 enable */
#define TS_132              (1<<12) /* Time Sync Dest IP Addr 132 enable */
#define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
#define TS_BIT8             (1<<8)  /* ts_ttl_nonzero? */
#define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
#define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
#define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
#define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */

#define CTRL_TS_BITS \
	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
	 TS_ANNEX_D_EN | TS_LTYPE1_EN)

#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_TX_TS_BITS  (CTRL_TS_BITS | TS_TX_EN)
#define CTRL_RX_TS_BITS  (CTRL_TS_BITS | TS_RX_EN)

/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT	(16)	/* Time Sync Sequence ID Offset */
#define TS_SEQ_ID_OFFSET_MASK	(0x3f)
#define TS_MSG_TYPE_EN_SHIFT	(0)	/* Time Sync Message Type Enable */
#define TS_MSG_TYPE_EN_MASK	(0xffff)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))

/* Bit definitions for the CPSW1_TS_CTL register */
#define CPSW_V1_TS_RX_EN		BIT(0)
#define CPSW_V1_TS_TX_EN		BIT(4)
#define CPSW_V1_MSG_TYPE_OFS		16

/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT	16

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	tx_in_ctl;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

struct cpsw_slave {
	void __iomem			*regs;
	struct cpsw_sliver_regs __iomem	*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
	struct net_device		*ndev;
	u32				port_vlan;
	u32				open_stat;
};

static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
	return __raw_readl(slave->regs + offset);
}

static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
	__raw_writel(val, slave->regs + offset);
}

struct cpsw_priv {
	spinlock_t			lock;
	struct platform_device		*pdev;
	struct net_device		*ndev;
	struct resource			*cpsw_res;
	struct resource			*cpsw_wr_res;
	struct napi_struct		napi;
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct cpsw_ss_regs __iomem	*regs;
	struct cpsw_wr_regs __iomem	*wr_regs;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				msg_enable;
	u32				version;
	u32				coal_intvl;
	u32				bus_freq_mhz;
	struct net_device_stats		stats;
	int				rx_packet_max;
	int				host_port;
	struct clk			*clk;
	u8				mac_addr[ETH_ALEN];
	struct cpsw_slave		*slaves;
	struct cpdma_ctlr		*dma;
	struct cpdma_chan		*txch, *rxch;
	struct cpsw_ale			*ale;
	/* snapshot of IRQ numbers */
	u32				irqs_table[4];
	u32				num_irqs;
	bool				irq_enabled;
	struct cpts			*cpts;
	u32				emac_port;
};

#define napi_to_priv(napi)	container_of(napi, struct cpsw_priv, napi)
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		int n;							\
		if (priv->data.dual_emac)				\
			(func)((priv)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = (priv)->data.slaves,			\
					slave = (priv)->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)
#define cpsw_get_slave_ndev(priv, __slave_no__)		\
	(priv->slaves[__slave_no__].ndev)
#define cpsw_get_slave_priv(priv, __slave_no__)			\
	((priv->slaves[__slave_no__].ndev) ?			\
		netdev_priv(priv->slaves[__slave_no__].ndev) : NULL)

#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb)	\
	do {								\
		if (!priv->data.dual_emac)				\
			break;						\
		if (CPDMA_RX_SOURCE_PORT(status) == 1) {		\
			ndev = cpsw_get_slave_ndev(priv, 0);		\
			priv = netdev_priv(ndev);			\
			skb->dev = ndev;				\
		} else if (CPDMA_RX_SOURCE_PORT(status) == 2) {		\
			ndev = cpsw_get_slave_ndev(priv, 1);		\
			priv = netdev_priv(ndev);			\
			skb->dev = ndev;				\
		}							\
	} while (0)
#define cpsw_add_mcast(priv, addr)					\
	do {								\
		if (priv->data.dual_emac) {				\
			struct cpsw_slave *slave = priv->slaves +	\
						priv->emac_port;	\
			int slave_port = cpsw_get_slave_port(priv,	\
						slave->slave_num);	\
			cpsw_ale_add_mcast(priv->ale, addr,		\
				1 << slave_port | 1 << priv->host_port,	\
				ALE_VLAN, slave->port_vlan, 0);		\
		} else {						\
			cpsw_ale_add_mcast(priv->ale, addr,		\
				ALE_ALL_PORTS << priv->host_port,	\
				0, 0, 0);				\
		}							\
	} while (0)

static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		dev_err(priv->dev, "Ignoring Promiscuous mode\n");
		return;
	}

	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_add_mcast(priv, (u8 *)ha->addr);
		}
	}
}

static void cpsw_intr_enable(struct cpsw_priv *priv)
{
	__raw_writel(0xFF, &priv->wr_regs->tx_en);
	__raw_writel(0xFF, &priv->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(priv->dma, true);
	return;
}

static void cpsw_intr_disable(struct cpsw_priv *priv)
{
	__raw_writel(0, &priv->wr_regs->tx_en);
	__raw_writel(0, &priv->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(priv->dma, false);
	return;
}

void cpsw_tx_handler(void *token, int len, int status)
{
	struct sk_buff *skb = token;
	struct net_device *ndev = skb->dev;
	struct cpsw_priv *priv = netdev_priv(ndev);

	/* Check whether the queue is stopped due to stalled tx dma, if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	if (unlikely(netif_queue_stopped(ndev)))
		netif_wake_queue(ndev);
	cpts_tx_timestamp(priv->cpts, skb);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

void cpsw_rx_handler(void *token, int len, int status)
{
	struct sk_buff *skb = token;
	struct sk_buff *new_skb;
	struct net_device *ndev = skb->dev;
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret = 0;

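	/* In dual EMAC mode the CPDMA status word carries the ingress switch
	 * port, so redirect ndev, priv and skb->dev to the slave interface
	 * the packet actually arrived on.
	 */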
	cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);

	if (unlikely(status < 0)) {
		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
	if (new_skb) {
		skb_put(skb, len);
		cpts_rx_timestamp(priv->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		priv->stats.rx_bytes += len;
		priv->stats.rx_packets++;
	} else {
		priv->stats.rx_dropped++;
		new_skb = skb;
	}

	ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
				skb_tailroom(new_skb), 0);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);
}

static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
	struct cpsw_priv *priv = dev_id;
	u32 rx, tx, rx_thresh;

	rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
	rx = __raw_readl(&priv->wr_regs->rx_stat);
	tx = __raw_readl(&priv->wr_regs->tx_stat);
	if (!rx_thresh && !rx && !tx)
		return IRQ_NONE;

	cpsw_intr_disable(priv);
	if (priv->irq_enabled == true) {
		cpsw_disable_irq(priv);
		priv->irq_enabled = false;
	}

	if (netif_running(priv->ndev)) {
		napi_schedule(&priv->napi);
		return IRQ_HANDLED;
	}

	priv = cpsw_get_slave_priv(priv, 1);
	if (!priv)
		return IRQ_NONE;

	if (netif_running(priv->ndev)) {
		napi_schedule(&priv->napi);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static int cpsw_poll(struct napi_struct *napi, int budget)
{
	struct cpsw_priv *priv = napi_to_priv(napi);
	int num_tx, num_rx;

	num_tx = cpdma_chan_process(priv->txch, 128);
	if (num_tx)
		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

	num_rx = cpdma_chan_process(priv->rxch, budget);
	if (num_rx < budget) {
		struct cpsw_priv *prim_cpsw;

		napi_complete(napi);
		cpsw_intr_enable(priv);
		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
		prim_cpsw = cpsw_get_slave_priv(priv, 0);
		if (prim_cpsw->irq_enabled == false) {
			prim_cpsw->irq_enabled = true;
			cpsw_enable_irq(priv);
		}
	}

	if (num_rx || num_tx)
		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
			 num_rx, num_tx);

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	__raw_writel(1, reg);
	do {
		cpu_relax();
	} while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));

	WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
}

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;
	u32 slave_port;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	if (phy->link) {
		mac_control = priv->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN */
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN */

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);

		*link = true;
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		__raw_writel(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_wake_queue(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}
}

static int cpsw_get_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	coal->rx_coalesce_usecs = priv->coal_intvl;
	return 0;
}

static int cpsw_set_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;

	if (!coal->rx_coalesce_usecs)
		return -EINVAL;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl = readl(&priv->wr_regs->int_control);
	prescale = priv->bus_freq_mhz * 4;

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &priv->wr_regs->rx_imax);
	writel(num_interrupts, &priv->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
	writel(int_ctrl, &priv->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	if (priv->data.dual_emac) {
		int i;

		for (i = 0; i < priv->data.slaves; i++) {
			priv = netdev_priv(priv->slaves[i].ndev);
			priv->coal_intvl = coal_intvl;
		}
	} else {
		priv->coal_intvl = coal_intvl;
	}

	return 0;
}

static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
	static char *leader = "........................................";

	if (!val)
		return 0;
	else
		return snprintf(buf, maxlen, "%s %s %10d\n", name,
				leader + strlen(name), val);
}

static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
{
	u32 i;
	u32 usage_count = 0;

	if (!priv->data.dual_emac)
		return 0;

	for (i = 0; i < priv->data.slaves; i++)
		if (priv->slaves[i].open_stat)
			usage_count++;

	return usage_count;
}

static inline int cpsw_tx_packet_submit(struct net_device *ndev,
			struct cpsw_priv *priv, struct sk_buff *skb)
{
	if (!priv->data.dual_emac)
		return cpdma_chan_submit(priv->txch, skb, skb->data,
					 skb->len, 0);

	if (ndev == cpsw_get_slave_ndev(priv, 0))
		return cpdma_chan_submit(priv->txch, skb, skb->data,
					 skb->len, 1);
	else
		return cpdma_chan_submit(priv->txch, skb, skb->data,
					 skb->len, 2);
}

static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	u32 port_mask = 1 << slave_port | 1 << priv->host_port;

	if (priv->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			   port_mask, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
			   priv->host_port, ALE_VLAN, slave->port_vlan);
}

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	char name[32];
	u32 slave_port;

	sprintf(name, "slave-%d", slave->slave_num);

	soft_reset(name, &slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);

	switch (priv->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		break;
	case CPSW_VERSION_2:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		break;
	}

	/* setup max packet size, and mac address */
	__raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	if (priv->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
	if (IS_ERR(slave->phy)) {
		dev_err(priv->dev, "phy %s not found on slave %d\n",
			slave->data->phy_id, slave->slave_num);
		slave->phy = NULL;
	} else {
		dev_info(priv->dev, "phy found : id is : 0x%x\n",
			 slave->phy->phy_id);
		phy_start(slave->phy);
	}
}

static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	const int vlan = priv->data.default_vlan;
	const int port = priv->host_port;
	u32 reg;
	int i;

	reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &priv->host_port_regs->port_vlan);

	for (i = 0; i < priv->data.slaves; i++)
		slave_write(priv->slaves + i, vlan, reg);

	cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
			  ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
			  (ALE_PORT_1 | ALE_PORT_2) << port);
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 control_reg;
	u32 fifo_mode;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &priv->regs->soft_reset);
	cpsw_ale_start(priv->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&priv->regs->control);
	control_reg |= CPSW_VLAN_AWARE;
	writel(control_reg, &priv->regs->control);
	fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	__raw_writel(CPDMA_TX_PRIORITY_MAP,
		     &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!priv->data.dual_emac) {
		cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
				   0, 0);
		cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
				   1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
	}
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}

static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_priv *prim_cpsw;
	int i, ret;
	u32 reg;

	if (!cpsw_common_res_usage_state(priv))
		cpsw_intr_disable(priv);
	netif_carrier_off(ndev);

	pm_runtime_get_sync(&priv->pdev->dev);

	reg = priv->version;

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* initialize host and slave ports */
	if (!cpsw_common_res_usage_state(priv))
		cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* Add default VLAN */
	if (!priv->data.dual_emac)
		cpsw_add_default_vlan(priv);

	if (!cpsw_common_res_usage_state(priv)) {
		/* setup tx dma to fixed prio and zero offset */
		cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
		cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);

		/* disable priority elevation */
		__raw_writel(0, &priv->regs->ptype);

		/* enable statistics collection on all ports */
		__raw_writel(0x7, &priv->regs->stat_port_en);

		if (WARN_ON(!priv->data.rx_descs))
			priv->data.rx_descs = 128;

		for (i = 0; i < priv->data.rx_descs; i++) {
			struct sk_buff *skb;

			ret = -ENOMEM;
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
					priv->rx_packet_max, GFP_KERNEL);
			if (!skb)
				goto err_cleanup;
			ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), 0);
			if (ret < 0) {
				kfree_skb(skb);
				goto err_cleanup;
			}
		}
		/* continue even if we didn't manage to submit all
		 * receive descs
		 */
		cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
	}

	/* Enable Interrupt pacing if configured */
	if (priv->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
		cpsw_set_coalesce(ndev, &coal);
	}

	prim_cpsw = cpsw_get_slave_priv(priv, 0);
	if (prim_cpsw->irq_enabled == false) {
		if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
			prim_cpsw->irq_enabled = true;
			cpsw_enable_irq(prim_cpsw);
		}
	}

	cpdma_ctlr_start(priv->dma);
	cpsw_intr_enable(priv);
	napi_enable(&priv->napi);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

	if (priv->data.dual_emac)
		priv->slaves[priv->emac_port].open_stat = true;
	return 0;

err_cleanup:
	cpdma_ctlr_stop(priv->dma);
	for_each_slave(priv, cpsw_slave_stop, priv);
	pm_runtime_put_sync(&priv->pdev->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	netif_stop_queue(priv->ndev);
	napi_disable(&priv->napi);
	netif_carrier_off(priv->ndev);

	if (cpsw_common_res_usage_state(priv) <= 1) {
		cpsw_intr_disable(priv);
		cpdma_ctlr_int_ctrl(priv->dma, false);
		cpdma_ctlr_stop(priv->dma);
		cpsw_ale_stop(priv->ale);
	}
	for_each_slave(priv, cpsw_slave_stop, priv);
	pm_runtime_put_sync(&priv->pdev->dev);
	if (priv->data.dual_emac)
		priv->slaves[priv->emac_port].open_stat = false;
	return 0;
}

static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ndev->trans_start = jiffies;

	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		priv->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
				priv->cpts->tx_enable)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_tx_timestamp(skb);

	ret = cpsw_tx_packet_submit(ndev, priv, skb);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
fail:
	priv->stats.tx_dropped++;
	netif_stop_queue(ndev);
	return NETDEV_TX_BUSY;
}

static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
{
	/*
	 * The switch cannot operate in promiscuous mode without substantial
	 * headache.  For promiscuous mode to work, we would need to put the
	 * ALE in bypass mode and route all traffic to the host port.
	 * Subsequently, the host will need to operate as a "bridge", learn,
	 * and flood as needed.  For now, we simply complain here and
	 * do nothing about it :-)
	 */
	if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
		dev_err(&ndev->dev, "promiscuity ignored!\n");

	/*
	 * The switch cannot filter multicast traffic unless it is configured
	 * in "VLAN Aware" mode.  Unfortunately, VLAN awareness requires a
	 * whole bunch of additional logic that this driver does not implement
	 * at present.
	 */
	if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
		dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
}

#ifdef CONFIG_TI_CPTS

static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
	u32 ts_en, seq_id;

	if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (priv->cpts->tx_enable)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (priv->cpts->rx_enable)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}

static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	u32 ctrl, mtype;

	if (priv->data.dual_emac)
		slave = &priv->slaves[priv->emac_port];
	else
		slave = &priv->slaves[priv->data.active_slave];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	ctrl &= ~CTRL_ALL_TS_MASK;

	if (priv->cpts->tx_enable)
		ctrl |= CTRL_TX_TS_BITS;

	if (priv->cpts->rx_enable)
		ctrl |= CTRL_RX_TS_BITS;

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	__raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
}

static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpts *cpts = priv->cpts;
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		cpts->tx_enable = 0;
		break;
	case HWTSTAMP_TX_ON:
		cpts->tx_enable = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		cpts->rx_enable = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		cpts->rx_enable = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	switch (priv->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		return -ENOTSUPP;
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

#endif /*CONFIG_TI_CPTS*/

static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(req);
	int slave_no = cpsw_slave_index(priv);

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
#ifdef CONFIG_TI_CPTS
	case SIOCSHWTSTAMP:
		return cpsw_hwtstamp_ioctl(dev, req);
#endif
	case SIOCGMIIPHY:
		data->phy_id = priv->slaves[slave_no].phy->addr;
		break;
	default:
		return -ENOTSUPP;
	}

	return 0;
}

static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	priv->stats.tx_errors++;
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_chan_stop(priv->txch);
	cpdma_chan_start(priv->txch);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

}

static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	return &priv->stats;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpsw_interrupt(ndev->irq, priv);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);

}
#endif

static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
					  unsigned short vid)
{
	int ret;

	ret = cpsw_ale_add_vlan(priv->ale, vid,
				ALE_ALL_PORTS << priv->host_port,
				0, ALE_ALL_PORTS << priv->host_port,
				(ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
				 priv->host_port, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
				 ALE_ALL_PORTS << priv->host_port,
				 ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
			   priv->host_port, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(priv->ale, vid, 0);
	return ret;
}

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	if (vid == priv->data.default_vlan)
		return 0;

	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
	return cpsw_add_vlan_ale_entry(priv, vid);
}

static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	if (vid == priv->data.default_vlan)
		return 0;

	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
				 priv->host_port, ALE_VLAN, vid);
	if (ret != 0)
		return ret;

	return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
				  0, ALE_VLAN, vid);
}

static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_change_rx_flags	= cpsw_ndo_change_rx_flags,
	.ndo_do_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_get_stats		= cpsw_ndo_get_stats,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
};

static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
}

static u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	return priv->msg_enable;
}

static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	priv->msg_enable = value;
}

static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
#ifdef CONFIG_TI_CPTS
	struct cpsw_priv *priv = netdev_priv(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = priv->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
#else
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
#endif
	return 0;
}

static int cpsw_get_settings(struct net_device *ndev,
			     struct ethtool_cmd *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int slave_no = cpsw_slave_index(priv);

	if (priv->slaves[slave_no].phy)
		return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
	else
		return -EOPNOTSUPP;
}

static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int slave_no = cpsw_slave_index(priv);

	if (priv->slaves[slave_no].phy)
		return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
	else
		return -EOPNOTSUPP;
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo	= cpsw_get_drvinfo,
	.get_msglevel	= cpsw_get_msglevel,
	.set_msglevel	= cpsw_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= cpsw_get_ts_info,
	.get_settings	= cpsw_get_settings,
	.set_settings	= cpsw_set_settings,
	.get_coalesce	= cpsw_get_coalesce,
	.set_coalesce	= cpsw_set_coalesce,
};

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
{
	void __iomem		*regs = priv->regs;
	int			slave_num = slave->slave_num;
	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;

	slave->data	= data;
	slave->regs	= regs + slave_reg_ofs;
	slave->sliver	= regs + sliver_reg_ofs;
	slave->port_vlan = data->dual_emac_res_vlan;
}

static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		pr_err("Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		pr_err("Missing active_slave property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->active_slave = prop;

	if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
		pr_err("Missing cpts_clock_mult property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->cpts_clock_mult = prop;

	if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
		pr_err("Missing cpts_clock_shift property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->cpts_clock_shift = prop;

	data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
				   GFP_KERNEL);
	if (!data->slave_data)
		return -EINVAL;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		pr_err("Missing cpdma_channels property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "ale_entries", &prop)) {
		pr_err("Missing ale_entries property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->ale_entries = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		pr_err("Missing bd_ram_size property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "rx_descs", &prop)) {
		pr_err("Missing rx_descs property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->rx_descs = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		pr_err("Missing mac_control property in the DT.\n");
		ret = -EINVAL;
		goto error_ret;
	}
	data->mac_control = prop;

	if (!of_property_read_u32(node, "dual_emac", &prop))
		data->dual_emac = prop;

	/*
	 * Populate all the child nodes here...
	 */
	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		pr_warn("Doesn't have any child node\n");

	for_each_node_by_name(slave_node, "slave") {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		const void *mac_addr = NULL;
		u32 phyid;
		int lenp;
		const __be32 *parp;
		struct device_node *mdio_node;
		struct platform_device *mdio;

		parp = of_get_property(slave_node, "phy_id", &lenp);
		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
			pr_err("Missing slave[%d] phy_id property\n", i);
			ret = -EINVAL;
			goto error_ret;
		}
		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
		phyid = be32_to_cpup(parp+1);
		mdio = of_find_device_by_node(mdio_node);
		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
			 PHY_ID_FMT, mdio->name, phyid);

		mac_addr = of_get_mac_address(slave_node);
		if (mac_addr)
			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);

		if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				pr_err("Missing dual_emac_res_vlan in DT.\n");
				slave_data->dual_emac_res_vlan = i+1;
				pr_err("Using %d as Reserved VLAN for %d slave\n",
				       slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
	}

	return 0;

error_ret:
	kfree(data->slave_data);
	return ret;
}

static int cpsw_probe_dual_emac(struct platform_device *pdev,
				struct cpsw_priv *priv)
{
	struct cpsw_platform_data	*data = &priv->data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv_sl2;
	int ret = 0, i;

	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
	if (!ndev) {
		pr_err("cpsw: error allocating net_device\n");
		return -ENOMEM;
	}

	priv_sl2 = netdev_priv(ndev);
	spin_lock_init(&priv_sl2->lock);
	priv_sl2->data = *data;
	priv_sl2->pdev = pdev;
	priv_sl2->ndev = ndev;
	priv_sl2->dev  = &ndev->dev;
	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
	priv_sl2->rx_packet_max = max(rx_packet_max, 128);

	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
			ETH_ALEN);
		pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
	} else {
		random_ether_addr(priv_sl2->mac_addr);
		pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
	}
	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);

	priv_sl2->slaves = priv->slaves;
	priv_sl2->clk = priv->clk;

	priv_sl2->coal_intvl = 0;
	priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;

	priv_sl2->cpsw_res = priv->cpsw_res;
	priv_sl2->regs = priv->regs;
	priv_sl2->host_port = priv->host_port;
	priv_sl2->host_port_regs = priv->host_port_regs;
	priv_sl2->wr_regs = priv->wr_regs;
	priv_sl2->dma = priv->dma;
	priv_sl2->txch = priv->txch;
	priv_sl2->rxch = priv->rxch;
	priv_sl2->ale = priv->ale;
	priv_sl2->emac_port = 1;
	priv->slaves[1].ndev = ndev;
	priv_sl2->cpts = priv->cpts;
	priv_sl2->version = priv->version;

	for (i = 0; i < priv->num_irqs; i++) {
		priv_sl2->irqs_table[i] = priv->irqs_table[i];
		priv_sl2->num_irqs = priv->num_irqs;
	}
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	ndev->netdev_ops = &cpsw_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
	netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);

	/* register the network device */
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ret = register_netdev(ndev);
	if (ret) {
		pr_err("cpsw: error registering net device\n");
		free_netdev(ndev);
		ret = -ENODEV;
	}

	return ret;
}

static int cpsw_probe(struct platform_device *pdev)
{
	struct cpsw_platform_data	*data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv;
	struct cpdma_params		dma_params;
	struct cpsw_ale_params		ale_params;
	void __iomem			*ss_regs, *wr_regs;
	struct resource			*res;
	u32 slave_offset, sliver_offset, slave_size;
	int ret = 0, i, k = 0;

	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
	if (!ndev) {
		pr_err("error allocating net_device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	spin_lock_init(&priv->lock);
	priv->pdev = pdev;
	priv->ndev = ndev;
	priv->dev  = &ndev->dev;
	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
	priv->rx_packet_max = max(rx_packet_max, 128);
	priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
	priv->irq_enabled = true;
	if (!priv->cpts) {
		pr_err("error allocating cpts\n");
		ret = -ENOMEM;
		goto clean_ndev_ret;
	}

	/*
	 * This may be required here for child devices.
	 */
	pm_runtime_enable(&pdev->dev);

	if (cpsw_probe_dt(&priv->data, pdev)) {
		pr_err("cpsw: platform data missing\n");
		ret = -ENODEV;
		goto clean_ndev_ret;
	}
	data = &priv->data;

	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
		pr_info("Detected MACID = %pM", priv->mac_addr);
	} else {
		eth_random_addr(priv->mac_addr);
		pr_info("Random MACID = %pM", priv->mac_addr);
	}

	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);

	priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
			       GFP_KERNEL);
	if (!priv->slaves) {
		ret = -EBUSY;
		goto clean_ndev_ret;
	}
	for (i = 0; i < data->slaves; i++)
		priv->slaves[i].slave_num = i;

	priv->slaves[0].ndev = ndev;
	priv->emac_port = 0;

	priv->clk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "fck is not found\n");
		ret = -ENODEV;
		goto clean_slave_ret;
	}
	priv->coal_intvl = 0;
	priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;

	priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!priv->cpsw_res) {
		dev_err(priv->dev, "error getting i/o resource\n");
		ret = -ENOENT;
		goto clean_clk_ret;
	}
	if (!request_mem_region(priv->cpsw_res->start,
				resource_size(priv->cpsw_res), ndev->name)) {
		dev_err(priv->dev, "failed request i/o region\n");
		ret = -ENXIO;
		goto clean_clk_ret;
	}
	ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
	if (!ss_regs) {
		dev_err(priv->dev, "unable to map i/o region\n");
		goto clean_cpsw_iores_ret;
	}
	priv->regs = ss_regs;
	priv->version = __raw_readl(&priv->regs->id_ver);
	priv->host_port = HOST_PORT_NUM;

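	/* Second memory resource: the CPSW wrapper (WR) region that holds
	 * the per-core interrupt enable/status and interrupt pacing
	 * registers accessed via priv->wr_regs.
	 */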
	priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!priv->cpsw_wr_res) {
		dev_err(priv->dev, "error getting i/o resource\n");
		ret = -ENOENT;
		goto clean_iomap_ret;
	}
	if (!request_mem_region(priv->cpsw_wr_res->start,
			resource_size(priv->cpsw_wr_res), ndev->name)) {
		dev_err(priv->dev, "failed request i/o region\n");
		ret = -ENXIO;
		goto clean_iomap_ret;
	}
	wr_regs = ioremap(priv->cpsw_wr_res->start,
			  resource_size(priv->cpsw_wr_res));
	if (!wr_regs) {
		dev_err(priv->dev, "unable to map i/o region\n");
		goto clean_cpsw_wr_iores_ret;
	}
	priv->wr_regs = wr_regs;

	memset(&dma_params, 0, sizeof(dma_params));
	memset(&ale_params, 0, sizeof(ale_params));

	switch (priv->version) {
	case CPSW_VERSION_1:
		priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
		priv->cpts->reg      = ss_regs + CPSW1_CPTS_OFFSET;
		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset         = CPSW1_SLAVE_OFFSET;
		slave_size           = CPSW1_SLAVE_SIZE;
		sliver_offset        = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
		priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
		priv->cpts->reg      = ss_regs + CPSW2_CPTS_OFFSET;
		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset         = CPSW2_SLAVE_OFFSET;
		slave_size           = CPSW2_SLAVE_SIZE;
		sliver_offset        = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys =
			(u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
		break;
	default:
		dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
		ret = -ENODEV;
		goto clean_cpsw_wr_iores_ret;
	}
	for (i = 0; i < priv->data.slaves; i++) {
		struct cpsw_slave *slave = &priv->slaves[i];
		cpsw_slave_init(slave, priv, slave_offset, sliver_offset);
		slave_offset  += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

	dma_params.dev		= &pdev->dev;
	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;

	dma_params.num_chan		= data->channels;
	dma_params.has_soft_reset	= true;
	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size	= data->bd_ram_size;
	dma_params.desc_align		= 16;
	dma_params.has_ext_regs		= true;
	dma_params.desc_hw_addr		= dma_params.desc_mem_phys;

	priv->dma = cpdma_ctlr_create(&dma_params);
	if (!priv->dma) {
		dev_err(priv->dev, "error initializing dma\n");
		ret = -ENOMEM;
		goto clean_wr_iomap_ret;
	}

	priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
				       cpsw_tx_handler);
	priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
				       cpsw_rx_handler);

	if (WARN_ON(!priv->txch || !priv->rxch)) {
		dev_err(priv->dev, "error initializing dma channels\n");
		ret = -ENOMEM;
		goto clean_dma_ret;
	}

	ale_params.dev			= &ndev->dev;
	ale_params.ale_ageout		= ale_ageout;
	ale_params.ale_entries		= data->ale_entries;
	ale_params.ale_ports		= data->slaves;

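	/* The ALE (address lookup engine) makes the forwarding decisions:
	 * address learning/ageing plus the VLAN, unicast and multicast
	 * entries programmed elsewhere in this driver.
	 */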
	priv->ale = cpsw_ale_create(&ale_params);
	if (!priv->ale) {
		dev_err(priv->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto clean_dma_ret;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		dev_err(priv->dev, "error getting irq resource\n");
		ret = -ENOENT;
		goto clean_ale_ret;
	}

	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
		for (i = res->start; i <= res->end; i++) {
			if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
					dev_name(&pdev->dev), priv)) {
				dev_err(priv->dev, "error attaching irq\n");
				goto clean_ale_ret;
			}
			priv->irqs_table[k] = i;
			priv->num_irqs = k + 1;
		}
		k++;
	}

	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	ndev->netdev_ops = &cpsw_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
	netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);

	/* register the network device */
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "error registering net device\n");
		ret = -ENODEV;
		goto clean_irq_ret;
	}

	if (cpts_register(&pdev->dev, priv->cpts,
			  data->cpts_clock_mult, data->cpts_clock_shift))
		dev_err(priv->dev, "error registering cpts device\n");

	cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
		    priv->cpsw_res->start, ndev->irq);

	if (priv->data.dual_emac) {
		ret = cpsw_probe_dual_emac(pdev, priv);
		if (ret) {
			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
			goto clean_irq_ret;
		}
	}

	return 0;

clean_irq_ret:
	for (i = 0; i < priv->num_irqs; i++)
		free_irq(priv->irqs_table[i], priv);
clean_ale_ret:
	cpsw_ale_destroy(priv->ale);
clean_dma_ret:
	cpdma_chan_destroy(priv->txch);
	cpdma_chan_destroy(priv->rxch);
	cpdma_ctlr_destroy(priv->dma);
clean_wr_iomap_ret:
	iounmap(priv->wr_regs);
clean_cpsw_wr_iores_ret:
	release_mem_region(priv->cpsw_wr_res->start,
			   resource_size(priv->cpsw_wr_res));
clean_iomap_ret:
	iounmap(priv->regs);
clean_cpsw_iores_ret:
	release_mem_region(priv->cpsw_res->start,
			   resource_size(priv->cpsw_res));
clean_clk_ret:
	clk_put(priv->clk);
clean_slave_ret:
	pm_runtime_disable(&pdev->dev);
	kfree(priv->slaves);
clean_ndev_ret:
	kfree(priv->data.slave_data);
	free_netdev(priv->ndev);
	return ret;
}

static int cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_priv *priv = netdev_priv(ndev);
	int i;

	platform_set_drvdata(pdev, NULL);
	if (priv->data.dual_emac)
		unregister_netdev(cpsw_get_slave_ndev(priv, 1));
	unregister_netdev(ndev);

	cpts_unregister(priv->cpts);
	for (i = 0; i < priv->num_irqs; i++)
		free_irq(priv->irqs_table[i], priv);

	cpsw_ale_destroy(priv->ale);
	cpdma_chan_destroy(priv->txch);
	cpdma_chan_destroy(priv->rxch);
	cpdma_ctlr_destroy(priv->dma);
	iounmap(priv->regs);
	release_mem_region(priv->cpsw_res->start,
			   resource_size(priv->cpsw_res));
	iounmap(priv->wr_regs);
	release_mem_region(priv->cpsw_wr_res->start,
			   resource_size(priv->cpsw_wr_res));
	pm_runtime_disable(&pdev->dev);
	clk_put(priv->clk);
	kfree(priv->slaves);
	kfree(priv->data.slave_data);
	if (priv->data.dual_emac)
		free_netdev(cpsw_get_slave_ndev(priv, 1));
	free_netdev(ndev);
	return 0;
}

static int cpsw_suspend(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);

	if (netif_running(ndev))
		cpsw_ndo_stop(ndev);
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}

static int cpsw_resume(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	if (netif_running(ndev))
		cpsw_ndo_open(ndev);
	return 0;
}

static const struct dev_pm_ops cpsw_pm_ops = {
	.suspend	= cpsw_suspend,
	.resume		= cpsw_resume,
};

static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.owner	 = THIS_MODULE,
		.pm	 = &cpsw_pm_ops,
		.of_match_table = of_match_ptr(cpsw_of_mtable),
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

static int __init cpsw_init(void)
{
	return platform_driver_register(&cpsw_driver);
}
late_initcall(cpsw_init);

static void __exit cpsw_exit(void)
{
	platform_driver_unregister(&cpsw_driver);
}
module_exit(cpsw_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");