1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Texas Instruments Ethernet Switch Driver 4 * 5 * Copyright (C) 2019 Texas Instruments 6 */ 7 8 #include <linux/io.h> 9 #include <linux/clk.h> 10 #include <linux/timer.h> 11 #include <linux/module.h> 12 #include <linux/irqreturn.h> 13 #include <linux/interrupt.h> 14 #include <linux/if_ether.h> 15 #include <linux/etherdevice.h> 16 #include <linux/net_tstamp.h> 17 #include <linux/phy.h> 18 #include <linux/phy/phy.h> 19 #include <linux/delay.h> 20 #include <linux/pinctrl/consumer.h> 21 #include <linux/pm_runtime.h> 22 #include <linux/gpio/consumer.h> 23 #include <linux/of.h> 24 #include <linux/of_mdio.h> 25 #include <linux/of_net.h> 26 #include <linux/of_device.h> 27 #include <linux/if_vlan.h> 28 #include <linux/kmemleak.h> 29 #include <linux/sys_soc.h> 30 31 #include <net/page_pool.h> 32 #include <net/pkt_cls.h> 33 #include <net/devlink.h> 34 35 #include "cpsw.h" 36 #include "cpsw_ale.h" 37 #include "cpsw_priv.h" 38 #include "cpsw_sl.h" 39 #include "cpsw_switchdev.h" 40 #include "cpts.h" 41 #include "davinci_cpdma.h" 42 43 #include <net/pkt_sched.h> 44 45 static int debug_level; 46 static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT; 47 static int rx_packet_max = CPSW_MAX_PACKET_SIZE; 48 static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; 49 50 struct cpsw_devlink { 51 struct cpsw_common *cpsw; 52 }; 53 54 enum cpsw_devlink_param_id { 55 CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, 56 CPSW_DL_PARAM_SWITCH_MODE, 57 CPSW_DL_PARAM_ALE_BYPASS, 58 }; 59 60 /* struct cpsw_common is not needed, kept here for compatibility 61 * reasons witrh the old driver 62 */ 63 static int cpsw_slave_index_priv(struct cpsw_common *cpsw, 64 struct cpsw_priv *priv) 65 { 66 if (priv->emac_port == HOST_PORT_NUM) 67 return -1; 68 69 return priv->emac_port - 1; 70 } 71 72 static bool cpsw_is_switch_en(struct cpsw_common *cpsw) 73 { 74 return !cpsw->data.dual_emac; 75 } 76 77 static void cpsw_set_promiscious(struct net_device *ndev, bool enable) 78 { 79 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 80 bool enable_uni = false; 81 int i; 82 83 if (cpsw_is_switch_en(cpsw)) 84 return; 85 86 /* Enabling promiscuous mode for one interface will be 87 * common for both the interface as the interface shares 88 * the same hardware resource. 89 */ 90 for (i = 0; i < cpsw->data.slaves; i++) 91 if (cpsw->slaves[i].ndev && 92 (cpsw->slaves[i].ndev->flags & IFF_PROMISC)) 93 enable_uni = true; 94 95 if (!enable && enable_uni) { 96 enable = enable_uni; 97 dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n"); 98 } 99 100 if (enable) { 101 /* Enable unknown unicast, reg/unreg mcast */ 102 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, 103 ALE_P0_UNI_FLOOD, 1); 104 105 dev_dbg(cpsw->dev, "promiscuity enabled\n"); 106 } else { 107 /* Disable unknown unicast */ 108 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, 109 ALE_P0_UNI_FLOOD, 0); 110 dev_dbg(cpsw->dev, "promiscuity disabled\n"); 111 } 112 } 113 114 /** 115 * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes 116 * if it's not deleted 117 * @ndev: device to sync 118 * @addr: address to be added or deleted 119 * @vid: vlan id, if vid < 0 set/unset address for real device 120 * @add: add address if the flag is set or remove otherwise 121 */ 122 static int cpsw_set_mc(struct net_device *ndev, const u8 *addr, 123 int vid, int add) 124 { 125 struct cpsw_priv *priv = netdev_priv(ndev); 126 struct cpsw_common *cpsw = priv->cpsw; 127 int mask, flags, ret, slave_no; 128 129 slave_no = cpsw_slave_index(cpsw, priv); 130 if (vid < 0) 131 vid = cpsw->slaves[slave_no].port_vlan; 132 133 mask = ALE_PORT_HOST; 134 flags = vid ? ALE_VLAN : 0; 135 136 if (add) 137 ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0); 138 else 139 ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid); 140 141 return ret; 142 } 143 144 static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx) 145 { 146 struct addr_sync_ctx *sync_ctx = ctx; 147 struct netdev_hw_addr *ha; 148 int found = 0, ret = 0; 149 150 if (!vdev || !(vdev->flags & IFF_UP)) 151 return 0; 152 153 /* vlan address is relevant if its sync_cnt != 0 */ 154 netdev_for_each_mc_addr(ha, vdev) { 155 if (ether_addr_equal(ha->addr, sync_ctx->addr)) { 156 found = ha->sync_cnt; 157 break; 158 } 159 } 160 161 if (found) 162 sync_ctx->consumed++; 163 164 if (sync_ctx->flush) { 165 if (!found) 166 cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); 167 return 0; 168 } 169 170 if (found) 171 ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1); 172 173 return ret; 174 } 175 176 static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num) 177 { 178 struct addr_sync_ctx sync_ctx; 179 int ret; 180 181 sync_ctx.consumed = 0; 182 sync_ctx.addr = addr; 183 sync_ctx.ndev = ndev; 184 sync_ctx.flush = 0; 185 186 ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); 187 if (sync_ctx.consumed < num && !ret) 188 ret = cpsw_set_mc(ndev, addr, -1, 1); 189 190 return ret; 191 } 192 193 static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num) 194 { 195 struct addr_sync_ctx sync_ctx; 196 197 sync_ctx.consumed = 0; 198 sync_ctx.addr = addr; 199 sync_ctx.ndev = ndev; 200 sync_ctx.flush = 1; 201 202 vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); 203 if (sync_ctx.consumed == num) 204 cpsw_set_mc(ndev, addr, -1, 0); 205 206 return 0; 207 } 208 209 static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx) 210 { 211 struct addr_sync_ctx *sync_ctx = ctx; 212 struct netdev_hw_addr *ha; 213 int found = 0; 214 215 if (!vdev || !(vdev->flags & IFF_UP)) 216 return 0; 217 218 /* vlan address is relevant if its sync_cnt != 0 */ 219 netdev_for_each_mc_addr(ha, vdev) { 220 if (ether_addr_equal(ha->addr, sync_ctx->addr)) { 221 found = ha->sync_cnt; 222 break; 223 } 224 } 225 226 if (!found) 227 return 0; 228 229 sync_ctx->consumed++; 230 cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); 231 return 0; 232 } 233 234 static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num) 235 { 236 struct addr_sync_ctx sync_ctx; 237 238 sync_ctx.addr = addr; 239 sync_ctx.ndev = ndev; 240 sync_ctx.consumed = 0; 241 242 vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx); 243 if (sync_ctx.consumed < num) 244 cpsw_set_mc(ndev, addr, -1, 0); 245 246 return 0; 247 } 248 249 static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 250 { 251 struct cpsw_priv *priv = netdev_priv(ndev); 252 struct cpsw_common *cpsw = priv->cpsw; 253 254 if (ndev->flags & IFF_PROMISC) { 255 /* Enable promiscuous mode */ 256 cpsw_set_promiscious(ndev, true); 257 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port); 258 return; 259 } 260 261 /* Disable promiscuous mode */ 262 cpsw_set_promiscious(ndev, false); 263 264 /* Restore allmulti on vlans if necessary */ 265 cpsw_ale_set_allmulti(cpsw->ale, 266 ndev->flags & IFF_ALLMULTI, priv->emac_port); 267 268 /* add/remove mcast address either for real netdev or for vlan */ 269 __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, 270 cpsw_del_mc_addr); 271 } 272 273 static unsigned int cpsw_rxbuf_total_len(unsigned int len) 274 { 275 len += CPSW_HEADROOM; 276 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 277 278 return SKB_DATA_ALIGN(len); 279 } 280 281 static void cpsw_rx_handler(void *token, int len, int status) 282 { 283 struct page *new_page, *page = token; 284 void *pa = page_address(page); 285 int headroom = CPSW_HEADROOM; 286 struct cpsw_meta_xdp *xmeta; 287 struct cpsw_common *cpsw; 288 struct net_device *ndev; 289 int port, ch, pkt_size; 290 struct cpsw_priv *priv; 291 struct page_pool *pool; 292 struct sk_buff *skb; 293 struct xdp_buff xdp; 294 int ret = 0; 295 dma_addr_t dma; 296 297 xmeta = pa + CPSW_XMETA_OFFSET; 298 cpsw = ndev_to_cpsw(xmeta->ndev); 299 ndev = xmeta->ndev; 300 pkt_size = cpsw->rx_packet_max; 301 ch = xmeta->ch; 302 303 if (status >= 0) { 304 port = CPDMA_RX_SOURCE_PORT(status); 305 if (port) 306 ndev = cpsw->slaves[--port].ndev; 307 } 308 309 priv = netdev_priv(ndev); 310 pool = cpsw->page_pool[ch]; 311 312 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { 313 /* In dual emac mode check for all interfaces */ 314 if (cpsw->usage_count && status >= 0) { 315 /* The packet received is for the interface which 316 * is already down and the other interface is up 317 * and running, instead of freeing which results 318 * in reducing of the number of rx descriptor in 319 * DMA engine, requeue page back to cpdma. 320 */ 321 new_page = page; 322 goto requeue; 323 } 324 325 /* the interface is going down, pages are purged */ 326 page_pool_recycle_direct(pool, page); 327 return; 328 } 329 330 new_page = page_pool_dev_alloc_pages(pool); 331 if (unlikely(!new_page)) { 332 new_page = page; 333 ndev->stats.rx_dropped++; 334 goto requeue; 335 } 336 337 if (priv->xdp_prog) { 338 if (status & CPDMA_RX_VLAN_ENCAP) { 339 xdp.data = pa + CPSW_HEADROOM + 340 CPSW_RX_VLAN_ENCAP_HDR_SIZE; 341 xdp.data_end = xdp.data + len - 342 CPSW_RX_VLAN_ENCAP_HDR_SIZE; 343 } else { 344 xdp.data = pa + CPSW_HEADROOM; 345 xdp.data_end = xdp.data + len; 346 } 347 348 xdp_set_data_meta_invalid(&xdp); 349 350 xdp.data_hard_start = pa; 351 xdp.rxq = &priv->xdp_rxq[ch]; 352 xdp.frame_sz = PAGE_SIZE; 353 354 ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port); 355 if (ret != CPSW_XDP_PASS) 356 goto requeue; 357 358 /* XDP prog might have changed packet data and boundaries */ 359 len = xdp.data_end - xdp.data; 360 headroom = xdp.data - xdp.data_hard_start; 361 362 /* XDP prog can modify vlan tag, so can't use encap header */ 363 status &= ~CPDMA_RX_VLAN_ENCAP; 364 } 365 366 /* pass skb to netstack if no XDP prog or returned XDP_PASS */ 367 skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size)); 368 if (!skb) { 369 ndev->stats.rx_dropped++; 370 page_pool_recycle_direct(pool, page); 371 goto requeue; 372 } 373 374 skb->offload_fwd_mark = priv->offload_fwd_mark; 375 skb_reserve(skb, headroom); 376 skb_put(skb, len); 377 skb->dev = ndev; 378 if (status & CPDMA_RX_VLAN_ENCAP) 379 cpsw_rx_vlan_encap(skb); 380 if (priv->rx_ts_enabled) 381 cpts_rx_timestamp(cpsw->cpts, skb); 382 skb->protocol = eth_type_trans(skb, ndev); 383 384 /* unmap page as no netstack skb page recycling */ 385 page_pool_release_page(pool, page); 386 netif_receive_skb(skb); 387 388 ndev->stats.rx_bytes += len; 389 ndev->stats.rx_packets++; 390 391 requeue: 392 xmeta = page_address(new_page) + CPSW_XMETA_OFFSET; 393 xmeta->ndev = ndev; 394 xmeta->ch = ch; 395 396 dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM; 397 ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, 398 pkt_size, 0); 399 if (ret < 0) { 400 WARN_ON(ret == -ENOMEM); 401 page_pool_recycle_direct(pool, new_page); 402 } 403 } 404 405 static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, 406 unsigned short vid) 407 { 408 struct cpsw_common *cpsw = priv->cpsw; 409 int unreg_mcast_mask = 0; 410 int mcast_mask; 411 u32 port_mask; 412 int ret; 413 414 port_mask = (1 << priv->emac_port) | ALE_PORT_HOST; 415 416 mcast_mask = ALE_PORT_HOST; 417 if (priv->ndev->flags & IFF_ALLMULTI) 418 unreg_mcast_mask = mcast_mask; 419 420 ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask, 421 unreg_mcast_mask); 422 if (ret != 0) 423 return ret; 424 425 ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, 426 HOST_PORT_NUM, ALE_VLAN, vid); 427 if (ret != 0) 428 goto clean_vid; 429 430 ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 431 mcast_mask, ALE_VLAN, vid, 0); 432 if (ret != 0) 433 goto clean_vlan_ucast; 434 return 0; 435 436 clean_vlan_ucast: 437 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 438 HOST_PORT_NUM, ALE_VLAN, vid); 439 clean_vid: 440 cpsw_ale_del_vlan(cpsw->ale, vid, 0); 441 return ret; 442 } 443 444 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, 445 __be16 proto, u16 vid) 446 { 447 struct cpsw_priv *priv = netdev_priv(ndev); 448 struct cpsw_common *cpsw = priv->cpsw; 449 int ret, i; 450 451 if (cpsw_is_switch_en(cpsw)) { 452 dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n"); 453 return 0; 454 } 455 456 if (vid == cpsw->data.default_vlan) 457 return 0; 458 459 ret = pm_runtime_get_sync(cpsw->dev); 460 if (ret < 0) { 461 pm_runtime_put_noidle(cpsw->dev); 462 return ret; 463 } 464 465 /* In dual EMAC, reserved VLAN id should not be used for 466 * creating VLAN interfaces as this can break the dual 467 * EMAC port separation 468 */ 469 for (i = 0; i < cpsw->data.slaves; i++) { 470 if (cpsw->slaves[i].ndev && 471 vid == cpsw->slaves[i].port_vlan) { 472 ret = -EINVAL; 473 goto err; 474 } 475 } 476 477 dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid); 478 ret = cpsw_add_vlan_ale_entry(priv, vid); 479 err: 480 pm_runtime_put(cpsw->dev); 481 return ret; 482 } 483 484 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) 485 { 486 struct cpsw_priv *priv = arg; 487 488 if (!vdev || !vid) 489 return 0; 490 491 cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid); 492 return 0; 493 } 494 495 /* restore resources after port reset */ 496 static void cpsw_restore(struct cpsw_priv *priv) 497 { 498 struct cpsw_common *cpsw = priv->cpsw; 499 500 /* restore vlan configurations */ 501 vlan_for_each(priv->ndev, cpsw_restore_vlans, priv); 502 503 /* restore MQPRIO offload */ 504 cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv); 505 506 /* restore CBS offload */ 507 cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv); 508 } 509 510 static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw) 511 { 512 char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0}; 513 514 cpsw_ale_add_mcast(cpsw->ale, stpa, 515 ALE_PORT_HOST, ALE_SUPER, 0, 516 ALE_MCAST_BLOCK_LEARN_FWD); 517 } 518 519 static void cpsw_init_host_port_switch(struct cpsw_common *cpsw) 520 { 521 int vlan = cpsw->data.default_vlan; 522 523 writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl); 524 525 writel(vlan, &cpsw->host_port_regs->port_vlan); 526 527 cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, 528 ALE_ALL_PORTS, ALE_ALL_PORTS, 529 ALE_PORT_1 | ALE_PORT_2); 530 531 cpsw_init_stp_ale_entry(cpsw); 532 533 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1); 534 dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n"); 535 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0); 536 } 537 538 static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw) 539 { 540 int vlan = cpsw->data.default_vlan; 541 542 writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl); 543 544 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0); 545 dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n"); 546 547 writel(vlan, &cpsw->host_port_regs->port_vlan); 548 549 cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); 550 /* learning make no sense in dual_mac mode */ 551 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1); 552 } 553 554 static void cpsw_init_host_port(struct cpsw_priv *priv) 555 { 556 struct cpsw_common *cpsw = priv->cpsw; 557 u32 control_reg; 558 559 /* soft reset the controller and initialize ale */ 560 soft_reset("cpsw", &cpsw->regs->soft_reset); 561 cpsw_ale_start(cpsw->ale); 562 563 /* switch to vlan unaware mode */ 564 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 565 CPSW_ALE_VLAN_AWARE); 566 control_reg = readl(&cpsw->regs->control); 567 control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP; 568 writel(control_reg, &cpsw->regs->control); 569 570 /* setup host port priority mapping */ 571 writel_relaxed(CPDMA_TX_PRIORITY_MAP, 572 &cpsw->host_port_regs->cpdma_tx_pri_map); 573 writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map); 574 575 /* disable priority elevation */ 576 writel_relaxed(0, &cpsw->regs->ptype); 577 578 /* enable statistics collection only on all ports */ 579 writel_relaxed(0x7, &cpsw->regs->stat_port_en); 580 581 /* Enable internal fifo flow control */ 582 writel(0x7, &cpsw->regs->flow_control); 583 584 if (cpsw_is_switch_en(cpsw)) 585 cpsw_init_host_port_switch(cpsw); 586 else 587 cpsw_init_host_port_dual_mac(cpsw); 588 589 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, 590 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 591 } 592 593 static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv, 594 struct cpsw_slave *slave) 595 { 596 u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST; 597 struct cpsw_common *cpsw = priv->cpsw; 598 u32 reg; 599 600 reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : 601 CPSW2_PORT_VLAN; 602 slave_write(slave, slave->port_vlan, reg); 603 604 cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask, 605 port_mask, port_mask, 0); 606 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 607 ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 608 ALE_MCAST_FWD); 609 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, 610 HOST_PORT_NUM, ALE_VLAN | 611 ALE_SECURE, slave->port_vlan); 612 cpsw_ale_control_set(cpsw->ale, priv->emac_port, 613 ALE_PORT_DROP_UNKNOWN_VLAN, 1); 614 /* learning make no sense in dual_mac mode */ 615 cpsw_ale_control_set(cpsw->ale, priv->emac_port, 616 ALE_PORT_NOLEARN, 1); 617 } 618 619 static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv, 620 struct cpsw_slave *slave) 621 { 622 u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST; 623 struct cpsw_common *cpsw = priv->cpsw; 624 u32 reg; 625 626 cpsw_ale_control_set(cpsw->ale, priv->emac_port, 627 ALE_PORT_DROP_UNKNOWN_VLAN, 0); 628 cpsw_ale_control_set(cpsw->ale, priv->emac_port, 629 ALE_PORT_NOLEARN, 0); 630 /* disabling SA_UPDATE required to make stp work, without this setting 631 * Host MAC addresses will jump between ports. 632 * As per TRM MAC address can be defined as unicast supervisory (super) 633 * by setting both (ALE_BLOCKED | ALE_SECURE) which should prevent 634 * SA_UPDATE, but HW seems works incorrectly and setting ALE_SECURE 635 * causes STP packets to be dropped due to ingress filter 636 * if (source address found) and (secure) and 637 * (receive port number != port_number)) 638 * then discard the packet 639 */ 640 cpsw_ale_control_set(cpsw->ale, priv->emac_port, 641 ALE_PORT_NO_SA_UPDATE, 1); 642 643 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 644 port_mask, ALE_VLAN, slave->port_vlan, 645 ALE_MCAST_FWD_2); 646 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, 647 HOST_PORT_NUM, ALE_VLAN, slave->port_vlan); 648 649 reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : 650 CPSW2_PORT_VLAN; 651 slave_write(slave, slave->port_vlan, reg); 652 } 653 654 static void cpsw_adjust_link(struct net_device *ndev) 655 { 656 struct cpsw_priv *priv = netdev_priv(ndev); 657 struct cpsw_common *cpsw = priv->cpsw; 658 struct cpsw_slave *slave; 659 struct phy_device *phy; 660 u32 mac_control = 0; 661 662 slave = &cpsw->slaves[priv->emac_port - 1]; 663 phy = slave->phy; 664 665 if (!phy) 666 return; 667 668 if (phy->link) { 669 mac_control = CPSW_SL_CTL_GMII_EN; 670 671 if (phy->speed == 1000) 672 mac_control |= CPSW_SL_CTL_GIG; 673 if (phy->duplex) 674 mac_control |= CPSW_SL_CTL_FULLDUPLEX; 675 676 /* set speed_in input in case RMII mode is used in 100Mbps */ 677 if (phy->speed == 100) 678 mac_control |= CPSW_SL_CTL_IFCTL_A; 679 /* in band mode only works in 10Mbps RGMII mode */ 680 else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) 681 mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */ 682 683 if (priv->rx_pause) 684 mac_control |= CPSW_SL_CTL_RX_FLOW_EN; 685 686 if (priv->tx_pause) 687 mac_control |= CPSW_SL_CTL_TX_FLOW_EN; 688 689 if (mac_control != slave->mac_control) 690 cpsw_sl_ctl_set(slave->mac_sl, mac_control); 691 692 /* enable forwarding */ 693 cpsw_ale_control_set(cpsw->ale, priv->emac_port, 694 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 695 696 netif_tx_wake_all_queues(ndev); 697 698 if (priv->shp_cfg_speed && 699 priv->shp_cfg_speed != slave->phy->speed && 700 !cpsw_shp_is_off(priv)) 701 dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!"); 702 } else { 703 netif_tx_stop_all_queues(ndev); 704 705 mac_control = 0; 706 /* disable forwarding */ 707 cpsw_ale_control_set(cpsw->ale, priv->emac_port, 708 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 709 710 cpsw_sl_wait_for_idle(slave->mac_sl, 100); 711 712 cpsw_sl_ctl_reset(slave->mac_sl); 713 } 714 715 if (mac_control != slave->mac_control) 716 phy_print_status(phy); 717 718 slave->mac_control = mac_control; 719 720 if (phy->link && cpsw_need_resplit(cpsw)) 721 cpsw_split_res(cpsw); 722 } 723 724 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 725 { 726 struct cpsw_common *cpsw = priv->cpsw; 727 struct phy_device *phy; 728 729 cpsw_sl_reset(slave->mac_sl, 100); 730 cpsw_sl_ctl_reset(slave->mac_sl); 731 732 /* setup priority mapping */ 733 cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP, 734 RX_PRIORITY_MAPPING); 735 736 switch (cpsw->version) { 737 case CPSW_VERSION_1: 738 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); 739 /* Increase RX FIFO size to 5 for supporting fullduplex 740 * flow control mode 741 */ 742 slave_write(slave, 743 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | 744 CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); 745 break; 746 case CPSW_VERSION_2: 747 case CPSW_VERSION_3: 748 case CPSW_VERSION_4: 749 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); 750 /* Increase RX FIFO size to 5 for supporting fullduplex 751 * flow control mode 752 */ 753 slave_write(slave, 754 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | 755 CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); 756 break; 757 } 758 759 /* setup max packet size, and mac address */ 760 cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN, 761 cpsw->rx_packet_max); 762 cpsw_set_slave_mac(slave, priv); 763 764 slave->mac_control = 0; /* no link yet */ 765 766 if (cpsw_is_switch_en(cpsw)) 767 cpsw_port_add_switch_def_ale_entries(priv, slave); 768 else 769 cpsw_port_add_dual_emac_def_ale_entries(priv, slave); 770 771 if (!slave->data->phy_node) 772 dev_err(priv->dev, "no phy found on slave %d\n", 773 slave->slave_num); 774 phy = of_phy_connect(priv->ndev, slave->data->phy_node, 775 &cpsw_adjust_link, 0, slave->data->phy_if); 776 if (!phy) { 777 dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n", 778 slave->data->phy_node, 779 slave->slave_num); 780 return; 781 } 782 slave->phy = phy; 783 784 phy_attached_info(slave->phy); 785 786 phy_start(slave->phy); 787 788 /* Configure GMII_SEL register */ 789 phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET, 790 slave->data->phy_if); 791 } 792 793 static int cpsw_ndo_stop(struct net_device *ndev) 794 { 795 struct cpsw_priv *priv = netdev_priv(ndev); 796 struct cpsw_common *cpsw = priv->cpsw; 797 struct cpsw_slave *slave; 798 799 cpsw_info(priv, ifdown, "shutting down ndev\n"); 800 slave = &cpsw->slaves[priv->emac_port - 1]; 801 if (slave->phy) 802 phy_stop(slave->phy); 803 804 netif_tx_stop_all_queues(priv->ndev); 805 806 if (slave->phy) { 807 phy_disconnect(slave->phy); 808 slave->phy = NULL; 809 } 810 811 __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc); 812 813 if (cpsw->usage_count <= 1) { 814 napi_disable(&cpsw->napi_rx); 815 napi_disable(&cpsw->napi_tx); 816 cpts_unregister(cpsw->cpts); 817 cpsw_intr_disable(cpsw); 818 cpdma_ctlr_stop(cpsw->dma); 819 cpsw_ale_stop(cpsw->ale); 820 cpsw_destroy_xdp_rxqs(cpsw); 821 } 822 823 if (cpsw_need_resplit(cpsw)) 824 cpsw_split_res(cpsw); 825 826 cpsw->usage_count--; 827 pm_runtime_put_sync(cpsw->dev); 828 return 0; 829 } 830 831 static int cpsw_ndo_open(struct net_device *ndev) 832 { 833 struct cpsw_priv *priv = netdev_priv(ndev); 834 struct cpsw_common *cpsw = priv->cpsw; 835 int ret; 836 837 dev_info(priv->dev, "starting ndev. mode: %s\n", 838 cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac"); 839 ret = pm_runtime_get_sync(cpsw->dev); 840 if (ret < 0) { 841 pm_runtime_put_noidle(cpsw->dev); 842 return ret; 843 } 844 845 /* Notify the stack of the actual queue counts. */ 846 ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); 847 if (ret) { 848 dev_err(priv->dev, "cannot set real number of tx queues\n"); 849 goto pm_cleanup; 850 } 851 852 ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num); 853 if (ret) { 854 dev_err(priv->dev, "cannot set real number of rx queues\n"); 855 goto pm_cleanup; 856 } 857 858 /* Initialize host and slave ports */ 859 if (!cpsw->usage_count) 860 cpsw_init_host_port(priv); 861 cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv); 862 863 /* initialize shared resources for every ndev */ 864 if (!cpsw->usage_count) { 865 /* create rxqs for both infs in dual mac as they use same pool 866 * and must be destroyed together when no users. 867 */ 868 ret = cpsw_create_xdp_rxqs(cpsw); 869 if (ret < 0) 870 goto err_cleanup; 871 872 ret = cpsw_fill_rx_channels(priv); 873 if (ret < 0) 874 goto err_cleanup; 875 876 if (cpsw->cpts) { 877 if (cpts_register(cpsw->cpts)) 878 dev_err(priv->dev, "error registering cpts device\n"); 879 else 880 writel(0x10, &cpsw->wr_regs->misc_en); 881 } 882 883 napi_enable(&cpsw->napi_rx); 884 napi_enable(&cpsw->napi_tx); 885 886 if (cpsw->tx_irq_disabled) { 887 cpsw->tx_irq_disabled = false; 888 enable_irq(cpsw->irqs_table[1]); 889 } 890 891 if (cpsw->rx_irq_disabled) { 892 cpsw->rx_irq_disabled = false; 893 enable_irq(cpsw->irqs_table[0]); 894 } 895 } 896 897 cpsw_restore(priv); 898 899 /* Enable Interrupt pacing if configured */ 900 if (cpsw->coal_intvl != 0) { 901 struct ethtool_coalesce coal; 902 903 coal.rx_coalesce_usecs = cpsw->coal_intvl; 904 cpsw_set_coalesce(ndev, &coal); 905 } 906 907 cpdma_ctlr_start(cpsw->dma); 908 cpsw_intr_enable(cpsw); 909 cpsw->usage_count++; 910 911 return 0; 912 913 err_cleanup: 914 cpsw_ndo_stop(ndev); 915 916 pm_cleanup: 917 pm_runtime_put_sync(cpsw->dev); 918 return ret; 919 } 920 921 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, 922 struct net_device *ndev) 923 { 924 struct cpsw_priv *priv = netdev_priv(ndev); 925 struct cpsw_common *cpsw = priv->cpsw; 926 struct cpts *cpts = cpsw->cpts; 927 struct netdev_queue *txq; 928 struct cpdma_chan *txch; 929 int ret, q_idx; 930 931 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { 932 cpsw_err(priv, tx_err, "packet pad failed\n"); 933 ndev->stats.tx_dropped++; 934 return NET_XMIT_DROP; 935 } 936 937 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 938 priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb)) 939 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 940 941 q_idx = skb_get_queue_mapping(skb); 942 if (q_idx >= cpsw->tx_ch_num) 943 q_idx = q_idx % cpsw->tx_ch_num; 944 945 txch = cpsw->txv[q_idx].ch; 946 txq = netdev_get_tx_queue(ndev, q_idx); 947 skb_tx_timestamp(skb); 948 ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 949 priv->emac_port); 950 if (unlikely(ret != 0)) { 951 cpsw_err(priv, tx_err, "desc submit failed\n"); 952 goto fail; 953 } 954 955 /* If there is no more tx desc left free then we need to 956 * tell the kernel to stop sending us tx frames. 957 */ 958 if (unlikely(!cpdma_check_free_tx_desc(txch))) { 959 netif_tx_stop_queue(txq); 960 961 /* Barrier, so that stop_queue visible to other cpus */ 962 smp_mb__after_atomic(); 963 964 if (cpdma_check_free_tx_desc(txch)) 965 netif_tx_wake_queue(txq); 966 } 967 968 return NETDEV_TX_OK; 969 fail: 970 ndev->stats.tx_dropped++; 971 netif_tx_stop_queue(txq); 972 973 /* Barrier, so that stop_queue visible to other cpus */ 974 smp_mb__after_atomic(); 975 976 if (cpdma_check_free_tx_desc(txch)) 977 netif_tx_wake_queue(txq); 978 979 return NETDEV_TX_BUSY; 980 } 981 982 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) 983 { 984 struct sockaddr *addr = (struct sockaddr *)p; 985 struct cpsw_priv *priv = netdev_priv(ndev); 986 struct cpsw_common *cpsw = priv->cpsw; 987 int ret, slave_no; 988 int flags = 0; 989 u16 vid = 0; 990 991 slave_no = cpsw_slave_index(cpsw, priv); 992 if (!is_valid_ether_addr(addr->sa_data)) 993 return -EADDRNOTAVAIL; 994 995 ret = pm_runtime_get_sync(cpsw->dev); 996 if (ret < 0) { 997 pm_runtime_put_noidle(cpsw->dev); 998 return ret; 999 } 1000 1001 vid = cpsw->slaves[slave_no].port_vlan; 1002 flags = ALE_VLAN | ALE_SECURE; 1003 1004 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, 1005 flags, vid); 1006 cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM, 1007 flags, vid); 1008 1009 ether_addr_copy(priv->mac_addr, addr->sa_data); 1010 ether_addr_copy(ndev->dev_addr, priv->mac_addr); 1011 cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv); 1012 1013 pm_runtime_put(cpsw->dev); 1014 1015 return 0; 1016 } 1017 1018 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, 1019 __be16 proto, u16 vid) 1020 { 1021 struct cpsw_priv *priv = netdev_priv(ndev); 1022 struct cpsw_common *cpsw = priv->cpsw; 1023 int ret; 1024 int i; 1025 1026 if (cpsw_is_switch_en(cpsw)) { 1027 dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n"); 1028 return 0; 1029 } 1030 1031 if (vid == cpsw->data.default_vlan) 1032 return 0; 1033 1034 ret = pm_runtime_get_sync(cpsw->dev); 1035 if (ret < 0) { 1036 pm_runtime_put_noidle(cpsw->dev); 1037 return ret; 1038 } 1039 1040 /* reset the return code as pm_runtime_get_sync() can return 1041 * non zero values as well. 1042 */ 1043 ret = 0; 1044 for (i = 0; i < cpsw->data.slaves; i++) { 1045 if (cpsw->slaves[i].ndev && 1046 vid == cpsw->slaves[i].port_vlan) { 1047 ret = -EINVAL; 1048 goto err; 1049 } 1050 } 1051 1052 dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid); 1053 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0); 1054 if (ret) 1055 dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret); 1056 ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 1057 HOST_PORT_NUM, ALE_VLAN, vid); 1058 if (ret) 1059 dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n", 1060 ret); 1061 ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 1062 0, ALE_VLAN, vid); 1063 if (ret) 1064 dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n", 1065 ret); 1066 cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid); 1067 ret = 0; 1068 err: 1069 pm_runtime_put(cpsw->dev); 1070 return ret; 1071 } 1072 1073 static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name, 1074 size_t len) 1075 { 1076 struct cpsw_priv *priv = netdev_priv(ndev); 1077 int err; 1078 1079 err = snprintf(name, len, "p%d", priv->emac_port); 1080 1081 if (err >= len) 1082 return -EINVAL; 1083 1084 return 0; 1085 } 1086 1087 #ifdef CONFIG_NET_POLL_CONTROLLER 1088 static void cpsw_ndo_poll_controller(struct net_device *ndev) 1089 { 1090 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 1091 1092 cpsw_intr_disable(cpsw); 1093 cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw); 1094 cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw); 1095 cpsw_intr_enable(cpsw); 1096 } 1097 #endif 1098 1099 static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, 1100 struct xdp_frame **frames, u32 flags) 1101 { 1102 struct cpsw_priv *priv = netdev_priv(ndev); 1103 struct xdp_frame *xdpf; 1104 int i, drops = 0; 1105 1106 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 1107 return -EINVAL; 1108 1109 for (i = 0; i < n; i++) { 1110 xdpf = frames[i]; 1111 if (xdpf->len < CPSW_MIN_PACKET_SIZE) { 1112 xdp_return_frame_rx_napi(xdpf); 1113 drops++; 1114 continue; 1115 } 1116 1117 if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port)) 1118 drops++; 1119 } 1120 1121 return n - drops; 1122 } 1123 1124 static int cpsw_get_port_parent_id(struct net_device *ndev, 1125 struct netdev_phys_item_id *ppid) 1126 { 1127 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 1128 1129 ppid->id_len = sizeof(cpsw->base_mac); 1130 memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len); 1131 1132 return 0; 1133 } 1134 1135 static const struct net_device_ops cpsw_netdev_ops = { 1136 .ndo_open = cpsw_ndo_open, 1137 .ndo_stop = cpsw_ndo_stop, 1138 .ndo_start_xmit = cpsw_ndo_start_xmit, 1139 .ndo_set_mac_address = cpsw_ndo_set_mac_address, 1140 .ndo_do_ioctl = cpsw_ndo_ioctl, 1141 .ndo_validate_addr = eth_validate_addr, 1142 .ndo_tx_timeout = cpsw_ndo_tx_timeout, 1143 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, 1144 .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate, 1145 #ifdef CONFIG_NET_POLL_CONTROLLER 1146 .ndo_poll_controller = cpsw_ndo_poll_controller, 1147 #endif 1148 .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, 1149 .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, 1150 .ndo_setup_tc = cpsw_ndo_setup_tc, 1151 .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name, 1152 .ndo_bpf = cpsw_ndo_bpf, 1153 .ndo_xdp_xmit = cpsw_ndo_xdp_xmit, 1154 .ndo_get_port_parent_id = cpsw_get_port_parent_id, 1155 }; 1156 1157 static void cpsw_get_drvinfo(struct net_device *ndev, 1158 struct ethtool_drvinfo *info) 1159 { 1160 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 1161 struct platform_device *pdev; 1162 1163 pdev = to_platform_device(cpsw->dev); 1164 strlcpy(info->driver, "cpsw-switch", sizeof(info->driver)); 1165 strlcpy(info->version, "2.0", sizeof(info->version)); 1166 strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); 1167 } 1168 1169 static int cpsw_set_pauseparam(struct net_device *ndev, 1170 struct ethtool_pauseparam *pause) 1171 { 1172 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 1173 struct cpsw_priv *priv = netdev_priv(ndev); 1174 int slave_no; 1175 1176 slave_no = cpsw_slave_index(cpsw, priv); 1177 if (!cpsw->slaves[slave_no].phy) 1178 return -EINVAL; 1179 1180 if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause)) 1181 return -EINVAL; 1182 1183 priv->rx_pause = pause->rx_pause ? true : false; 1184 priv->tx_pause = pause->tx_pause ? true : false; 1185 1186 phy_set_asym_pause(cpsw->slaves[slave_no].phy, 1187 priv->rx_pause, priv->tx_pause); 1188 1189 return 0; 1190 } 1191 1192 static int cpsw_set_channels(struct net_device *ndev, 1193 struct ethtool_channels *chs) 1194 { 1195 return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler); 1196 } 1197 1198 static const struct ethtool_ops cpsw_ethtool_ops = { 1199 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS, 1200 .get_drvinfo = cpsw_get_drvinfo, 1201 .get_msglevel = cpsw_get_msglevel, 1202 .set_msglevel = cpsw_set_msglevel, 1203 .get_link = ethtool_op_get_link, 1204 .get_ts_info = cpsw_get_ts_info, 1205 .get_coalesce = cpsw_get_coalesce, 1206 .set_coalesce = cpsw_set_coalesce, 1207 .get_sset_count = cpsw_get_sset_count, 1208 .get_strings = cpsw_get_strings, 1209 .get_ethtool_stats = cpsw_get_ethtool_stats, 1210 .get_pauseparam = cpsw_get_pauseparam, 1211 .set_pauseparam = cpsw_set_pauseparam, 1212 .get_wol = cpsw_get_wol, 1213 .set_wol = cpsw_set_wol, 1214 .get_regs_len = cpsw_get_regs_len, 1215 .get_regs = cpsw_get_regs, 1216 .begin = cpsw_ethtool_op_begin, 1217 .complete = cpsw_ethtool_op_complete, 1218 .get_channels = cpsw_get_channels, 1219 .set_channels = cpsw_set_channels, 1220 .get_link_ksettings = cpsw_get_link_ksettings, 1221 .set_link_ksettings = cpsw_set_link_ksettings, 1222 .get_eee = cpsw_get_eee, 1223 .set_eee = cpsw_set_eee, 1224 .nway_reset = cpsw_nway_reset, 1225 .get_ringparam = cpsw_get_ringparam, 1226 .set_ringparam = cpsw_set_ringparam, 1227 }; 1228 1229 static int cpsw_probe_dt(struct cpsw_common *cpsw) 1230 { 1231 struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np; 1232 struct cpsw_platform_data *data = &cpsw->data; 1233 struct device *dev = cpsw->dev; 1234 int ret; 1235 u32 prop; 1236 1237 if (!node) 1238 return -EINVAL; 1239 1240 tmp_node = of_get_child_by_name(node, "ethernet-ports"); 1241 if (!tmp_node) 1242 return -ENOENT; 1243 data->slaves = of_get_child_count(tmp_node); 1244 if (data->slaves != CPSW_SLAVE_PORTS_NUM) { 1245 of_node_put(tmp_node); 1246 return -ENOENT; 1247 } 1248 1249 data->active_slave = 0; 1250 data->channels = CPSW_MAX_QUEUES; 1251 data->dual_emac = true; 1252 data->bd_ram_size = CPSW_BD_RAM_SIZE; 1253 data->mac_control = 0; 1254 1255 data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM, 1256 sizeof(struct cpsw_slave_data), 1257 GFP_KERNEL); 1258 if (!data->slave_data) 1259 return -ENOMEM; 1260 1261 /* Populate all the child nodes here... 1262 */ 1263 ret = devm_of_platform_populate(dev); 1264 /* We do not want to force this, as in some cases may not have child */ 1265 if (ret) 1266 dev_warn(dev, "Doesn't have any child node\n"); 1267 1268 for_each_child_of_node(tmp_node, port_np) { 1269 struct cpsw_slave_data *slave_data; 1270 const void *mac_addr; 1271 u32 port_id; 1272 1273 ret = of_property_read_u32(port_np, "reg", &port_id); 1274 if (ret < 0) { 1275 dev_err(dev, "%pOF error reading port_id %d\n", 1276 port_np, ret); 1277 goto err_node_put; 1278 } 1279 1280 if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) { 1281 dev_err(dev, "%pOF has invalid port_id %u\n", 1282 port_np, port_id); 1283 ret = -EINVAL; 1284 goto err_node_put; 1285 } 1286 1287 slave_data = &data->slave_data[port_id - 1]; 1288 1289 slave_data->disabled = !of_device_is_available(port_np); 1290 if (slave_data->disabled) 1291 continue; 1292 1293 slave_data->slave_node = port_np; 1294 slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL); 1295 if (IS_ERR(slave_data->ifphy)) { 1296 ret = PTR_ERR(slave_data->ifphy); 1297 dev_err(dev, "%pOF: Error retrieving port phy: %d\n", 1298 port_np, ret); 1299 goto err_node_put; 1300 } 1301 1302 if (of_phy_is_fixed_link(port_np)) { 1303 ret = of_phy_register_fixed_link(port_np); 1304 if (ret) { 1305 if (ret != -EPROBE_DEFER) 1306 dev_err(dev, "%pOF failed to register fixed-link phy: %d\n", 1307 port_np, ret); 1308 goto err_node_put; 1309 } 1310 slave_data->phy_node = of_node_get(port_np); 1311 } else { 1312 slave_data->phy_node = 1313 of_parse_phandle(port_np, "phy-handle", 0); 1314 } 1315 1316 if (!slave_data->phy_node) { 1317 dev_err(dev, "%pOF no phy found\n", port_np); 1318 ret = -ENODEV; 1319 goto err_node_put; 1320 } 1321 1322 ret = of_get_phy_mode(port_np, &slave_data->phy_if); 1323 if (ret) { 1324 dev_err(dev, "%pOF read phy-mode err %d\n", 1325 port_np, ret); 1326 goto err_node_put; 1327 } 1328 1329 mac_addr = of_get_mac_address(port_np); 1330 if (!IS_ERR(mac_addr)) { 1331 ether_addr_copy(slave_data->mac_addr, mac_addr); 1332 } else { 1333 ret = ti_cm_get_macid(dev, port_id - 1, 1334 slave_data->mac_addr); 1335 if (ret) 1336 goto err_node_put; 1337 } 1338 1339 if (of_property_read_u32(port_np, "ti,dual-emac-pvid", 1340 &prop)) { 1341 dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n", 1342 port_np); 1343 slave_data->dual_emac_res_vlan = port_id; 1344 dev_err(dev, "%pOF Using %d as Reserved VLAN\n", 1345 port_np, slave_data->dual_emac_res_vlan); 1346 } else { 1347 slave_data->dual_emac_res_vlan = prop; 1348 } 1349 } 1350 1351 of_node_put(tmp_node); 1352 return 0; 1353 1354 err_node_put: 1355 of_node_put(port_np); 1356 return ret; 1357 } 1358 1359 static void cpsw_remove_dt(struct cpsw_common *cpsw) 1360 { 1361 struct cpsw_platform_data *data = &cpsw->data; 1362 int i = 0; 1363 1364 for (i = 0; i < cpsw->data.slaves; i++) { 1365 struct cpsw_slave_data *slave_data = &data->slave_data[i]; 1366 struct device_node *port_np = slave_data->phy_node; 1367 1368 if (port_np) { 1369 if (of_phy_is_fixed_link(port_np)) 1370 of_phy_deregister_fixed_link(port_np); 1371 1372 of_node_put(port_np); 1373 } 1374 } 1375 } 1376 1377 static int cpsw_create_ports(struct cpsw_common *cpsw) 1378 { 1379 struct cpsw_platform_data *data = &cpsw->data; 1380 struct net_device *ndev, *napi_ndev = NULL; 1381 struct device *dev = cpsw->dev; 1382 struct cpsw_priv *priv; 1383 int ret = 0, i = 0; 1384 1385 for (i = 0; i < cpsw->data.slaves; i++) { 1386 struct cpsw_slave_data *slave_data = &data->slave_data[i]; 1387 1388 if (slave_data->disabled) 1389 continue; 1390 1391 ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv), 1392 CPSW_MAX_QUEUES, 1393 CPSW_MAX_QUEUES); 1394 if (!ndev) { 1395 dev_err(dev, "error allocating net_device\n"); 1396 return -ENOMEM; 1397 } 1398 1399 priv = netdev_priv(ndev); 1400 priv->cpsw = cpsw; 1401 priv->ndev = ndev; 1402 priv->dev = dev; 1403 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1404 priv->emac_port = i + 1; 1405 1406 if (is_valid_ether_addr(slave_data->mac_addr)) { 1407 ether_addr_copy(priv->mac_addr, slave_data->mac_addr); 1408 dev_info(cpsw->dev, "Detected MACID = %pM\n", 1409 priv->mac_addr); 1410 } else { 1411 eth_random_addr(slave_data->mac_addr); 1412 dev_info(cpsw->dev, "Random MACID = %pM\n", 1413 priv->mac_addr); 1414 } 1415 ether_addr_copy(ndev->dev_addr, slave_data->mac_addr); 1416 ether_addr_copy(priv->mac_addr, slave_data->mac_addr); 1417 1418 cpsw->slaves[i].ndev = ndev; 1419 1420 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 1421 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL; 1422 1423 ndev->netdev_ops = &cpsw_netdev_ops; 1424 ndev->ethtool_ops = &cpsw_ethtool_ops; 1425 SET_NETDEV_DEV(ndev, dev); 1426 1427 if (!napi_ndev) { 1428 /* CPSW Host port CPDMA interface is shared between 1429 * ports and there is only one TX and one RX IRQs 1430 * available for all possible TX and RX channels 1431 * accordingly. 1432 */ 1433 netif_napi_add(ndev, &cpsw->napi_rx, 1434 cpsw->quirk_irq ? 1435 cpsw_rx_poll : cpsw_rx_mq_poll, 1436 CPSW_POLL_WEIGHT); 1437 netif_tx_napi_add(ndev, &cpsw->napi_tx, 1438 cpsw->quirk_irq ? 1439 cpsw_tx_poll : cpsw_tx_mq_poll, 1440 CPSW_POLL_WEIGHT); 1441 } 1442 1443 napi_ndev = ndev; 1444 } 1445 1446 return ret; 1447 } 1448 1449 static void cpsw_unregister_ports(struct cpsw_common *cpsw) 1450 { 1451 int i = 0; 1452 1453 for (i = 0; i < cpsw->data.slaves; i++) { 1454 if (!cpsw->slaves[i].ndev) 1455 continue; 1456 1457 unregister_netdev(cpsw->slaves[i].ndev); 1458 } 1459 } 1460 1461 static int cpsw_register_ports(struct cpsw_common *cpsw) 1462 { 1463 int ret = 0, i = 0; 1464 1465 for (i = 0; i < cpsw->data.slaves; i++) { 1466 if (!cpsw->slaves[i].ndev) 1467 continue; 1468 1469 /* register the network device */ 1470 ret = register_netdev(cpsw->slaves[i].ndev); 1471 if (ret) { 1472 dev_err(cpsw->dev, 1473 "cpsw: err registering net device%d\n", i); 1474 cpsw->slaves[i].ndev = NULL; 1475 break; 1476 } 1477 } 1478 1479 if (ret) 1480 cpsw_unregister_ports(cpsw); 1481 return ret; 1482 } 1483 1484 bool cpsw_port_dev_check(const struct net_device *ndev) 1485 { 1486 if (ndev->netdev_ops == &cpsw_netdev_ops) { 1487 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 1488 1489 return !cpsw->data.dual_emac; 1490 } 1491 1492 return false; 1493 } 1494 1495 static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw) 1496 { 1497 int set_val = 0; 1498 int i; 1499 1500 if (!cpsw->ale_bypass && 1501 (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2))) 1502 set_val = 1; 1503 1504 dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val); 1505 1506 for (i = 0; i < cpsw->data.slaves; i++) { 1507 struct net_device *sl_ndev = cpsw->slaves[i].ndev; 1508 struct cpsw_priv *priv = netdev_priv(sl_ndev); 1509 1510 priv->offload_fwd_mark = set_val; 1511 } 1512 } 1513 1514 static int cpsw_netdevice_port_link(struct net_device *ndev, 1515 struct net_device *br_ndev) 1516 { 1517 struct cpsw_priv *priv = netdev_priv(ndev); 1518 struct cpsw_common *cpsw = priv->cpsw; 1519 1520 if (!cpsw->br_members) { 1521 cpsw->hw_bridge_dev = br_ndev; 1522 } else { 1523 /* This is adding the port to a second bridge, this is 1524 * unsupported 1525 */ 1526 if (cpsw->hw_bridge_dev != br_ndev) 1527 return -EOPNOTSUPP; 1528 } 1529 1530 cpsw->br_members |= BIT(priv->emac_port); 1531 1532 cpsw_port_offload_fwd_mark_update(cpsw); 1533 1534 return NOTIFY_DONE; 1535 } 1536 1537 static void cpsw_netdevice_port_unlink(struct net_device *ndev) 1538 { 1539 struct cpsw_priv *priv = netdev_priv(ndev); 1540 struct cpsw_common *cpsw = priv->cpsw; 1541 1542 cpsw->br_members &= ~BIT(priv->emac_port); 1543 1544 cpsw_port_offload_fwd_mark_update(cpsw); 1545 1546 if (!cpsw->br_members) 1547 cpsw->hw_bridge_dev = NULL; 1548 } 1549 1550 /* netdev notifier */ 1551 static int cpsw_netdevice_event(struct notifier_block *unused, 1552 unsigned long event, void *ptr) 1553 { 1554 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 1555 struct netdev_notifier_changeupper_info *info; 1556 int ret = NOTIFY_DONE; 1557 1558 if (!cpsw_port_dev_check(ndev)) 1559 return NOTIFY_DONE; 1560 1561 switch (event) { 1562 case NETDEV_CHANGEUPPER: 1563 info = ptr; 1564 1565 if (netif_is_bridge_master(info->upper_dev)) { 1566 if (info->linking) 1567 ret = cpsw_netdevice_port_link(ndev, 1568 info->upper_dev); 1569 else 1570 cpsw_netdevice_port_unlink(ndev); 1571 } 1572 break; 1573 default: 1574 return NOTIFY_DONE; 1575 } 1576 1577 return notifier_from_errno(ret); 1578 } 1579 1580 static struct notifier_block cpsw_netdevice_nb __read_mostly = { 1581 .notifier_call = cpsw_netdevice_event, 1582 }; 1583 1584 static int cpsw_register_notifiers(struct cpsw_common *cpsw) 1585 { 1586 int ret = 0; 1587 1588 ret = register_netdevice_notifier(&cpsw_netdevice_nb); 1589 if (ret) { 1590 dev_err(cpsw->dev, "can't register netdevice notifier\n"); 1591 return ret; 1592 } 1593 1594 ret = cpsw_switchdev_register_notifiers(cpsw); 1595 if (ret) 1596 unregister_netdevice_notifier(&cpsw_netdevice_nb); 1597 1598 return ret; 1599 } 1600 1601 static void cpsw_unregister_notifiers(struct cpsw_common *cpsw) 1602 { 1603 cpsw_switchdev_unregister_notifiers(cpsw); 1604 unregister_netdevice_notifier(&cpsw_netdevice_nb); 1605 } 1606 1607 static const struct devlink_ops cpsw_devlink_ops = { 1608 }; 1609 1610 static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, 1611 struct devlink_param_gset_ctx *ctx) 1612 { 1613 struct cpsw_devlink *dl_priv = devlink_priv(dl); 1614 struct cpsw_common *cpsw = dl_priv->cpsw; 1615 1616 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 1617 1618 if (id != CPSW_DL_PARAM_SWITCH_MODE) 1619 return -EOPNOTSUPP; 1620 1621 ctx->val.vbool = !cpsw->data.dual_emac; 1622 1623 return 0; 1624 } 1625 1626 static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, 1627 struct devlink_param_gset_ctx *ctx) 1628 { 1629 struct cpsw_devlink *dl_priv = devlink_priv(dl); 1630 struct cpsw_common *cpsw = dl_priv->cpsw; 1631 int vlan = cpsw->data.default_vlan; 1632 bool switch_en = ctx->val.vbool; 1633 bool if_running = false; 1634 int i; 1635 1636 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 1637 1638 if (id != CPSW_DL_PARAM_SWITCH_MODE) 1639 return -EOPNOTSUPP; 1640 1641 if (switch_en == !cpsw->data.dual_emac) 1642 return 0; 1643 1644 if (!switch_en && cpsw->br_members) { 1645 dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n"); 1646 return -EINVAL; 1647 } 1648 1649 rtnl_lock(); 1650 1651 for (i = 0; i < cpsw->data.slaves; i++) { 1652 struct cpsw_slave *slave = &cpsw->slaves[i]; 1653 struct net_device *sl_ndev = slave->ndev; 1654 1655 if (!sl_ndev || !netif_running(sl_ndev)) 1656 continue; 1657 1658 if_running = true; 1659 } 1660 1661 if (!if_running) { 1662 /* all ndevs are down */ 1663 cpsw->data.dual_emac = !switch_en; 1664 for (i = 0; i < cpsw->data.slaves; i++) { 1665 struct cpsw_slave *slave = &cpsw->slaves[i]; 1666 struct net_device *sl_ndev = slave->ndev; 1667 1668 if (!sl_ndev) 1669 continue; 1670 1671 if (switch_en) 1672 vlan = cpsw->data.default_vlan; 1673 else 1674 vlan = slave->data->dual_emac_res_vlan; 1675 slave->port_vlan = vlan; 1676 } 1677 goto exit; 1678 } 1679 1680 if (switch_en) { 1681 dev_info(cpsw->dev, "Enable switch mode\n"); 1682 1683 /* enable bypass - no forwarding; all traffic goes to Host */ 1684 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1); 1685 1686 /* clean up ALE table */ 1687 cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1); 1688 cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT); 1689 1690 cpsw_init_host_port_switch(cpsw); 1691 1692 for (i = 0; i < cpsw->data.slaves; i++) { 1693 struct cpsw_slave *slave = &cpsw->slaves[i]; 1694 struct net_device *sl_ndev = slave->ndev; 1695 struct cpsw_priv *priv; 1696 1697 if (!sl_ndev) 1698 continue; 1699 1700 priv = netdev_priv(sl_ndev); 1701 slave->port_vlan = vlan; 1702 if (netif_running(sl_ndev)) 1703 cpsw_port_add_switch_def_ale_entries(priv, 1704 slave); 1705 } 1706 1707 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0); 1708 cpsw->data.dual_emac = false; 1709 } else { 1710 dev_info(cpsw->dev, "Disable switch mode\n"); 1711 1712 /* enable bypass - no forwarding; all traffic goes to Host */ 1713 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1); 1714 1715 cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1); 1716 cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT); 1717 1718 cpsw_init_host_port_dual_mac(cpsw); 1719 1720 for (i = 0; i < cpsw->data.slaves; i++) { 1721 struct cpsw_slave *slave = &cpsw->slaves[i]; 1722 struct net_device *sl_ndev = slave->ndev; 1723 struct cpsw_priv *priv; 1724 1725 if (!sl_ndev) 1726 continue; 1727 1728 priv = netdev_priv(slave->ndev); 1729 slave->port_vlan = slave->data->dual_emac_res_vlan; 1730 cpsw_port_add_dual_emac_def_ale_entries(priv, slave); 1731 } 1732 1733 cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0); 1734 cpsw->data.dual_emac = true; 1735 } 1736 exit: 1737 rtnl_unlock(); 1738 1739 return 0; 1740 } 1741 1742 static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id, 1743 struct devlink_param_gset_ctx *ctx) 1744 { 1745 struct cpsw_devlink *dl_priv = devlink_priv(dl); 1746 struct cpsw_common *cpsw = dl_priv->cpsw; 1747 1748 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 1749 1750 switch (id) { 1751 case CPSW_DL_PARAM_ALE_BYPASS: 1752 ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS); 1753 break; 1754 default: 1755 return -EOPNOTSUPP; 1756 } 1757 1758 return 0; 1759 } 1760 1761 static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id, 1762 struct devlink_param_gset_ctx *ctx) 1763 { 1764 struct cpsw_devlink *dl_priv = devlink_priv(dl); 1765 struct cpsw_common *cpsw = dl_priv->cpsw; 1766 int ret = -EOPNOTSUPP; 1767 1768 dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); 1769 1770 switch (id) { 1771 case CPSW_DL_PARAM_ALE_BYPASS: 1772 ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1773 ctx->val.vbool); 1774 if (!ret) { 1775 cpsw->ale_bypass = ctx->val.vbool; 1776 cpsw_port_offload_fwd_mark_update(cpsw); 1777 } 1778 break; 1779 default: 1780 return -EOPNOTSUPP; 1781 } 1782 1783 return 0; 1784 } 1785 1786 static const struct devlink_param cpsw_devlink_params[] = { 1787 DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE, 1788 "switch_mode", DEVLINK_PARAM_TYPE_BOOL, 1789 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1790 cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set, 1791 NULL), 1792 DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS, 1793 "ale_bypass", DEVLINK_PARAM_TYPE_BOOL, 1794 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 1795 cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL), 1796 }; 1797 1798 static int cpsw_register_devlink(struct cpsw_common *cpsw) 1799 { 1800 struct device *dev = cpsw->dev; 1801 struct cpsw_devlink *dl_priv; 1802 int ret = 0; 1803 1804 cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv)); 1805 if (!cpsw->devlink) 1806 return -ENOMEM; 1807 1808 dl_priv = devlink_priv(cpsw->devlink); 1809 dl_priv->cpsw = cpsw; 1810 1811 ret = devlink_register(cpsw->devlink, dev); 1812 if (ret) { 1813 dev_err(dev, "DL reg fail ret:%d\n", ret); 1814 goto dl_free; 1815 } 1816 1817 ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params, 1818 ARRAY_SIZE(cpsw_devlink_params)); 1819 if (ret) { 1820 dev_err(dev, "DL params reg fail ret:%d\n", ret); 1821 goto dl_unreg; 1822 } 1823 1824 devlink_params_publish(cpsw->devlink); 1825 return ret; 1826 1827 dl_unreg: 1828 devlink_unregister(cpsw->devlink); 1829 dl_free: 1830 devlink_free(cpsw->devlink); 1831 return ret; 1832 } 1833 1834 static void cpsw_unregister_devlink(struct cpsw_common *cpsw) 1835 { 1836 devlink_params_unpublish(cpsw->devlink); 1837 devlink_params_unregister(cpsw->devlink, cpsw_devlink_params, 1838 ARRAY_SIZE(cpsw_devlink_params)); 1839 devlink_unregister(cpsw->devlink); 1840 devlink_free(cpsw->devlink); 1841 } 1842 1843 static const struct of_device_id cpsw_of_mtable[] = { 1844 { .compatible = "ti,cpsw-switch"}, 1845 { .compatible = "ti,am335x-cpsw-switch"}, 1846 { .compatible = "ti,am4372-cpsw-switch"}, 1847 { .compatible = "ti,dra7-cpsw-switch"}, 1848 { /* sentinel */ }, 1849 }; 1850 MODULE_DEVICE_TABLE(of, cpsw_of_mtable); 1851 1852 static const struct soc_device_attribute cpsw_soc_devices[] = { 1853 { .family = "AM33xx", .revision = "ES1.0"}, 1854 { /* sentinel */ } 1855 }; 1856 1857 static int cpsw_probe(struct platform_device *pdev) 1858 { 1859 const struct soc_device_attribute *soc; 1860 struct device *dev = &pdev->dev; 1861 struct cpsw_common *cpsw; 1862 struct resource *ss_res; 1863 struct gpio_descs *mode; 1864 void __iomem *ss_regs; 1865 int ret = 0, ch; 1866 struct clk *clk; 1867 int irq; 1868 1869 cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL); 1870 if (!cpsw) 1871 return -ENOMEM; 1872 1873 cpsw_slave_index = cpsw_slave_index_priv; 1874 1875 cpsw->dev = dev; 1876 1877 cpsw->slaves = devm_kcalloc(dev, 1878 CPSW_SLAVE_PORTS_NUM, 1879 sizeof(struct cpsw_slave), 1880 GFP_KERNEL); 1881 if (!cpsw->slaves) 1882 return -ENOMEM; 1883 1884 mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); 1885 if (IS_ERR(mode)) { 1886 ret = PTR_ERR(mode); 1887 dev_err(dev, "gpio request failed, ret %d\n", ret); 1888 return ret; 1889 } 1890 1891 clk = devm_clk_get(dev, "fck"); 1892 if (IS_ERR(clk)) { 1893 ret = PTR_ERR(clk); 1894 dev_err(dev, "fck is not found %d\n", ret); 1895 return ret; 1896 } 1897 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; 1898 1899 ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1900 ss_regs = devm_ioremap_resource(dev, ss_res); 1901 if (IS_ERR(ss_regs)) { 1902 ret = PTR_ERR(ss_regs); 1903 return ret; 1904 } 1905 cpsw->regs = ss_regs; 1906 1907 irq = platform_get_irq_byname(pdev, "rx"); 1908 if (irq < 0) 1909 return irq; 1910 cpsw->irqs_table[0] = irq; 1911 1912 irq = platform_get_irq_byname(pdev, "tx"); 1913 if (irq < 0) 1914 return irq; 1915 cpsw->irqs_table[1] = irq; 1916 1917 irq = platform_get_irq_byname(pdev, "misc"); 1918 if (irq <= 0) 1919 return irq; 1920 cpsw->misc_irq = irq; 1921 1922 platform_set_drvdata(pdev, cpsw); 1923 /* This may be required here for child devices. */ 1924 pm_runtime_enable(dev); 1925 1926 /* Need to enable clocks with runtime PM api to access module 1927 * registers 1928 */ 1929 ret = pm_runtime_get_sync(dev); 1930 if (ret < 0) { 1931 pm_runtime_put_noidle(dev); 1932 pm_runtime_disable(dev); 1933 return ret; 1934 } 1935 1936 ret = cpsw_probe_dt(cpsw); 1937 if (ret) 1938 goto clean_dt_ret; 1939 1940 soc = soc_device_match(cpsw_soc_devices); 1941 if (soc) 1942 cpsw->quirk_irq = true; 1943 1944 cpsw->rx_packet_max = rx_packet_max; 1945 cpsw->descs_pool_size = descs_pool_size; 1946 eth_random_addr(cpsw->base_mac); 1947 1948 ret = cpsw_init_common(cpsw, ss_regs, ale_ageout, 1949 (u32 __force)ss_res->start + CPSW2_BD_OFFSET, 1950 descs_pool_size); 1951 if (ret) 1952 goto clean_dt_ret; 1953 1954 cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ? 1955 ss_regs + CPSW1_WR_OFFSET : 1956 ss_regs + CPSW2_WR_OFFSET; 1957 1958 ch = cpsw->quirk_irq ? 0 : 7; 1959 cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0); 1960 if (IS_ERR(cpsw->txv[0].ch)) { 1961 dev_err(dev, "error initializing tx dma channel\n"); 1962 ret = PTR_ERR(cpsw->txv[0].ch); 1963 goto clean_cpts; 1964 } 1965 1966 cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1); 1967 if (IS_ERR(cpsw->rxv[0].ch)) { 1968 dev_err(dev, "error initializing rx dma channel\n"); 1969 ret = PTR_ERR(cpsw->rxv[0].ch); 1970 goto clean_cpts; 1971 } 1972 cpsw_split_res(cpsw); 1973 1974 /* setup netdevs */ 1975 ret = cpsw_create_ports(cpsw); 1976 if (ret) 1977 goto clean_unregister_netdev; 1978 1979 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and 1980 * MISC IRQs which are always kept disabled with this driver so 1981 * we will not request them. 1982 * 1983 * If anyone wants to implement support for those, make sure to 1984 * first request and append them to irqs_table array. 1985 */ 1986 1987 ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt, 1988 0, dev_name(dev), cpsw); 1989 if (ret < 0) { 1990 dev_err(dev, "error attaching irq (%d)\n", ret); 1991 goto clean_unregister_netdev; 1992 } 1993 1994 ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt, 1995 0, dev_name(dev), cpsw); 1996 if (ret < 0) { 1997 dev_err(dev, "error attaching irq (%d)\n", ret); 1998 goto clean_unregister_netdev; 1999 } 2000 2001 if (!cpsw->cpts) 2002 goto skip_cpts; 2003 2004 ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt, 2005 0, dev_name(&pdev->dev), cpsw); 2006 if (ret < 0) { 2007 dev_err(dev, "error attaching misc irq (%d)\n", ret); 2008 goto clean_unregister_netdev; 2009 } 2010 2011 /* Enable misc CPTS evnt_pend IRQ */ 2012 cpts_set_irqpoll(cpsw->cpts, false); 2013 2014 skip_cpts: 2015 ret = cpsw_register_notifiers(cpsw); 2016 if (ret) 2017 goto clean_unregister_netdev; 2018 2019 ret = cpsw_register_devlink(cpsw); 2020 if (ret) 2021 goto clean_unregister_notifiers; 2022 2023 ret = cpsw_register_ports(cpsw); 2024 if (ret) 2025 goto clean_unregister_notifiers; 2026 2027 dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n", 2028 &ss_res->start, descs_pool_size, 2029 cpsw->version, CPSW_MAJOR_VERSION(cpsw->version), 2030 CPSW_MINOR_VERSION(cpsw->version), 2031 CPSW_RTL_VERSION(cpsw->version)); 2032 2033 pm_runtime_put(dev); 2034 2035 return 0; 2036 2037 clean_unregister_notifiers: 2038 cpsw_unregister_notifiers(cpsw); 2039 clean_unregister_netdev: 2040 cpsw_unregister_ports(cpsw); 2041 clean_cpts: 2042 cpts_release(cpsw->cpts); 2043 cpdma_ctlr_destroy(cpsw->dma); 2044 clean_dt_ret: 2045 cpsw_remove_dt(cpsw); 2046 pm_runtime_put_sync(dev); 2047 pm_runtime_disable(dev); 2048 return ret; 2049 } 2050 2051 static int cpsw_remove(struct platform_device *pdev) 2052 { 2053 struct cpsw_common *cpsw = platform_get_drvdata(pdev); 2054 int ret; 2055 2056 ret = pm_runtime_get_sync(&pdev->dev); 2057 if (ret < 0) { 2058 pm_runtime_put_noidle(&pdev->dev); 2059 return ret; 2060 } 2061 2062 cpsw_unregister_notifiers(cpsw); 2063 cpsw_unregister_devlink(cpsw); 2064 cpsw_unregister_ports(cpsw); 2065 2066 cpts_release(cpsw->cpts); 2067 cpdma_ctlr_destroy(cpsw->dma); 2068 cpsw_remove_dt(cpsw); 2069 pm_runtime_put_sync(&pdev->dev); 2070 pm_runtime_disable(&pdev->dev); 2071 return 0; 2072 } 2073 2074 static int __maybe_unused cpsw_suspend(struct device *dev) 2075 { 2076 struct cpsw_common *cpsw = dev_get_drvdata(dev); 2077 int i; 2078 2079 rtnl_lock(); 2080 2081 for (i = 0; i < cpsw->data.slaves; i++) { 2082 struct net_device *ndev = cpsw->slaves[i].ndev; 2083 2084 if (!(ndev && netif_running(ndev))) 2085 continue; 2086 2087 cpsw_ndo_stop(ndev); 2088 } 2089 2090 rtnl_unlock(); 2091 2092 /* Select sleep pin state */ 2093 pinctrl_pm_select_sleep_state(dev); 2094 2095 return 0; 2096 } 2097 2098 static int __maybe_unused cpsw_resume(struct device *dev) 2099 { 2100 struct cpsw_common *cpsw = dev_get_drvdata(dev); 2101 int i; 2102 2103 /* Select default pin state */ 2104 pinctrl_pm_select_default_state(dev); 2105 2106 /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */ 2107 rtnl_lock(); 2108 2109 for (i = 0; i < cpsw->data.slaves; i++) { 2110 struct net_device *ndev = cpsw->slaves[i].ndev; 2111 2112 if (!(ndev && netif_running(ndev))) 2113 continue; 2114 2115 cpsw_ndo_open(ndev); 2116 } 2117 2118 rtnl_unlock(); 2119 2120 return 0; 2121 } 2122 2123 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); 2124 2125 static struct platform_driver cpsw_driver = { 2126 .driver = { 2127 .name = "cpsw-switch", 2128 .pm = &cpsw_pm_ops, 2129 .of_match_table = cpsw_of_mtable, 2130 }, 2131 .probe = cpsw_probe, 2132 .remove = cpsw_remove, 2133 }; 2134 2135 module_platform_driver(cpsw_driver); 2136 2137 MODULE_LICENSE("GPL"); 2138 MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver"); 2139