// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
#include <net/page_pool.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");

/* The buf includes headroom compatible with both skb and xdpf */
#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
#define CPSW_HEADROOM  ALIGN(CPSW_HEADROOM_NA, sizeof(long))

#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
			     slave = cpsw->slaves;			\
			     n; n--)					\
				(func)(slave++, ##arg);			\
	} while (0)

#define CPSW_XMETA_OFFSET	ALIGN(sizeof(struct xdp_frame), sizeof(long))

#define CPSW_XDP_CONSUMED	1
#define CPSW_XDP_PASS		0

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid);

static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode on one interface affects
		 * both interfaces, as they share the same hardware
		 * resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

/**
 * cpsw_set_mc - add or delete a multicast address in the ALE table
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret;

	if (vid < 0) {
		if (cpsw->data.dual_emac)
			vid = cpsw->slaves[priv->emac_port].port_vlan;
		else
			vid = 0;
	}

	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}

static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 0;

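	/* Add the address for each VLAN upper that references it; if some
	 * references are not covered by a VLAN, set it for the real device too.
	 */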
	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 1;

	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.consumed = 0;

	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_port = -1;

	if (cpsw->data.dual_emac)
		slave_port = priv->emac_port + 1;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
		return;
	} else {
		/* Disable promiscuous mode */
		cpsw_set_promiscious(ndev, false);
	}

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale,
			      ndev->flags & IFF_ALLMULTI, slave_port);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}

void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
	return;
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
	return;
}

static int cpsw_is_xdpf_handle(void *handle)
{
	return (unsigned long)handle & BIT(0);
}

static void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
{
	return (void *)((unsigned long)xdpf | BIT(0));
}

static struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
{
	return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
}

struct __aligned(sizeof(long)) cpsw_meta_xdp {
	struct net_device *ndev;
	int ch;
};

void cpsw_tx_handler(void *token, int len, int status)
{
	struct cpsw_meta_xdp *xmeta;
	struct xdp_frame *xdpf;
	struct net_device *ndev;
	struct netdev_queue *txq;
	struct sk_buff *skb;
	int ch;

	if (cpsw_is_xdpf_handle(token)) {
		xdpf = cpsw_handle_to_xdpf(token);
		xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
		ndev = xmeta->ndev;
		ch = xmeta->ch;
		xdp_return_frame(xdpf);
	} else {
		skb = token;
		ndev = skb->dev;
		ch = skb_get_queue_mapping(skb);
		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
		dev_kfree_skb_any(skb);
	}

	/* Check whether the queue is stopped due to stalled tx dma; if so,
	 * wake the queue, as we now have a free descriptor for tx.
	 */
	txq = netdev_get_tx_queue(ndev, ch);
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
}

static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	struct cpsw_common *cpsw = priv->cpsw;
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;

	/* Untag P0 packets if set for vlan */
	if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
		prio = (rx_vlan_encap_hdr >>
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

		vtag = (prio << VLAN_PRIO_SHIFT) | vid;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
	}

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

static int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
			     struct page *page)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct cpdma_chan *txch;
	dma_addr_t dma;
	int ret, port;

	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
	xmeta->ndev = priv->ndev;
	xmeta->ch = 0;
	txch = cpsw->txv[0].ch;

	port = priv->emac_port + cpsw->data.dual_emac;
	if (page) {
		dma = page_pool_get_dma_addr(page);
		dma += xdpf->headroom + sizeof(struct xdp_frame);
		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
					       dma, xdpf->len, port);
	} else {
		if (sizeof(*xmeta) > xdpf->headroom) {
			xdp_return_frame_rx_napi(xdpf);
			return -EINVAL;
		}

		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
					xdpf->data, xdpf->len, port);
	}

	if (ret) {
		priv->ndev->stats.tx_dropped++;
		xdp_return_frame_rx_napi(xdpf);
	}

	return ret;
}

static int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
			struct page *page)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *ndev = priv->ndev;
	int ret = CPSW_XDP_CONSUMED;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act;

	rcu_read_lock();

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog) {
		ret = CPSW_XDP_PASS;
		goto out;
	}

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		ret = CPSW_XDP_PASS;
		break;
	case XDP_TX:
		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		cpsw_xdp_tx_frame(priv, xdpf, page);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(ndev, xdp, prog))
			goto drop;

		/* Have to flush here, per packet, instead of doing it in bulk
		 * at the end of the napi handler. The RX devices on this
		 * particular hardware are sharing a common queue, so the
		 * incoming device might change per packet.
		 */
		xdp_do_flush_map();
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		/* fall through -- handle aborts by dropping packet */
	case XDP_DROP:
		goto drop;
	}
out:
	rcu_read_unlock();
	return ret;
drop:
	rcu_read_unlock();
	page_pool_recycle_direct(cpsw->page_pool[ch], page);
	return ret;
}

static unsigned int cpsw_rxbuf_total_len(unsigned int len)
{
	len += CPSW_HEADROOM;
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return SKB_DATA_ALIGN(len);
}

static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
					       int size)
{
	struct page_pool_params pp_params;
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = size;
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = cpsw->dev;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		dev_err(cpsw->dev, "cannot create rx page pool\n");

	return pool;
}

static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int ret;

	pool = cpsw->page_pool[ch];
	rxq = &priv->xdp_rxq[ch];

	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch);
	if (ret)
		return ret;

	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (ret)
		xdp_rxq_info_unreg(rxq);

	return ret;
}

static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];

	if (!xdp_rxq_info_is_reg(rxq))
		return;

	xdp_rxq_info_unreg(rxq);
}

static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
{
	struct page_pool *pool;
	int ret = 0, pool_size;

	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
	pool = cpsw_create_page_pool(cpsw, pool_size);
	if (IS_ERR(pool))
		ret = PTR_ERR(pool);
	else
		cpsw->page_pool[ch] = pool;

	return ret;
}

void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
		}

		page_pool_destroy(cpsw->page_pool[ch]);
		cpsw->page_pool[ch] = NULL;
	}
}

int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ret = cpsw_create_rx_pool(cpsw, ch);
		if (ret)
			goto err_cleanup;

		/* sharing the same page pool is allowed, as the rx handlers
		 * of both ndevs never run simultaneously
		 */
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
			if (ret)
				goto err_cleanup;
		}
	}

	return 0;

err_cleanup:
	cpsw_destroy_xdp_rxqs(cpsw);

	return ret;
}

static void cpsw_rx_handler(void *token, int len, int status)
{
	struct page *new_page, *page = token;
	void *pa = page_address(page);
	struct cpsw_meta_xdp *xmeta = pa + CPSW_XMETA_OFFSET;
	struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
	int pkt_size = cpsw->rx_packet_max;
	int ret = 0, port, ch = xmeta->ch;
	int headroom = CPSW_HEADROOM;
	struct net_device *ndev = xmeta->ndev;
	struct cpsw_priv *priv;
	struct page_pool *pool;
	struct sk_buff *skb;
	struct xdp_buff xdp;
	dma_addr_t dma;

	if (cpsw->data.dual_emac && status >= 0) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port)
			ndev = cpsw->slaves[--port].ndev;
	}

	priv = netdev_priv(ndev);
	pool = cpsw->page_pool[ch];
	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet was received for an interface which is
			 * already down while the other interface is still up
			 * and running. Instead of freeing the page, which
			 * would reduce the number of rx descriptors in the
			 * DMA engine, requeue it back to cpdma.
			 */
			new_page = page;
			goto requeue;
		}

		/* the interface is going down, pages are purged */
		page_pool_recycle_direct(pool, page);
		return;
	}

	new_page = page_pool_dev_alloc_pages(pool);
	if (unlikely(!new_page)) {
		new_page = page;
		ndev->stats.rx_dropped++;
		goto requeue;
	}

	if (priv->xdp_prog) {
		if (status & CPDMA_RX_VLAN_ENCAP) {
			xdp.data = pa + CPSW_HEADROOM +
				   CPSW_RX_VLAN_ENCAP_HDR_SIZE;
			xdp.data_end = xdp.data + len -
				       CPSW_RX_VLAN_ENCAP_HDR_SIZE;
		} else {
			xdp.data = pa + CPSW_HEADROOM;
			xdp.data_end = xdp.data + len;
		}

		xdp_set_data_meta_invalid(&xdp);

		xdp.data_hard_start = pa;
		xdp.rxq = &priv->xdp_rxq[ch];

		ret = cpsw_run_xdp(priv, ch, &xdp, page);
		if (ret != CPSW_XDP_PASS)
			goto requeue;

		/* XDP prog might have changed packet data and boundaries */
		len = xdp.data_end - xdp.data;
		headroom = xdp.data - xdp.data_hard_start;

		/* XDP prog can modify vlan tag, so can't use encap header */
		status &= ~CPDMA_RX_VLAN_ENCAP;
	}

	/* pass skb to netstack if no XDP prog or returned XDP_PASS */
	skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
	if (!skb) {
		ndev->stats.rx_dropped++;
		page_pool_recycle_direct(pool, page);
		goto requeue;
	}

	skb_reserve(skb, headroom);
	skb_put(skb, len);
	skb->dev = ndev;
	if (status & CPDMA_RX_VLAN_ENCAP)
		cpsw_rx_vlan_encap(skb);
	if (priv->rx_ts_enabled)
		cpts_rx_timestamp(cpsw->cpts, skb);
	skb->protocol = eth_type_trans(skb, ndev);

	/* unmap page as no netstack skb page recycling */
	page_pool_release_page(pool, page);
	netif_receive_skb(skb);

	ndev->stats.rx_bytes += len;
	ndev->stats.rx_packets++;

requeue:
	xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
	xmeta->ndev = ndev;
	xmeta->ch = ch;

	dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
	ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
				       pkt_size, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOMEM);
		page_pool_recycle_direct(pool, new_page);
	}
}

void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according to the next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32 ch_map;
	int num_tx, cur_budget, ch;
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector *txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

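		/* service this channel with at most its weighted share of
		 * the remaining NAPI budget
		 */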
		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32 ch_map;
	int num_rx, cur_budget, ch;
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector *rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

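	/* single-queue variant: service channel 0 only and re-enable the RX
	 * interrupt when the budget was not exhausted
	 */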
	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;
	u32 slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= CPSW_SL_CTL_IFCTL_A;
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */

		if (priv->rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (priv->tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		if (mac_control != slave->mac_control)
			cpsw_sl_ctl_set(slave->mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_wait_for_idle(slave->mac_sl, 100);

		cpsw_sl_ctl_reset(slave->mac_sl);
	}

	if (mac_control != slave->mac_control)
		phy_print_status(phy);

	slave->mac_control = mac_control;
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	bool link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(cpsw);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}

static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
slave->port_vlan, CPSW2_PORT_VLAN); 11902a05a622SIvan Khoronzhuk cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask, 1191d9ba8f9eSMugunthan V N port_mask, port_mask, 0); 11922a05a622SIvan Khoronzhuk cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 11935b3a5a14SIvan Khoronzhuk ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0); 11942a05a622SIvan Khoronzhuk cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, 11952a05a622SIvan Khoronzhuk HOST_PORT_NUM, ALE_VLAN | 11962a05a622SIvan Khoronzhuk ALE_SECURE, slave->port_vlan); 11975e5add17SGrygorii Strashko cpsw_ale_control_set(cpsw->ale, slave_port, 11985e5add17SGrygorii Strashko ALE_PORT_DROP_UNKNOWN_VLAN, 1); 1199d9ba8f9eSMugunthan V N } 1200d9ba8f9eSMugunthan V N 12011e7a2e21SDaniel Mack static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 12021e7a2e21SDaniel Mack { 1203df828598SMugunthan V N u32 slave_port; 120430c57f07SSekhar Nori struct phy_device *phy; 1205649a1688SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 1206df828598SMugunthan V N 1207cfc08345SGrygorii Strashko cpsw_sl_reset(slave->mac_sl, 100); 1208cfc08345SGrygorii Strashko cpsw_sl_ctl_reset(slave->mac_sl); 1209df828598SMugunthan V N 1210df828598SMugunthan V N /* setup priority mapping */ 1211cfc08345SGrygorii Strashko cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP, 1212cfc08345SGrygorii Strashko RX_PRIORITY_MAPPING); 12139750a3adSRichard Cochran 12142a05a622SIvan Khoronzhuk switch (cpsw->version) { 12159750a3adSRichard Cochran case CPSW_VERSION_1: 12169750a3adSRichard Cochran slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); 121748f5bcccSGrygorii Strashko /* Increase RX FIFO size to 5 for supporting fullduplex 121848f5bcccSGrygorii Strashko * flow control mode 121948f5bcccSGrygorii Strashko */ 122048f5bcccSGrygorii Strashko slave_write(slave, 122148f5bcccSGrygorii Strashko (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | 122248f5bcccSGrygorii Strashko CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); 12239750a3adSRichard Cochran break; 12249750a3adSRichard Cochran case CPSW_VERSION_2: 1225c193f365SMugunthan V N case CPSW_VERSION_3: 1226926489beSMugunthan V N case CPSW_VERSION_4: 12279750a3adSRichard Cochran slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); 122848f5bcccSGrygorii Strashko /* Increase RX FIFO size to 5 for supporting fullduplex 122948f5bcccSGrygorii Strashko * flow control mode 123048f5bcccSGrygorii Strashko */ 123148f5bcccSGrygorii Strashko slave_write(slave, 123248f5bcccSGrygorii Strashko (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | 123348f5bcccSGrygorii Strashko CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); 12349750a3adSRichard Cochran break; 12359750a3adSRichard Cochran } 1236df828598SMugunthan V N 1237df828598SMugunthan V N /* setup max packet size, and mac address */ 1238cfc08345SGrygorii Strashko cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN, 1239cfc08345SGrygorii Strashko cpsw->rx_packet_max); 1240df828598SMugunthan V N cpsw_set_slave_mac(slave, priv); 1241df828598SMugunthan V N 1242df828598SMugunthan V N slave->mac_control = 0; /* no link yet */ 1243df828598SMugunthan V N 12446f1f5836SIvan Khoronzhuk slave_port = cpsw_get_slave_port(slave->slave_num); 1245df828598SMugunthan V N 1246606f3993SIvan Khoronzhuk if (cpsw->data.dual_emac) 1247d9ba8f9eSMugunthan V N cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port); 1248d9ba8f9eSMugunthan V N else 12492a05a622SIvan Khoronzhuk cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 1250e11b220fSMugunthan V N 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); 1251df828598SMugunthan V N 
1252d733f754SDavid Rivshin if (slave->data->phy_node) { 125330c57f07SSekhar Nori phy = of_phy_connect(priv->ndev, slave->data->phy_node, 12549e42f715SHeiko Schocher &cpsw_adjust_link, 0, slave->data->phy_if); 125530c57f07SSekhar Nori if (!phy) { 1256f7ce9103SRob Herring dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n", 1257f7ce9103SRob Herring slave->data->phy_node, 1258d733f754SDavid Rivshin slave->slave_num); 1259d733f754SDavid Rivshin return; 1260d733f754SDavid Rivshin } 1261d733f754SDavid Rivshin } else { 126230c57f07SSekhar Nori phy = phy_connect(priv->ndev, slave->data->phy_id, 1263f9a8f83bSFlorian Fainelli &cpsw_adjust_link, slave->data->phy_if); 126430c57f07SSekhar Nori if (IS_ERR(phy)) { 1265d733f754SDavid Rivshin dev_err(priv->dev, 1266d733f754SDavid Rivshin "phy \"%s\" not found on slave %d, err %ld\n", 1267d733f754SDavid Rivshin slave->data->phy_id, slave->slave_num, 126830c57f07SSekhar Nori PTR_ERR(phy)); 1269d733f754SDavid Rivshin return; 1270d733f754SDavid Rivshin } 1271d733f754SDavid Rivshin } 1272d733f754SDavid Rivshin 127330c57f07SSekhar Nori slave->phy = phy; 127430c57f07SSekhar Nori 12752220943aSAndrew Lunn phy_attached_info(slave->phy); 12762220943aSAndrew Lunn 1277df828598SMugunthan V N phy_start(slave->phy); 1278388367a5SMugunthan V N 1279388367a5SMugunthan V N /* Configure GMII_SEL register */ 12803ff18849SGrygorii Strashko if (!IS_ERR(slave->data->ifphy)) 12813ff18849SGrygorii Strashko phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET, 12823ff18849SGrygorii Strashko slave->data->phy_if); 12833ff18849SGrygorii Strashko else 12843ff18849SGrygorii Strashko cpsw_phy_sel(cpsw->dev, slave->phy->interface, 12853ff18849SGrygorii Strashko slave->slave_num); 1286df828598SMugunthan V N } 1287df828598SMugunthan V N 12883b72c2feSMugunthan V N static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) 12893b72c2feSMugunthan V N { 1290606f3993SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 1291606f3993SIvan Khoronzhuk const int vlan = cpsw->data.default_vlan; 12923b72c2feSMugunthan V N u32 reg; 12933b72c2feSMugunthan V N int i; 12941e5c4bc4SLennart Sorensen int unreg_mcast_mask; 12953b72c2feSMugunthan V N 12962a05a622SIvan Khoronzhuk reg = (cpsw->version == CPSW_VERSION_1) ? 
CPSW1_PORT_VLAN : 12973b72c2feSMugunthan V N CPSW2_PORT_VLAN; 12983b72c2feSMugunthan V N 12995d8d0d4dSIvan Khoronzhuk writel(vlan, &cpsw->host_port_regs->port_vlan); 13003b72c2feSMugunthan V N 1301606f3993SIvan Khoronzhuk for (i = 0; i < cpsw->data.slaves; i++) 1302606f3993SIvan Khoronzhuk slave_write(cpsw->slaves + i, vlan, reg); 13033b72c2feSMugunthan V N 13041e5c4bc4SLennart Sorensen if (priv->ndev->flags & IFF_ALLMULTI) 13051e5c4bc4SLennart Sorensen unreg_mcast_mask = ALE_ALL_PORTS; 13061e5c4bc4SLennart Sorensen else 13071e5c4bc4SLennart Sorensen unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; 13081e5c4bc4SLennart Sorensen 13092a05a622SIvan Khoronzhuk cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, 131061f1cef9SGrygorii Strashko ALE_ALL_PORTS, ALE_ALL_PORTS, 131161f1cef9SGrygorii Strashko unreg_mcast_mask); 13123b72c2feSMugunthan V N } 13133b72c2feSMugunthan V N 1314df828598SMugunthan V N static void cpsw_init_host_port(struct cpsw_priv *priv) 1315df828598SMugunthan V N { 1316d9ba8f9eSMugunthan V N u32 fifo_mode; 13175d8d0d4dSIvan Khoronzhuk u32 control_reg; 13185d8d0d4dSIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 13193b72c2feSMugunthan V N 1320df828598SMugunthan V N /* soft reset the controller and initialize ale */ 13215d8d0d4dSIvan Khoronzhuk soft_reset("cpsw", &cpsw->regs->soft_reset); 13222a05a622SIvan Khoronzhuk cpsw_ale_start(cpsw->ale); 1323df828598SMugunthan V N 1324df828598SMugunthan V N /* switch to vlan unaware mode */ 13252a05a622SIvan Khoronzhuk cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 13263b72c2feSMugunthan V N CPSW_ALE_VLAN_AWARE); 13275d8d0d4dSIvan Khoronzhuk control_reg = readl(&cpsw->regs->control); 1328a3a41d2fSGrygorii Strashko control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP; 13295d8d0d4dSIvan Khoronzhuk writel(control_reg, &cpsw->regs->control); 1330606f3993SIvan Khoronzhuk fifo_mode = (cpsw->data.dual_emac) ? 
CPSW_FIFO_DUAL_MAC_MODE : 1331d9ba8f9eSMugunthan V N CPSW_FIFO_NORMAL_MODE; 13325d8d0d4dSIvan Khoronzhuk writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl); 1333df828598SMugunthan V N 1334df828598SMugunthan V N /* setup host port priority mapping */ 1335dda5f5feSGrygorii Strashko writel_relaxed(CPDMA_TX_PRIORITY_MAP, 13365d8d0d4dSIvan Khoronzhuk &cpsw->host_port_regs->cpdma_tx_pri_map); 1337dda5f5feSGrygorii Strashko writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map); 1338df828598SMugunthan V N 13392a05a622SIvan Khoronzhuk cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, 1340df828598SMugunthan V N ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 1341df828598SMugunthan V N 1342606f3993SIvan Khoronzhuk if (!cpsw->data.dual_emac) { 13432a05a622SIvan Khoronzhuk cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, 1344d9ba8f9eSMugunthan V N 0, 0); 13452a05a622SIvan Khoronzhuk cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 134671a2cbb7SGrygorii Strashko ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2); 1347df828598SMugunthan V N } 1348d9ba8f9eSMugunthan V N } 1349df828598SMugunthan V N 1350c24eef28SGrygorii Strashko int cpsw_fill_rx_channels(struct cpsw_priv *priv) 13513802dce1SIvan Khoronzhuk { 13523802dce1SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 13539ed4050cSIvan Khoronzhuk struct cpsw_meta_xdp *xmeta; 13549ed4050cSIvan Khoronzhuk struct page_pool *pool; 13559ed4050cSIvan Khoronzhuk struct page *page; 13563802dce1SIvan Khoronzhuk int ch_buf_num; 1357e05107e6SIvan Khoronzhuk int ch, i, ret; 13589ed4050cSIvan Khoronzhuk dma_addr_t dma; 13593802dce1SIvan Khoronzhuk 1360e05107e6SIvan Khoronzhuk for (ch = 0; ch < cpsw->rx_ch_num; ch++) { 13619ed4050cSIvan Khoronzhuk pool = cpsw->page_pool[ch]; 13628feb0a19SIvan Khoronzhuk ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); 13633802dce1SIvan Khoronzhuk for (i = 0; i < ch_buf_num; i++) { 13649ed4050cSIvan Khoronzhuk page = page_pool_dev_alloc_pages(pool); 13659ed4050cSIvan Khoronzhuk if (!page) { 13669ed4050cSIvan Khoronzhuk cpsw_err(priv, ifup, "allocate rx page err\n"); 13673802dce1SIvan Khoronzhuk return -ENOMEM; 13683802dce1SIvan Khoronzhuk } 13693802dce1SIvan Khoronzhuk 13709ed4050cSIvan Khoronzhuk xmeta = page_address(page) + CPSW_XMETA_OFFSET; 13719ed4050cSIvan Khoronzhuk xmeta->ndev = priv->ndev; 13729ed4050cSIvan Khoronzhuk xmeta->ch = ch; 13739ed4050cSIvan Khoronzhuk 13749ed4050cSIvan Khoronzhuk dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM; 13759ed4050cSIvan Khoronzhuk ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch, 13769ed4050cSIvan Khoronzhuk page, dma, 13779ed4050cSIvan Khoronzhuk cpsw->rx_packet_max, 13789ed4050cSIvan Khoronzhuk 0); 13793802dce1SIvan Khoronzhuk if (ret < 0) { 13803802dce1SIvan Khoronzhuk cpsw_err(priv, ifup, 13819ed4050cSIvan Khoronzhuk "cannot submit page to channel %d rx, error %d\n", 1382e05107e6SIvan Khoronzhuk ch, ret); 13839ed4050cSIvan Khoronzhuk page_pool_recycle_direct(pool, page); 13843802dce1SIvan Khoronzhuk return ret; 13853802dce1SIvan Khoronzhuk } 13863802dce1SIvan Khoronzhuk } 13873802dce1SIvan Khoronzhuk 1388e05107e6SIvan Khoronzhuk cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n", 1389e05107e6SIvan Khoronzhuk ch, ch_buf_num); 1390e05107e6SIvan Khoronzhuk } 13913802dce1SIvan Khoronzhuk 1392e05107e6SIvan Khoronzhuk return 0; 13933802dce1SIvan Khoronzhuk } 13943802dce1SIvan Khoronzhuk 13952a05a622SIvan Khoronzhuk static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) 1396aacebbf8SSebastian Siewior { 13973995d265SSchuyler Patton u32 
slave_port; 13983995d265SSchuyler Patton 13996f1f5836SIvan Khoronzhuk slave_port = cpsw_get_slave_port(slave->slave_num); 14003995d265SSchuyler Patton 1401aacebbf8SSebastian Siewior if (!slave->phy) 1402aacebbf8SSebastian Siewior return; 1403aacebbf8SSebastian Siewior phy_stop(slave->phy); 1404aacebbf8SSebastian Siewior phy_disconnect(slave->phy); 1405aacebbf8SSebastian Siewior slave->phy = NULL; 14062a05a622SIvan Khoronzhuk cpsw_ale_control_set(cpsw->ale, slave_port, 14073995d265SSchuyler Patton ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 1408cfc08345SGrygorii Strashko cpsw_sl_reset(slave->mac_sl, 100); 1409cfc08345SGrygorii Strashko cpsw_sl_ctl_reset(slave->mac_sl); 1410aacebbf8SSebastian Siewior } 1411aacebbf8SSebastian Siewior 14127929a668SIvan Khoronzhuk static int cpsw_tc_to_fifo(int tc, int num_tc) 14137929a668SIvan Khoronzhuk { 14147929a668SIvan Khoronzhuk if (tc == num_tc - 1) 14157929a668SIvan Khoronzhuk return 0; 14167929a668SIvan Khoronzhuk 14177929a668SIvan Khoronzhuk return CPSW_FIFO_SHAPERS_NUM - tc; 14187929a668SIvan Khoronzhuk } 14197929a668SIvan Khoronzhuk 142057d90148SIvan Khoronzhuk static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw) 142157d90148SIvan Khoronzhuk { 142257d90148SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 142357d90148SIvan Khoronzhuk u32 val = 0, send_pct, shift; 142457d90148SIvan Khoronzhuk struct cpsw_slave *slave; 142557d90148SIvan Khoronzhuk int pct = 0, i; 142657d90148SIvan Khoronzhuk 142757d90148SIvan Khoronzhuk if (bw > priv->shp_cfg_speed * 1000) 142857d90148SIvan Khoronzhuk goto err; 142957d90148SIvan Khoronzhuk 143057d90148SIvan Khoronzhuk /* shaping has to stay enabled for the highest fifos linearly 143157d90148SIvan Khoronzhuk * and fifo bw can be no more than the interface allows 143257d90148SIvan Khoronzhuk */ 143357d90148SIvan Khoronzhuk slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; 143457d90148SIvan Khoronzhuk send_pct = slave_read(slave, SEND_PERCENT); 143557d90148SIvan Khoronzhuk for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) { 143657d90148SIvan Khoronzhuk if (!bw) { 143757d90148SIvan Khoronzhuk if (i >= fifo || !priv->fifo_bw[i]) 143857d90148SIvan Khoronzhuk continue; 143957d90148SIvan Khoronzhuk 144057d90148SIvan Khoronzhuk dev_warn(priv->dev, "Prev FIFO%d is shaped", i); 144157d90148SIvan Khoronzhuk continue; 144257d90148SIvan Khoronzhuk } 144357d90148SIvan Khoronzhuk 144457d90148SIvan Khoronzhuk if (!priv->fifo_bw[i] && i > fifo) { 144557d90148SIvan Khoronzhuk dev_err(priv->dev, "Upper FIFO%d is not shaped", i); 144657d90148SIvan Khoronzhuk return -EINVAL; 144757d90148SIvan Khoronzhuk } 144857d90148SIvan Khoronzhuk 144957d90148SIvan Khoronzhuk shift = (i - 1) * 8; 145057d90148SIvan Khoronzhuk if (i == fifo) { 145157d90148SIvan Khoronzhuk send_pct &= ~(CPSW_PCT_MASK << shift); 145257d90148SIvan Khoronzhuk val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10); 145357d90148SIvan Khoronzhuk if (!val) 145457d90148SIvan Khoronzhuk val = 1; 145557d90148SIvan Khoronzhuk 145657d90148SIvan Khoronzhuk send_pct |= val << shift; 145757d90148SIvan Khoronzhuk pct += val; 145857d90148SIvan Khoronzhuk continue; 145957d90148SIvan Khoronzhuk } 146057d90148SIvan Khoronzhuk 146157d90148SIvan Khoronzhuk if (priv->fifo_bw[i]) 146257d90148SIvan Khoronzhuk pct += (send_pct >> shift) & CPSW_PCT_MASK; 146357d90148SIvan Khoronzhuk } 146457d90148SIvan Khoronzhuk 146557d90148SIvan Khoronzhuk if (pct >= 100) 146657d90148SIvan Khoronzhuk goto err; 146757d90148SIvan Khoronzhuk 146857d90148SIvan Khoronzhuk slave_write(slave, send_pct, SEND_PERCENT);
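/* Worked example for the SEND_PERCENT update above, assuming bw is the CBS
 * idleslope in kbit/s (consistent with the "bw > shp_cfg_speed * 1000" check
 * at the top of this function) and shp_cfg_speed is the link speed in Mbit/s:
 * the register packs one per-FIFO percentage field at 8-bit offsets (hence
 * shift = (i - 1) * 8), and DIV_ROUND_UP(bw, shp_cfg_speed * 10) converts the
 * idleslope into a percentage of the link rate, e.g. bw = 20000 kbit/s on a
 * 100 Mbit/s link gives 20000 / (100 * 10) = 20 percent.
 */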
146957d90148SIvan Khoronzhuk priv->fifo_bw[fifo] = bw; 147057d90148SIvan Khoronzhuk 147157d90148SIvan Khoronzhuk dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo, 147257d90148SIvan Khoronzhuk DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100)); 147357d90148SIvan Khoronzhuk 147457d90148SIvan Khoronzhuk return 0; 147557d90148SIvan Khoronzhuk err: 147657d90148SIvan Khoronzhuk dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration"); 147757d90148SIvan Khoronzhuk return -EINVAL; 147857d90148SIvan Khoronzhuk } 147957d90148SIvan Khoronzhuk 148057d90148SIvan Khoronzhuk static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw) 148157d90148SIvan Khoronzhuk { 148257d90148SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 148357d90148SIvan Khoronzhuk struct cpsw_slave *slave; 148457d90148SIvan Khoronzhuk u32 tx_in_ctl_rg, val; 148557d90148SIvan Khoronzhuk int ret; 148657d90148SIvan Khoronzhuk 148757d90148SIvan Khoronzhuk ret = cpsw_set_fifo_bw(priv, fifo, bw); 148857d90148SIvan Khoronzhuk if (ret) 148957d90148SIvan Khoronzhuk return ret; 149057d90148SIvan Khoronzhuk 149157d90148SIvan Khoronzhuk slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; 149257d90148SIvan Khoronzhuk tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ? 149357d90148SIvan Khoronzhuk CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL; 149457d90148SIvan Khoronzhuk 149557d90148SIvan Khoronzhuk if (!bw) 149657d90148SIvan Khoronzhuk cpsw_fifo_shp_on(priv, fifo, bw); 149757d90148SIvan Khoronzhuk 149857d90148SIvan Khoronzhuk val = slave_read(slave, tx_in_ctl_rg); 149957d90148SIvan Khoronzhuk if (cpsw_shp_is_off(priv)) { 150057d90148SIvan Khoronzhuk /* disable FIFOs rate limited queues */ 150157d90148SIvan Khoronzhuk val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT); 150257d90148SIvan Khoronzhuk 150357d90148SIvan Khoronzhuk /* set type of FIFO queues to normal priority mode */ 150457d90148SIvan Khoronzhuk val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT); 150557d90148SIvan Khoronzhuk 150657d90148SIvan Khoronzhuk /* set type of FIFO queues to be rate limited */ 150757d90148SIvan Khoronzhuk if (bw) 150857d90148SIvan Khoronzhuk val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT; 150957d90148SIvan Khoronzhuk else 151057d90148SIvan Khoronzhuk priv->shp_cfg_speed = 0; 151157d90148SIvan Khoronzhuk } 151257d90148SIvan Khoronzhuk 151357d90148SIvan Khoronzhuk /* toggle a FIFO rate limited queue */ 151457d90148SIvan Khoronzhuk if (bw) 151557d90148SIvan Khoronzhuk val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); 151657d90148SIvan Khoronzhuk else 151757d90148SIvan Khoronzhuk val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); 151857d90148SIvan Khoronzhuk slave_write(slave, val, tx_in_ctl_rg); 151957d90148SIvan Khoronzhuk 152057d90148SIvan Khoronzhuk /* FIFO transmit shape enable */ 152157d90148SIvan Khoronzhuk cpsw_fifo_shp_on(priv, fifo, bw); 152257d90148SIvan Khoronzhuk return 0; 152357d90148SIvan Khoronzhuk } 152457d90148SIvan Khoronzhuk 152557d90148SIvan Khoronzhuk /* Defaults: 152657d90148SIvan Khoronzhuk * class A - prio 3 152757d90148SIvan Khoronzhuk * class B - prio 2 152857d90148SIvan Khoronzhuk * shaping for class A should be set first 152957d90148SIvan Khoronzhuk */ 153057d90148SIvan Khoronzhuk static int cpsw_set_cbs(struct net_device *ndev, 153157d90148SIvan Khoronzhuk struct tc_cbs_qopt_offload *qopt) 153257d90148SIvan Khoronzhuk { 153357d90148SIvan Khoronzhuk struct cpsw_priv *priv = netdev_priv(ndev); 153457d90148SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 153557d90148SIvan Khoronzhuk struct cpsw_slave *slave; 153657d90148SIvan Khoronzhuk int prev_speed = 
0; 153757d90148SIvan Khoronzhuk int tc, ret, fifo; 153857d90148SIvan Khoronzhuk u32 bw = 0; 153957d90148SIvan Khoronzhuk 154057d90148SIvan Khoronzhuk tc = netdev_txq_to_tc(priv->ndev, qopt->queue); 154157d90148SIvan Khoronzhuk 154257d90148SIvan Khoronzhuk /* enable channels in backward order, as the highest FIFOs must be rate 154357d90148SIvan Khoronzhuk * limited first and for compliance with CPDMA rate limited channels 154457d90148SIvan Khoronzhuk * that are also used in backward order. FIFO0 cannot be rate limited. 154557d90148SIvan Khoronzhuk */ 154657d90148SIvan Khoronzhuk fifo = cpsw_tc_to_fifo(tc, ndev->num_tc); 154757d90148SIvan Khoronzhuk if (!fifo) { 154857d90148SIvan Khoronzhuk dev_err(priv->dev, "Last tc%d can't be rate limited", tc); 154957d90148SIvan Khoronzhuk return -EINVAL; 155057d90148SIvan Khoronzhuk } 155157d90148SIvan Khoronzhuk 155257d90148SIvan Khoronzhuk /* do nothing, it's disabled anyway */ 155357d90148SIvan Khoronzhuk if (!qopt->enable && !priv->fifo_bw[fifo]) 155457d90148SIvan Khoronzhuk return 0; 155557d90148SIvan Khoronzhuk 155657d90148SIvan Khoronzhuk /* shapers can be set if link speed is known */ 155757d90148SIvan Khoronzhuk slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; 155857d90148SIvan Khoronzhuk if (slave->phy && slave->phy->link) { 155957d90148SIvan Khoronzhuk if (priv->shp_cfg_speed && 156057d90148SIvan Khoronzhuk priv->shp_cfg_speed != slave->phy->speed) 156157d90148SIvan Khoronzhuk prev_speed = priv->shp_cfg_speed; 156257d90148SIvan Khoronzhuk 156357d90148SIvan Khoronzhuk priv->shp_cfg_speed = slave->phy->speed; 156457d90148SIvan Khoronzhuk } 156557d90148SIvan Khoronzhuk 156657d90148SIvan Khoronzhuk if (!priv->shp_cfg_speed) { 156757d90148SIvan Khoronzhuk dev_err(priv->dev, "Link speed is not known"); 156857d90148SIvan Khoronzhuk return -1; 156957d90148SIvan Khoronzhuk } 157057d90148SIvan Khoronzhuk 157157d90148SIvan Khoronzhuk ret = pm_runtime_get_sync(cpsw->dev); 157257d90148SIvan Khoronzhuk if (ret < 0) { 157357d90148SIvan Khoronzhuk pm_runtime_put_noidle(cpsw->dev); 157457d90148SIvan Khoronzhuk return ret; 157557d90148SIvan Khoronzhuk } 157657d90148SIvan Khoronzhuk 157757d90148SIvan Khoronzhuk bw = qopt->enable ?
qopt->idleslope : 0; 157857d90148SIvan Khoronzhuk ret = cpsw_set_fifo_rlimit(priv, fifo, bw); 157957d90148SIvan Khoronzhuk if (ret) { 158057d90148SIvan Khoronzhuk priv->shp_cfg_speed = prev_speed; 158157d90148SIvan Khoronzhuk prev_speed = 0; 158257d90148SIvan Khoronzhuk } 158357d90148SIvan Khoronzhuk 158457d90148SIvan Khoronzhuk if (bw && prev_speed) 158557d90148SIvan Khoronzhuk dev_warn(priv->dev, 158657d90148SIvan Khoronzhuk "Speed was changed, CBS shaper speeds are changed!"); 158757d90148SIvan Khoronzhuk 158857d90148SIvan Khoronzhuk pm_runtime_put_sync(cpsw->dev); 158957d90148SIvan Khoronzhuk return ret; 159057d90148SIvan Khoronzhuk } 159157d90148SIvan Khoronzhuk 15924b4255edSIvan Khoronzhuk static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) 15934b4255edSIvan Khoronzhuk { 15944b4255edSIvan Khoronzhuk int fifo, bw; 15954b4255edSIvan Khoronzhuk 15964b4255edSIvan Khoronzhuk for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) { 15974b4255edSIvan Khoronzhuk bw = priv->fifo_bw[fifo]; 15984b4255edSIvan Khoronzhuk if (!bw) 15994b4255edSIvan Khoronzhuk continue; 16004b4255edSIvan Khoronzhuk 16014b4255edSIvan Khoronzhuk cpsw_set_fifo_rlimit(priv, fifo, bw); 16024b4255edSIvan Khoronzhuk } 16034b4255edSIvan Khoronzhuk } 16044b4255edSIvan Khoronzhuk 16054b4255edSIvan Khoronzhuk static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) 16064b4255edSIvan Khoronzhuk { 16074b4255edSIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 16084b4255edSIvan Khoronzhuk u32 tx_prio_map = 0; 16094b4255edSIvan Khoronzhuk int i, tc, fifo; 16104b4255edSIvan Khoronzhuk u32 tx_prio_rg; 16114b4255edSIvan Khoronzhuk 16124b4255edSIvan Khoronzhuk if (!priv->mqprio_hw) 16134b4255edSIvan Khoronzhuk return; 16144b4255edSIvan Khoronzhuk 16154b4255edSIvan Khoronzhuk for (i = 0; i < 8; i++) { 16164b4255edSIvan Khoronzhuk tc = netdev_get_prio_tc_map(priv->ndev, i); 16174b4255edSIvan Khoronzhuk fifo = CPSW_FIFO_SHAPERS_NUM - tc; 16184b4255edSIvan Khoronzhuk tx_prio_map |= fifo << (4 * i); 16194b4255edSIvan Khoronzhuk } 16204b4255edSIvan Khoronzhuk 16214b4255edSIvan Khoronzhuk tx_prio_rg = cpsw->version == CPSW_VERSION_1 ? 
16224b4255edSIvan Khoronzhuk CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; 16234b4255edSIvan Khoronzhuk 16244b4255edSIvan Khoronzhuk slave_write(slave, tx_prio_map, tx_prio_rg); 16254b4255edSIvan Khoronzhuk } 16264b4255edSIvan Khoronzhuk 162700fe4712SIvan Khoronzhuk static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) 162800fe4712SIvan Khoronzhuk { 162900fe4712SIvan Khoronzhuk struct cpsw_priv *priv = arg; 163000fe4712SIvan Khoronzhuk 163100fe4712SIvan Khoronzhuk if (!vdev) 163200fe4712SIvan Khoronzhuk return 0; 163300fe4712SIvan Khoronzhuk 163400fe4712SIvan Khoronzhuk cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid); 163500fe4712SIvan Khoronzhuk return 0; 163600fe4712SIvan Khoronzhuk } 163700fe4712SIvan Khoronzhuk 16384b4255edSIvan Khoronzhuk /* restore resources after port reset */ 16394b4255edSIvan Khoronzhuk static void cpsw_restore(struct cpsw_priv *priv) 16404b4255edSIvan Khoronzhuk { 164100fe4712SIvan Khoronzhuk /* restore vlan configurations */ 164200fe4712SIvan Khoronzhuk vlan_for_each(priv->ndev, cpsw_restore_vlans, priv); 164300fe4712SIvan Khoronzhuk 16444b4255edSIvan Khoronzhuk /* restore MQPRIO offload */ 16454b4255edSIvan Khoronzhuk for_each_slave(priv, cpsw_mqprio_resume, priv); 16464b4255edSIvan Khoronzhuk 16474b4255edSIvan Khoronzhuk /* restore CBS offload */ 16484b4255edSIvan Khoronzhuk for_each_slave(priv, cpsw_cbs_resume, priv); 16494b4255edSIvan Khoronzhuk } 16504b4255edSIvan Khoronzhuk 1651df828598SMugunthan V N static int cpsw_ndo_open(struct net_device *ndev) 1652df828598SMugunthan V N { 1653df828598SMugunthan V N struct cpsw_priv *priv = netdev_priv(ndev); 1654649a1688SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 16553802dce1SIvan Khoronzhuk int ret; 1656df828598SMugunthan V N u32 reg; 1657df828598SMugunthan V N 165856e31bd8SIvan Khoronzhuk ret = pm_runtime_get_sync(cpsw->dev); 1659108a6537SGrygorii Strashko if (ret < 0) { 166056e31bd8SIvan Khoronzhuk pm_runtime_put_noidle(cpsw->dev); 1661108a6537SGrygorii Strashko return ret; 1662108a6537SGrygorii Strashko } 16633fa88c51SGrygorii Strashko 1664df828598SMugunthan V N netif_carrier_off(ndev); 1665df828598SMugunthan V N 1666e05107e6SIvan Khoronzhuk /* Notify the stack of the actual queue counts. 
*/ 1667e05107e6SIvan Khoronzhuk ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); 1668e05107e6SIvan Khoronzhuk if (ret) { 1669e05107e6SIvan Khoronzhuk dev_err(priv->dev, "cannot set real number of tx queues\n"); 1670e05107e6SIvan Khoronzhuk goto err_cleanup; 1671e05107e6SIvan Khoronzhuk } 1672e05107e6SIvan Khoronzhuk 1673e05107e6SIvan Khoronzhuk ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num); 1674e05107e6SIvan Khoronzhuk if (ret) { 1675e05107e6SIvan Khoronzhuk dev_err(priv->dev, "cannot set real number of rx queues\n"); 1676e05107e6SIvan Khoronzhuk goto err_cleanup; 1677e05107e6SIvan Khoronzhuk } 1678e05107e6SIvan Khoronzhuk 16792a05a622SIvan Khoronzhuk reg = cpsw->version; 1680df828598SMugunthan V N 1681df828598SMugunthan V N dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", 1682df828598SMugunthan V N CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), 1683df828598SMugunthan V N CPSW_RTL_VERSION(reg)); 1684df828598SMugunthan V N 1685d5bc1613SIvan Khoronzhuk /* Initialize host and slave ports */ 1686d5bc1613SIvan Khoronzhuk if (!cpsw->usage_count) 1687df828598SMugunthan V N cpsw_init_host_port(priv); 1688df828598SMugunthan V N for_each_slave(priv, cpsw_slave_open, priv); 1689df828598SMugunthan V N 16903b72c2feSMugunthan V N /* Add default VLAN */ 1691606f3993SIvan Khoronzhuk if (!cpsw->data.dual_emac) 16923b72c2feSMugunthan V N cpsw_add_default_vlan(priv); 1693e6afea0bSMugunthan V N else 16942a05a622SIvan Khoronzhuk cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan, 169561f1cef9SGrygorii Strashko ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); 16963b72c2feSMugunthan V N 1697d5bc1613SIvan Khoronzhuk /* initialize shared resources for every ndev */ 1698d5bc1613SIvan Khoronzhuk if (!cpsw->usage_count) { 1699d9ba8f9eSMugunthan V N /* disable priority elevation */ 1700dda5f5feSGrygorii Strashko writel_relaxed(0, &cpsw->regs->ptype); 1701df828598SMugunthan V N 1702d9ba8f9eSMugunthan V N /* enable statistics collection on all ports */ 1703dda5f5feSGrygorii Strashko writel_relaxed(0x7, &cpsw->regs->stat_port_en); 1704df828598SMugunthan V N 17051923d6e4SMugunthan V N /* Enable internal fifo flow control */ 17065d8d0d4dSIvan Khoronzhuk writel(0x7, &cpsw->regs->flow_control); 17071923d6e4SMugunthan V N 1708dbc4ec52SIvan Khoronzhuk napi_enable(&cpsw->napi_rx); 1709dbc4ec52SIvan Khoronzhuk napi_enable(&cpsw->napi_tx); 1710d354eb85SMugunthan V N 1711e38b5a3dSIvan Khoronzhuk if (cpsw->tx_irq_disabled) { 1712e38b5a3dSIvan Khoronzhuk cpsw->tx_irq_disabled = false; 1713e38b5a3dSIvan Khoronzhuk enable_irq(cpsw->irqs_table[1]); 17147da11600SMugunthan V N } 17157da11600SMugunthan V N 1716e38b5a3dSIvan Khoronzhuk if (cpsw->rx_irq_disabled) { 1717e38b5a3dSIvan Khoronzhuk cpsw->rx_irq_disabled = false; 1718e38b5a3dSIvan Khoronzhuk enable_irq(cpsw->irqs_table[0]); 17197da11600SMugunthan V N } 17207da11600SMugunthan V N 17219ed4050cSIvan Khoronzhuk /* create rxqs for both interfaces in dual_emac mode as they use the same pool 17229ed4050cSIvan Khoronzhuk * and must be destroyed together when there are no more users.
17239ed4050cSIvan Khoronzhuk */ 17249ed4050cSIvan Khoronzhuk ret = cpsw_create_xdp_rxqs(cpsw); 17259ed4050cSIvan Khoronzhuk if (ret < 0) 17269ed4050cSIvan Khoronzhuk goto err_cleanup; 17279ed4050cSIvan Khoronzhuk 17283802dce1SIvan Khoronzhuk ret = cpsw_fill_rx_channels(priv); 17293802dce1SIvan Khoronzhuk if (ret < 0) 1730aacebbf8SSebastian Siewior goto err_cleanup; 1731f280e89aSMugunthan V N 17328a2c9a5aSGrygorii Strashko if (cpts_register(cpsw->cpts)) 1733f280e89aSMugunthan V N dev_err(priv->dev, "error registering cpts device\n"); 1734f280e89aSMugunthan V N 1735d9ba8f9eSMugunthan V N } 1736df828598SMugunthan V N 17374b4255edSIvan Khoronzhuk cpsw_restore(priv); 17384b4255edSIvan Khoronzhuk 1739ff5b8ef2SMugunthan V N /* Enable Interrupt pacing if configured */ 17402a05a622SIvan Khoronzhuk if (cpsw->coal_intvl != 0) { 1741ff5b8ef2SMugunthan V N struct ethtool_coalesce coal; 1742ff5b8ef2SMugunthan V N 17432a05a622SIvan Khoronzhuk coal.rx_coalesce_usecs = cpsw->coal_intvl; 1744ff5b8ef2SMugunthan V N cpsw_set_coalesce(ndev, &coal); 1745ff5b8ef2SMugunthan V N } 1746ff5b8ef2SMugunthan V N 17472c836bd9SIvan Khoronzhuk cpdma_ctlr_start(cpsw->dma); 17482c836bd9SIvan Khoronzhuk cpsw_intr_enable(cpsw); 1749d5bc1613SIvan Khoronzhuk cpsw->usage_count++; 1750f63a975eSMugunthan V N 1751df828598SMugunthan V N return 0; 1752df828598SMugunthan V N 1753aacebbf8SSebastian Siewior err_cleanup: 175402cacedeSIvan Khoronzhuk if (!cpsw->usage_count) { 17552c836bd9SIvan Khoronzhuk cpdma_ctlr_stop(cpsw->dma); 17569ed4050cSIvan Khoronzhuk cpsw_destroy_xdp_rxqs(cpsw); 175702cacedeSIvan Khoronzhuk } 175802cacedeSIvan Khoronzhuk 17599ed4050cSIvan Khoronzhuk for_each_slave(priv, cpsw_slave_stop, cpsw); 176056e31bd8SIvan Khoronzhuk pm_runtime_put_sync(cpsw->dev); 1761aacebbf8SSebastian Siewior netif_carrier_off(priv->ndev); 1762aacebbf8SSebastian Siewior return ret; 1763df828598SMugunthan V N } 1764df828598SMugunthan V N 1765df828598SMugunthan V N static int cpsw_ndo_stop(struct net_device *ndev) 1766df828598SMugunthan V N { 1767df828598SMugunthan V N struct cpsw_priv *priv = netdev_priv(ndev); 1768649a1688SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 1769df828598SMugunthan V N 1770df828598SMugunthan V N cpsw_info(priv, ifdown, "shutting down cpsw device\n"); 177115180ecaSIvan Khoronzhuk __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc); 1772e05107e6SIvan Khoronzhuk netif_tx_stop_all_queues(priv->ndev); 1773df828598SMugunthan V N netif_carrier_off(priv->ndev); 1774d9ba8f9eSMugunthan V N 1775d5bc1613SIvan Khoronzhuk if (cpsw->usage_count <= 1) { 1776dbc4ec52SIvan Khoronzhuk napi_disable(&cpsw->napi_rx); 1777dbc4ec52SIvan Khoronzhuk napi_disable(&cpsw->napi_tx); 17782a05a622SIvan Khoronzhuk cpts_unregister(cpsw->cpts); 17792c836bd9SIvan Khoronzhuk cpsw_intr_disable(cpsw); 17802c836bd9SIvan Khoronzhuk cpdma_ctlr_stop(cpsw->dma); 17812a05a622SIvan Khoronzhuk cpsw_ale_stop(cpsw->ale); 17829ed4050cSIvan Khoronzhuk cpsw_destroy_xdp_rxqs(cpsw); 1783d9ba8f9eSMugunthan V N } 17842a05a622SIvan Khoronzhuk for_each_slave(priv, cpsw_slave_stop, cpsw); 17850be01b8eSIvan Khoronzhuk 17860be01b8eSIvan Khoronzhuk if (cpsw_need_resplit(cpsw)) 17879763a891SGrygorii Strashko cpsw_split_res(cpsw); 17880be01b8eSIvan Khoronzhuk 1789d5bc1613SIvan Khoronzhuk cpsw->usage_count--; 179056e31bd8SIvan Khoronzhuk pm_runtime_put_sync(cpsw->dev); 1791df828598SMugunthan V N return 0; 1792df828598SMugunthan V N } 1793df828598SMugunthan V N 1794df828598SMugunthan V N static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, 
1795df828598SMugunthan V N struct net_device *ndev) 1796df828598SMugunthan V N { 1797df828598SMugunthan V N struct cpsw_priv *priv = netdev_priv(ndev); 17982c836bd9SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 1799f44f8417SIvan Khoronzhuk struct cpts *cpts = cpsw->cpts; 1800e05107e6SIvan Khoronzhuk struct netdev_queue *txq; 1801e05107e6SIvan Khoronzhuk struct cpdma_chan *txch; 1802e05107e6SIvan Khoronzhuk int ret, q_idx; 1803df828598SMugunthan V N 1804df828598SMugunthan V N if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { 1805df828598SMugunthan V N cpsw_err(priv, tx_err, "packet pad failed\n"); 18068dc43ddcSTobias Klauser ndev->stats.tx_dropped++; 18071bf96050SIvan Khoronzhuk return NET_XMIT_DROP; 1808df828598SMugunthan V N } 1809df828598SMugunthan V N 18109232b16dSMugunthan V N if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 1811a9423120SIvan Khoronzhuk priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb)) 18122e5b38abSRichard Cochran skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 18132e5b38abSRichard Cochran 1814e05107e6SIvan Khoronzhuk q_idx = skb_get_queue_mapping(skb); 1815e05107e6SIvan Khoronzhuk if (q_idx >= cpsw->tx_ch_num) 1816e05107e6SIvan Khoronzhuk q_idx = q_idx % cpsw->tx_ch_num; 1817e05107e6SIvan Khoronzhuk 18188feb0a19SIvan Khoronzhuk txch = cpsw->txv[q_idx].ch; 181962f94c21SGrygorii Strashko txq = netdev_get_tx_queue(ndev, q_idx); 182010ae8054SGrygorii Strashko skb_tx_timestamp(skb); 182110ae8054SGrygorii Strashko ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 182210ae8054SGrygorii Strashko priv->emac_port + cpsw->data.dual_emac); 1823df828598SMugunthan V N if (unlikely(ret != 0)) { 1824df828598SMugunthan V N cpsw_err(priv, tx_err, "desc submit failed\n"); 1825df828598SMugunthan V N goto fail; 1826df828598SMugunthan V N } 1827df828598SMugunthan V N 1828fae50823SMugunthan V N /* If there is no more tx desc left free then we need to 1829fae50823SMugunthan V N * tell the kernel to stop sending us tx frames. 
1830fae50823SMugunthan V N */ 1831e05107e6SIvan Khoronzhuk if (unlikely(!cpdma_check_free_tx_desc(txch))) { 1832e05107e6SIvan Khoronzhuk netif_tx_stop_queue(txq); 183362f94c21SGrygorii Strashko 183462f94c21SGrygorii Strashko /* Barrier, so that stop_queue visible to other cpus */ 183562f94c21SGrygorii Strashko smp_mb__after_atomic(); 183662f94c21SGrygorii Strashko 183762f94c21SGrygorii Strashko if (cpdma_check_free_tx_desc(txch)) 183862f94c21SGrygorii Strashko netif_tx_wake_queue(txq); 1839e05107e6SIvan Khoronzhuk } 1840fae50823SMugunthan V N 1841df828598SMugunthan V N return NETDEV_TX_OK; 1842df828598SMugunthan V N fail: 18438dc43ddcSTobias Klauser ndev->stats.tx_dropped++; 1844e05107e6SIvan Khoronzhuk netif_tx_stop_queue(txq); 184562f94c21SGrygorii Strashko 184662f94c21SGrygorii Strashko /* Barrier, so that stop_queue visible to other cpus */ 184762f94c21SGrygorii Strashko smp_mb__after_atomic(); 184862f94c21SGrygorii Strashko 184962f94c21SGrygorii Strashko if (cpdma_check_free_tx_desc(txch)) 185062f94c21SGrygorii Strashko netif_tx_wake_queue(txq); 185162f94c21SGrygorii Strashko 1852df828598SMugunthan V N return NETDEV_TX_BUSY; 1853df828598SMugunthan V N } 1854df828598SMugunthan V N 1855c8395d4eSGrygorii Strashko #if IS_ENABLED(CONFIG_TI_CPTS) 18562e5b38abSRichard Cochran 1857a9423120SIvan Khoronzhuk static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) 18582e5b38abSRichard Cochran { 1859a9423120SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 1860606f3993SIvan Khoronzhuk struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave]; 18612e5b38abSRichard Cochran u32 ts_en, seq_id; 18622e5b38abSRichard Cochran 1863a9423120SIvan Khoronzhuk if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) { 18642e5b38abSRichard Cochran slave_write(slave, 0, CPSW1_TS_CTL); 18652e5b38abSRichard Cochran return; 18662e5b38abSRichard Cochran } 18672e5b38abSRichard Cochran 18682e5b38abSRichard Cochran seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; 18692e5b38abSRichard Cochran ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; 18702e5b38abSRichard Cochran 1871a9423120SIvan Khoronzhuk if (priv->tx_ts_enabled) 18722e5b38abSRichard Cochran ts_en |= CPSW_V1_TS_TX_EN; 18732e5b38abSRichard Cochran 1874a9423120SIvan Khoronzhuk if (priv->rx_ts_enabled) 18752e5b38abSRichard Cochran ts_en |= CPSW_V1_TS_RX_EN; 18762e5b38abSRichard Cochran 18772e5b38abSRichard Cochran slave_write(slave, ts_en, CPSW1_TS_CTL); 18782e5b38abSRichard Cochran slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE); 18792e5b38abSRichard Cochran } 18802e5b38abSRichard Cochran 18812e5b38abSRichard Cochran static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) 18822e5b38abSRichard Cochran { 1883d9ba8f9eSMugunthan V N struct cpsw_slave *slave; 18845d8d0d4dSIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 18852e5b38abSRichard Cochran u32 ctrl, mtype; 18862e5b38abSRichard Cochran 1887cb7d78d0SIvan Khoronzhuk slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; 1888d9ba8f9eSMugunthan V N 18892e5b38abSRichard Cochran ctrl = slave_read(slave, CPSW2_CONTROL); 18902a05a622SIvan Khoronzhuk switch (cpsw->version) { 189109c55372SGeorge Cherian case CPSW_VERSION_2: 189209c55372SGeorge Cherian ctrl &= ~CTRL_V2_ALL_TS_MASK; 18932e5b38abSRichard Cochran 1894a9423120SIvan Khoronzhuk if (priv->tx_ts_enabled) 189509c55372SGeorge Cherian ctrl |= CTRL_V2_TX_TS_BITS; 18962e5b38abSRichard Cochran 1897a9423120SIvan Khoronzhuk if (priv->rx_ts_enabled) 189809c55372SGeorge Cherian ctrl |= CTRL_V2_RX_TS_BITS; 189909c55372SGeorge Cherian break; 190009c55372SGeorge 
Cherian case CPSW_VERSION_3: 190109c55372SGeorge Cherian default: 190209c55372SGeorge Cherian ctrl &= ~CTRL_V3_ALL_TS_MASK; 190309c55372SGeorge Cherian 1904a9423120SIvan Khoronzhuk if (priv->tx_ts_enabled) 190509c55372SGeorge Cherian ctrl |= CTRL_V3_TX_TS_BITS; 190609c55372SGeorge Cherian 1907a9423120SIvan Khoronzhuk if (priv->rx_ts_enabled) 190809c55372SGeorge Cherian ctrl |= CTRL_V3_RX_TS_BITS; 190909c55372SGeorge Cherian break; 191009c55372SGeorge Cherian } 19112e5b38abSRichard Cochran 19122e5b38abSRichard Cochran mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; 19132e5b38abSRichard Cochran 19142e5b38abSRichard Cochran slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); 19152e5b38abSRichard Cochran slave_write(slave, ctrl, CPSW2_CONTROL); 1916dda5f5feSGrygorii Strashko writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype); 19171ebb2446SIvan Khoronzhuk writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype); 19182e5b38abSRichard Cochran } 19192e5b38abSRichard Cochran 1920a5b4145bSBen Hutchings static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 19212e5b38abSRichard Cochran { 19223177bf6fSMugunthan V N struct cpsw_priv *priv = netdev_priv(dev); 19232e5b38abSRichard Cochran struct hwtstamp_config cfg; 19242a05a622SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 19252e5b38abSRichard Cochran 19262a05a622SIvan Khoronzhuk if (cpsw->version != CPSW_VERSION_1 && 19272a05a622SIvan Khoronzhuk cpsw->version != CPSW_VERSION_2 && 19282a05a622SIvan Khoronzhuk cpsw->version != CPSW_VERSION_3) 19292ee91e54SBen Hutchings return -EOPNOTSUPP; 19302ee91e54SBen Hutchings 19312e5b38abSRichard Cochran if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 19322e5b38abSRichard Cochran return -EFAULT; 19332e5b38abSRichard Cochran 19342e5b38abSRichard Cochran /* reserved for future extensions */ 19352e5b38abSRichard Cochran if (cfg.flags) 19362e5b38abSRichard Cochran return -EINVAL; 19372e5b38abSRichard Cochran 19382ee91e54SBen Hutchings if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) 19392e5b38abSRichard Cochran return -ERANGE; 19402e5b38abSRichard Cochran 19412e5b38abSRichard Cochran switch (cfg.rx_filter) { 19422e5b38abSRichard Cochran case HWTSTAMP_FILTER_NONE: 1943a9423120SIvan Khoronzhuk priv->rx_ts_enabled = 0; 19442e5b38abSRichard Cochran break; 19452e5b38abSRichard Cochran case HWTSTAMP_FILTER_ALL: 1946e9523a5aSGrygorii Strashko case HWTSTAMP_FILTER_NTP_ALL: 1947e9523a5aSGrygorii Strashko return -ERANGE; 19482e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 19492e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 19502e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1951a9423120SIvan Khoronzhuk priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 1952e9523a5aSGrygorii Strashko cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 1953e9523a5aSGrygorii Strashko break; 19542e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 19552e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 19562e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 19572e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 19582e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 19592e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 19602e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V2_EVENT: 19612e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V2_SYNC: 19622e5b38abSRichard Cochran case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1963a9423120SIvan Khoronzhuk priv->rx_ts_enabled = 
HWTSTAMP_FILTER_PTP_V2_EVENT; 19642e5b38abSRichard Cochran cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 19652e5b38abSRichard Cochran break; 19662e5b38abSRichard Cochran default: 19672e5b38abSRichard Cochran return -ERANGE; 19682e5b38abSRichard Cochran } 19692e5b38abSRichard Cochran 1970a9423120SIvan Khoronzhuk priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON; 19712ee91e54SBen Hutchings 19722a05a622SIvan Khoronzhuk switch (cpsw->version) { 19732e5b38abSRichard Cochran case CPSW_VERSION_1: 1974a9423120SIvan Khoronzhuk cpsw_hwtstamp_v1(priv); 19752e5b38abSRichard Cochran break; 19762e5b38abSRichard Cochran case CPSW_VERSION_2: 1977f7d403cbSGeorge Cherian case CPSW_VERSION_3: 19782e5b38abSRichard Cochran cpsw_hwtstamp_v2(priv); 19792e5b38abSRichard Cochran break; 19802e5b38abSRichard Cochran default: 19812ee91e54SBen Hutchings WARN_ON(1); 19822e5b38abSRichard Cochran } 19832e5b38abSRichard Cochran 19842e5b38abSRichard Cochran return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 19852e5b38abSRichard Cochran } 19862e5b38abSRichard Cochran 1987a5b4145bSBen Hutchings static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 1988a5b4145bSBen Hutchings { 19892a05a622SIvan Khoronzhuk struct cpsw_common *cpsw = ndev_to_cpsw(dev); 1990a9423120SIvan Khoronzhuk struct cpsw_priv *priv = netdev_priv(dev); 1991a5b4145bSBen Hutchings struct hwtstamp_config cfg; 1992a5b4145bSBen Hutchings 19932a05a622SIvan Khoronzhuk if (cpsw->version != CPSW_VERSION_1 && 19942a05a622SIvan Khoronzhuk cpsw->version != CPSW_VERSION_2 && 19952a05a622SIvan Khoronzhuk cpsw->version != CPSW_VERSION_3) 1996a5b4145bSBen Hutchings return -EOPNOTSUPP; 1997a5b4145bSBen Hutchings 1998a5b4145bSBen Hutchings cfg.flags = 0; 1999a9423120SIvan Khoronzhuk cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; 2000a9423120SIvan Khoronzhuk cfg.rx_filter = priv->rx_ts_enabled; 2001a5b4145bSBen Hutchings 2002a5b4145bSBen Hutchings return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; 2003a5b4145bSBen Hutchings } 2004c8395d4eSGrygorii Strashko #else 2005c8395d4eSGrygorii Strashko static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 2006c8395d4eSGrygorii Strashko { 2007c8395d4eSGrygorii Strashko return -EOPNOTSUPP; 2008c8395d4eSGrygorii Strashko } 2009a5b4145bSBen Hutchings 2010c8395d4eSGrygorii Strashko static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 2011c8395d4eSGrygorii Strashko { 2012c8395d4eSGrygorii Strashko return -EOPNOTSUPP; 2013c8395d4eSGrygorii Strashko } 20142e5b38abSRichard Cochran #endif /*CONFIG_TI_CPTS*/ 20152e5b38abSRichard Cochran 20162e5b38abSRichard Cochran static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 20172e5b38abSRichard Cochran { 201811f2c988SMugunthan V N struct cpsw_priv *priv = netdev_priv(dev); 2019606f3993SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 2020606f3993SIvan Khoronzhuk int slave_no = cpsw_slave_index(cpsw, priv); 202111f2c988SMugunthan V N 20222e5b38abSRichard Cochran if (!netif_running(dev)) 20232e5b38abSRichard Cochran return -EINVAL; 20242e5b38abSRichard Cochran 202511f2c988SMugunthan V N switch (cmd) { 202611f2c988SMugunthan V N case SIOCSHWTSTAMP: 2027a5b4145bSBen Hutchings return cpsw_hwtstamp_set(dev, req); 2028a5b4145bSBen Hutchings case SIOCGHWTSTAMP: 2029a5b4145bSBen Hutchings return cpsw_hwtstamp_get(dev, req); 20302e5b38abSRichard Cochran } 20312e5b38abSRichard Cochran 2032606f3993SIvan Khoronzhuk if (!cpsw->slaves[slave_no].phy) 2033c1b59947SStefan Sørensen return -EOPNOTSUPP; 2034606f3993SIvan Khoronzhuk return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd); 203511f2c988SMugunthan V N } 203611f2c988SMugunthan V N 2037df828598SMugunthan V N static void cpsw_ndo_tx_timeout(struct net_device *ndev) 2038df828598SMugunthan V N { 2039df828598SMugunthan V N struct cpsw_priv *priv = netdev_priv(ndev); 20402c836bd9SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 2041e05107e6SIvan Khoronzhuk int ch; 2042df828598SMugunthan V N 2043df828598SMugunthan V N cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); 20448dc43ddcSTobias Klauser ndev->stats.tx_errors++; 20452c836bd9SIvan Khoronzhuk cpsw_intr_disable(cpsw); 2046e05107e6SIvan Khoronzhuk for (ch = 0; ch < cpsw->tx_ch_num; ch++) { 20478feb0a19SIvan Khoronzhuk cpdma_chan_stop(cpsw->txv[ch].ch); 20488feb0a19SIvan Khoronzhuk cpdma_chan_start(cpsw->txv[ch].ch); 2049e05107e6SIvan Khoronzhuk } 2050e05107e6SIvan Khoronzhuk 20512c836bd9SIvan Khoronzhuk cpsw_intr_enable(cpsw); 205275514b66SGrygorii Strashko netif_trans_update(ndev); 205375514b66SGrygorii Strashko netif_tx_wake_all_queues(ndev); 2054df828598SMugunthan V N } 2055df828598SMugunthan V N 2056dcfd8d58SMugunthan V N static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) 2057dcfd8d58SMugunthan V N { 2058dcfd8d58SMugunthan V N struct cpsw_priv *priv = netdev_priv(ndev); 2059dcfd8d58SMugunthan V N struct sockaddr *addr = (struct sockaddr *)p; 2060649a1688SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 2061dcfd8d58SMugunthan V N int flags = 0; 2062dcfd8d58SMugunthan V N u16 vid = 0; 2063a6c5d14fSGrygorii Strashko int ret; 2064dcfd8d58SMugunthan V N 2065dcfd8d58SMugunthan V N if (!is_valid_ether_addr(addr->sa_data)) 2066dcfd8d58SMugunthan V N return -EADDRNOTAVAIL; 2067dcfd8d58SMugunthan V N 206856e31bd8SIvan Khoronzhuk ret = pm_runtime_get_sync(cpsw->dev); 2069a6c5d14fSGrygorii Strashko if (ret < 0) { 207056e31bd8SIvan Khoronzhuk pm_runtime_put_noidle(cpsw->dev); 2071a6c5d14fSGrygorii Strashko 
return ret; 2072a6c5d14fSGrygorii Strashko } 2073a6c5d14fSGrygorii Strashko 2074606f3993SIvan Khoronzhuk if (cpsw->data.dual_emac) { 2075606f3993SIvan Khoronzhuk vid = cpsw->slaves[priv->emac_port].port_vlan; 2076dcfd8d58SMugunthan V N flags = ALE_VLAN; 2077dcfd8d58SMugunthan V N } 2078dcfd8d58SMugunthan V N 20792a05a622SIvan Khoronzhuk cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, 2080dcfd8d58SMugunthan V N flags, vid); 20812a05a622SIvan Khoronzhuk cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM, 2082dcfd8d58SMugunthan V N flags, vid); 2083dcfd8d58SMugunthan V N 2084dcfd8d58SMugunthan V N memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 2085dcfd8d58SMugunthan V N memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); 2086dcfd8d58SMugunthan V N for_each_slave(priv, cpsw_set_slave_mac, priv); 2087dcfd8d58SMugunthan V N 208856e31bd8SIvan Khoronzhuk pm_runtime_put(cpsw->dev); 2089a6c5d14fSGrygorii Strashko 2090dcfd8d58SMugunthan V N return 0; 2091dcfd8d58SMugunthan V N } 2092dcfd8d58SMugunthan V N 20933b72c2feSMugunthan V N static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, 20943b72c2feSMugunthan V N unsigned short vid) 20953b72c2feSMugunthan V N { 20963b72c2feSMugunthan V N int ret; 20979f6bd8faSMugunthan V N int unreg_mcast_mask = 0; 20985b3a5a14SIvan Khoronzhuk int mcast_mask; 20999f6bd8faSMugunthan V N u32 port_mask; 2100606f3993SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 21019f6bd8faSMugunthan V N 2102606f3993SIvan Khoronzhuk if (cpsw->data.dual_emac) { 21039f6bd8faSMugunthan V N port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST; 21049f6bd8faSMugunthan V N 21055b3a5a14SIvan Khoronzhuk mcast_mask = ALE_PORT_HOST; 21069f6bd8faSMugunthan V N if (priv->ndev->flags & IFF_ALLMULTI) 21075b3a5a14SIvan Khoronzhuk unreg_mcast_mask = mcast_mask; 21089f6bd8faSMugunthan V N } else { 21099f6bd8faSMugunthan V N port_mask = ALE_ALL_PORTS; 21105b3a5a14SIvan Khoronzhuk mcast_mask = port_mask; 21111e5c4bc4SLennart Sorensen 21121e5c4bc4SLennart Sorensen if (priv->ndev->flags & IFF_ALLMULTI) 21131e5c4bc4SLennart Sorensen unreg_mcast_mask = ALE_ALL_PORTS; 21141e5c4bc4SLennart Sorensen else 21151e5c4bc4SLennart Sorensen unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; 21169f6bd8faSMugunthan V N } 21173b72c2feSMugunthan V N 21182a05a622SIvan Khoronzhuk ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask, 211961f1cef9SGrygorii Strashko unreg_mcast_mask); 21203b72c2feSMugunthan V N if (ret != 0) 21213b72c2feSMugunthan V N return ret; 21223b72c2feSMugunthan V N 21232a05a622SIvan Khoronzhuk ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, 212471a2cbb7SGrygorii Strashko HOST_PORT_NUM, ALE_VLAN, vid); 21253b72c2feSMugunthan V N if (ret != 0) 21263b72c2feSMugunthan V N goto clean_vid; 21273b72c2feSMugunthan V N 21282a05a622SIvan Khoronzhuk ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 21295b3a5a14SIvan Khoronzhuk mcast_mask, ALE_VLAN, vid, 0); 21303b72c2feSMugunthan V N if (ret != 0) 21313b72c2feSMugunthan V N goto clean_vlan_ucast; 21323b72c2feSMugunthan V N return 0; 21333b72c2feSMugunthan V N 21343b72c2feSMugunthan V N clean_vlan_ucast: 21352a05a622SIvan Khoronzhuk cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 213671a2cbb7SGrygorii Strashko HOST_PORT_NUM, ALE_VLAN, vid); 21373b72c2feSMugunthan V N clean_vid: 21382a05a622SIvan Khoronzhuk cpsw_ale_del_vlan(cpsw->ale, vid, 0); 21393b72c2feSMugunthan V N return ret; 21403b72c2feSMugunthan V N } 21413b72c2feSMugunthan V N 21423b72c2feSMugunthan V N static int cpsw_ndo_vlan_rx_add_vid(struct 
net_device *ndev, 214380d5c368SPatrick McHardy __be16 proto, u16 vid) 21443b72c2feSMugunthan V N { 21453b72c2feSMugunthan V N struct cpsw_priv *priv = netdev_priv(ndev); 2146649a1688SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 2147a6c5d14fSGrygorii Strashko int ret; 21483b72c2feSMugunthan V N 2149606f3993SIvan Khoronzhuk if (vid == cpsw->data.default_vlan) 21503b72c2feSMugunthan V N return 0; 21513b72c2feSMugunthan V N 215256e31bd8SIvan Khoronzhuk ret = pm_runtime_get_sync(cpsw->dev); 2153a6c5d14fSGrygorii Strashko if (ret < 0) { 215456e31bd8SIvan Khoronzhuk pm_runtime_put_noidle(cpsw->dev); 2155a6c5d14fSGrygorii Strashko return ret; 2156a6c5d14fSGrygorii Strashko } 2157a6c5d14fSGrygorii Strashko 2158606f3993SIvan Khoronzhuk if (cpsw->data.dual_emac) { 215902a54164SMugunthan V N /* In dual EMAC, reserved VLAN id should not be used for 216002a54164SMugunthan V N * creating VLAN interfaces as this can break the dual 216102a54164SMugunthan V N * EMAC port separation 216202a54164SMugunthan V N */ 216302a54164SMugunthan V N int i; 216402a54164SMugunthan V N 2165606f3993SIvan Khoronzhuk for (i = 0; i < cpsw->data.slaves; i++) { 2166803c4f64SIvan Khoronzhuk if (vid == cpsw->slaves[i].port_vlan) { 2167803c4f64SIvan Khoronzhuk ret = -EINVAL; 2168803c4f64SIvan Khoronzhuk goto err; 2169803c4f64SIvan Khoronzhuk } 217002a54164SMugunthan V N } 217102a54164SMugunthan V N } 217202a54164SMugunthan V N 21733b72c2feSMugunthan V N dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); 2174a6c5d14fSGrygorii Strashko ret = cpsw_add_vlan_ale_entry(priv, vid); 2175803c4f64SIvan Khoronzhuk err: 217656e31bd8SIvan Khoronzhuk pm_runtime_put(cpsw->dev); 2177a6c5d14fSGrygorii Strashko return ret; 21783b72c2feSMugunthan V N } 21793b72c2feSMugunthan V N 21803b72c2feSMugunthan V N static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, 218180d5c368SPatrick McHardy __be16 proto, u16 vid) 21823b72c2feSMugunthan V N { 21833b72c2feSMugunthan V N struct cpsw_priv *priv = netdev_priv(ndev); 2184649a1688SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 21853b72c2feSMugunthan V N int ret; 21863b72c2feSMugunthan V N 2187606f3993SIvan Khoronzhuk if (vid == cpsw->data.default_vlan) 21883b72c2feSMugunthan V N return 0; 21893b72c2feSMugunthan V N 219056e31bd8SIvan Khoronzhuk ret = pm_runtime_get_sync(cpsw->dev); 2191a6c5d14fSGrygorii Strashko if (ret < 0) { 219256e31bd8SIvan Khoronzhuk pm_runtime_put_noidle(cpsw->dev); 2193a6c5d14fSGrygorii Strashko return ret; 2194a6c5d14fSGrygorii Strashko } 2195a6c5d14fSGrygorii Strashko 2196606f3993SIvan Khoronzhuk if (cpsw->data.dual_emac) { 219702a54164SMugunthan V N int i; 219802a54164SMugunthan V N 2199606f3993SIvan Khoronzhuk for (i = 0; i < cpsw->data.slaves; i++) { 2200606f3993SIvan Khoronzhuk if (vid == cpsw->slaves[i].port_vlan) 2201803c4f64SIvan Khoronzhuk goto err; 220202a54164SMugunthan V N } 220302a54164SMugunthan V N } 220402a54164SMugunthan V N 22053b72c2feSMugunthan V N dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); 22062a05a622SIvan Khoronzhuk ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0); 2207be35b982SIvan Khoronzhuk ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 220861f1cef9SGrygorii Strashko HOST_PORT_NUM, ALE_VLAN, vid); 2209be35b982SIvan Khoronzhuk ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 22103b72c2feSMugunthan V N 0, ALE_VLAN, vid); 221115180ecaSIvan Khoronzhuk ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid); 2212803c4f64SIvan Khoronzhuk err: 221356e31bd8SIvan Khoronzhuk pm_runtime_put(cpsw->dev); 
2214a6c5d14fSGrygorii Strashko return ret; 22153b72c2feSMugunthan V N } 22163b72c2feSMugunthan V N 221783fcad0cSIvan Khoronzhuk static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) 221883fcad0cSIvan Khoronzhuk { 221983fcad0cSIvan Khoronzhuk struct cpsw_priv *priv = netdev_priv(ndev); 222083fcad0cSIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 222152986a2fSIvan Khoronzhuk struct cpsw_slave *slave; 222232b78d85SIvan Khoronzhuk u32 min_rate; 222383fcad0cSIvan Khoronzhuk u32 ch_rate; 222452986a2fSIvan Khoronzhuk int i, ret; 222583fcad0cSIvan Khoronzhuk 222683fcad0cSIvan Khoronzhuk ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate; 222783fcad0cSIvan Khoronzhuk if (ch_rate == rate) 222883fcad0cSIvan Khoronzhuk return 0; 222983fcad0cSIvan Khoronzhuk 223032b78d85SIvan Khoronzhuk ch_rate = rate * 1000; 223183fcad0cSIvan Khoronzhuk min_rate = cpdma_chan_get_min_rate(cpsw->dma); 223232b78d85SIvan Khoronzhuk if ((ch_rate < min_rate && ch_rate)) { 223332b78d85SIvan Khoronzhuk dev_err(priv->dev, "The channel rate cannot be less than %dMbps", 223483fcad0cSIvan Khoronzhuk min_rate); 223583fcad0cSIvan Khoronzhuk return -EINVAL; 223683fcad0cSIvan Khoronzhuk } 223783fcad0cSIvan Khoronzhuk 22380be01b8eSIvan Khoronzhuk if (rate > cpsw->speed) { 223932b78d85SIvan Khoronzhuk dev_err(priv->dev, "The channel rate cannot be more than 2Gbps"); 224032b78d85SIvan Khoronzhuk return -EINVAL; 224132b78d85SIvan Khoronzhuk } 224232b78d85SIvan Khoronzhuk 224383fcad0cSIvan Khoronzhuk ret = pm_runtime_get_sync(cpsw->dev); 224483fcad0cSIvan Khoronzhuk if (ret < 0) { 224583fcad0cSIvan Khoronzhuk pm_runtime_put_noidle(cpsw->dev); 224683fcad0cSIvan Khoronzhuk return ret; 224783fcad0cSIvan Khoronzhuk } 224883fcad0cSIvan Khoronzhuk 224932b78d85SIvan Khoronzhuk ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate); 225083fcad0cSIvan Khoronzhuk pm_runtime_put(cpsw->dev); 225132b78d85SIvan Khoronzhuk 225232b78d85SIvan Khoronzhuk if (ret) 225332b78d85SIvan Khoronzhuk return ret; 225432b78d85SIvan Khoronzhuk 225552986a2fSIvan Khoronzhuk /* update rates for slaves tx queues */ 225652986a2fSIvan Khoronzhuk for (i = 0; i < cpsw->data.slaves; i++) { 225752986a2fSIvan Khoronzhuk slave = &cpsw->slaves[i]; 225852986a2fSIvan Khoronzhuk if (!slave->ndev) 225952986a2fSIvan Khoronzhuk continue; 226052986a2fSIvan Khoronzhuk 226152986a2fSIvan Khoronzhuk netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate; 226252986a2fSIvan Khoronzhuk } 226352986a2fSIvan Khoronzhuk 22649763a891SGrygorii Strashko cpsw_split_res(cpsw); 226583fcad0cSIvan Khoronzhuk return ret; 226683fcad0cSIvan Khoronzhuk } 226783fcad0cSIvan Khoronzhuk 22687929a668SIvan Khoronzhuk static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) 22697929a668SIvan Khoronzhuk { 22707929a668SIvan Khoronzhuk struct tc_mqprio_qopt_offload *mqprio = type_data; 22717929a668SIvan Khoronzhuk struct cpsw_priv *priv = netdev_priv(ndev); 22727929a668SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 22737929a668SIvan Khoronzhuk int fifo, num_tc, count, offset; 22747929a668SIvan Khoronzhuk struct cpsw_slave *slave; 22757929a668SIvan Khoronzhuk u32 tx_prio_map = 0; 22767929a668SIvan Khoronzhuk int i, tc, ret; 22777929a668SIvan Khoronzhuk 22787929a668SIvan Khoronzhuk num_tc = mqprio->qopt.num_tc; 22797929a668SIvan Khoronzhuk if (num_tc > CPSW_TC_NUM) 22807929a668SIvan Khoronzhuk return -EINVAL; 22817929a668SIvan Khoronzhuk 22827929a668SIvan Khoronzhuk if (mqprio->mode != TC_MQPRIO_MODE_DCB) 22837929a668SIvan Khoronzhuk return -EINVAL; 
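/* When traffic classes are configured, the loop below packs one 4-bit FIFO
 * number per packet priority (fifo << (4 * i), eight priorities), using
 * cpsw_tc_to_fifo() to pick each priority's destination FIFO; the resulting
 * map is written to the slave's CPSW1_TX_PRI_MAP or CPSW2_TX_PRI_MAP
 * register depending on the IP version, and the default TX_PRIORITY_MAPPING
 * is restored when the offload is disabled.
 */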
22847929a668SIvan Khoronzhuk 22857929a668SIvan Khoronzhuk ret = pm_runtime_get_sync(cpsw->dev); 22867929a668SIvan Khoronzhuk if (ret < 0) { 22877929a668SIvan Khoronzhuk pm_runtime_put_noidle(cpsw->dev); 22887929a668SIvan Khoronzhuk return ret; 22897929a668SIvan Khoronzhuk } 22907929a668SIvan Khoronzhuk 22917929a668SIvan Khoronzhuk if (num_tc) { 22927929a668SIvan Khoronzhuk for (i = 0; i < 8; i++) { 22937929a668SIvan Khoronzhuk tc = mqprio->qopt.prio_tc_map[i]; 22947929a668SIvan Khoronzhuk fifo = cpsw_tc_to_fifo(tc, num_tc); 22957929a668SIvan Khoronzhuk tx_prio_map |= fifo << (4 * i); 22967929a668SIvan Khoronzhuk } 22977929a668SIvan Khoronzhuk 22987929a668SIvan Khoronzhuk netdev_set_num_tc(ndev, num_tc); 22997929a668SIvan Khoronzhuk for (i = 0; i < num_tc; i++) { 23007929a668SIvan Khoronzhuk count = mqprio->qopt.count[i]; 23017929a668SIvan Khoronzhuk offset = mqprio->qopt.offset[i]; 23027929a668SIvan Khoronzhuk netdev_set_tc_queue(ndev, i, count, offset); 23037929a668SIvan Khoronzhuk } 23047929a668SIvan Khoronzhuk } 23057929a668SIvan Khoronzhuk 23067929a668SIvan Khoronzhuk if (!mqprio->qopt.hw) { 23077929a668SIvan Khoronzhuk /* restore default configuration */ 23087929a668SIvan Khoronzhuk netdev_reset_tc(ndev); 23097929a668SIvan Khoronzhuk tx_prio_map = TX_PRIORITY_MAPPING; 23107929a668SIvan Khoronzhuk } 23117929a668SIvan Khoronzhuk 23127929a668SIvan Khoronzhuk priv->mqprio_hw = mqprio->qopt.hw; 23137929a668SIvan Khoronzhuk 23147929a668SIvan Khoronzhuk offset = cpsw->version == CPSW_VERSION_1 ? 23157929a668SIvan Khoronzhuk CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; 23167929a668SIvan Khoronzhuk 23177929a668SIvan Khoronzhuk slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; 23187929a668SIvan Khoronzhuk slave_write(slave, tx_prio_map, offset); 23197929a668SIvan Khoronzhuk 23207929a668SIvan Khoronzhuk pm_runtime_put_sync(cpsw->dev); 23217929a668SIvan Khoronzhuk 23227929a668SIvan Khoronzhuk return 0; 23237929a668SIvan Khoronzhuk } 23247929a668SIvan Khoronzhuk 23257929a668SIvan Khoronzhuk static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, 23267929a668SIvan Khoronzhuk void *type_data) 23277929a668SIvan Khoronzhuk { 23287929a668SIvan Khoronzhuk switch (type) { 232957d90148SIvan Khoronzhuk case TC_SETUP_QDISC_CBS: 233057d90148SIvan Khoronzhuk return cpsw_set_cbs(ndev, type_data); 233157d90148SIvan Khoronzhuk 23327929a668SIvan Khoronzhuk case TC_SETUP_QDISC_MQPRIO: 23337929a668SIvan Khoronzhuk return cpsw_set_mqprio(ndev, type_data); 23347929a668SIvan Khoronzhuk 23357929a668SIvan Khoronzhuk default: 23367929a668SIvan Khoronzhuk return -EOPNOTSUPP; 23377929a668SIvan Khoronzhuk } 23387929a668SIvan Khoronzhuk } 23397929a668SIvan Khoronzhuk 23409ed4050cSIvan Khoronzhuk static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf) 23419ed4050cSIvan Khoronzhuk { 23429ed4050cSIvan Khoronzhuk struct bpf_prog *prog = bpf->prog; 23439ed4050cSIvan Khoronzhuk 23449ed4050cSIvan Khoronzhuk if (!priv->xdpi.prog && !prog) 23459ed4050cSIvan Khoronzhuk return 0; 23469ed4050cSIvan Khoronzhuk 23479ed4050cSIvan Khoronzhuk if (!xdp_attachment_flags_ok(&priv->xdpi, bpf)) 23489ed4050cSIvan Khoronzhuk return -EBUSY; 23499ed4050cSIvan Khoronzhuk 23509ed4050cSIvan Khoronzhuk WRITE_ONCE(priv->xdp_prog, prog); 23519ed4050cSIvan Khoronzhuk 23529ed4050cSIvan Khoronzhuk xdp_attachment_setup(&priv->xdpi, bpf); 23539ed4050cSIvan Khoronzhuk 23549ed4050cSIvan Khoronzhuk return 0; 23559ed4050cSIvan Khoronzhuk } 23569ed4050cSIvan Khoronzhuk 23579ed4050cSIvan Khoronzhuk static int 
cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) 23589ed4050cSIvan Khoronzhuk { 23599ed4050cSIvan Khoronzhuk struct cpsw_priv *priv = netdev_priv(ndev); 23609ed4050cSIvan Khoronzhuk 23619ed4050cSIvan Khoronzhuk switch (bpf->command) { 23629ed4050cSIvan Khoronzhuk case XDP_SETUP_PROG: 23639ed4050cSIvan Khoronzhuk return cpsw_xdp_prog_setup(priv, bpf); 23649ed4050cSIvan Khoronzhuk 23659ed4050cSIvan Khoronzhuk case XDP_QUERY_PROG: 23669ed4050cSIvan Khoronzhuk return xdp_attachment_query(&priv->xdpi, bpf); 23679ed4050cSIvan Khoronzhuk 23689ed4050cSIvan Khoronzhuk default: 23699ed4050cSIvan Khoronzhuk return -EINVAL; 23709ed4050cSIvan Khoronzhuk } 23719ed4050cSIvan Khoronzhuk } 23729ed4050cSIvan Khoronzhuk 23739ed4050cSIvan Khoronzhuk static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, 23749ed4050cSIvan Khoronzhuk struct xdp_frame **frames, u32 flags) 23759ed4050cSIvan Khoronzhuk { 23769ed4050cSIvan Khoronzhuk struct cpsw_priv *priv = netdev_priv(ndev); 23779ed4050cSIvan Khoronzhuk struct xdp_frame *xdpf; 23789ed4050cSIvan Khoronzhuk int i, drops = 0; 23799ed4050cSIvan Khoronzhuk 23809ed4050cSIvan Khoronzhuk if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 23819ed4050cSIvan Khoronzhuk return -EINVAL; 23829ed4050cSIvan Khoronzhuk 23839ed4050cSIvan Khoronzhuk for (i = 0; i < n; i++) { 23849ed4050cSIvan Khoronzhuk xdpf = frames[i]; 23859ed4050cSIvan Khoronzhuk if (xdpf->len < CPSW_MIN_PACKET_SIZE) { 23869ed4050cSIvan Khoronzhuk xdp_return_frame_rx_napi(xdpf); 23879ed4050cSIvan Khoronzhuk drops++; 23889ed4050cSIvan Khoronzhuk continue; 23899ed4050cSIvan Khoronzhuk } 23909ed4050cSIvan Khoronzhuk 23919ed4050cSIvan Khoronzhuk if (cpsw_xdp_tx_frame(priv, xdpf, NULL)) 23929ed4050cSIvan Khoronzhuk drops++; 23939ed4050cSIvan Khoronzhuk } 23949ed4050cSIvan Khoronzhuk 23959ed4050cSIvan Khoronzhuk return n - drops; 23969ed4050cSIvan Khoronzhuk } 23979ed4050cSIvan Khoronzhuk 2398026cc9c3SDavid S. Miller #ifdef CONFIG_NET_POLL_CONTROLLER 2399026cc9c3SDavid S. Miller static void cpsw_ndo_poll_controller(struct net_device *ndev) 2400026cc9c3SDavid S. Miller { 2401026cc9c3SDavid S. Miller struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 2402026cc9c3SDavid S. Miller 2403026cc9c3SDavid S. Miller cpsw_intr_disable(cpsw); 2404026cc9c3SDavid S. Miller cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw); 2405026cc9c3SDavid S. Miller cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw); 2406026cc9c3SDavid S. Miller cpsw_intr_enable(cpsw); 2407026cc9c3SDavid S. Miller } 2408026cc9c3SDavid S. Miller #endif 2409026cc9c3SDavid S. 
Miller 2410df828598SMugunthan V N static const struct net_device_ops cpsw_netdev_ops = { 2411df828598SMugunthan V N .ndo_open = cpsw_ndo_open, 2412df828598SMugunthan V N .ndo_stop = cpsw_ndo_stop, 2413df828598SMugunthan V N .ndo_start_xmit = cpsw_ndo_start_xmit, 2414dcfd8d58SMugunthan V N .ndo_set_mac_address = cpsw_ndo_set_mac_address, 24152e5b38abSRichard Cochran .ndo_do_ioctl = cpsw_ndo_ioctl, 2416df828598SMugunthan V N .ndo_validate_addr = eth_validate_addr, 2417df828598SMugunthan V N .ndo_tx_timeout = cpsw_ndo_tx_timeout, 24185c50a856SMugunthan V N .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, 241983fcad0cSIvan Khoronzhuk .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate, 2420df828598SMugunthan V N #ifdef CONFIG_NET_POLL_CONTROLLER 2421df828598SMugunthan V N .ndo_poll_controller = cpsw_ndo_poll_controller, 2422df828598SMugunthan V N #endif 24233b72c2feSMugunthan V N .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, 24243b72c2feSMugunthan V N .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, 24257929a668SIvan Khoronzhuk .ndo_setup_tc = cpsw_ndo_setup_tc, 24269ed4050cSIvan Khoronzhuk .ndo_bpf = cpsw_ndo_bpf, 24279ed4050cSIvan Khoronzhuk .ndo_xdp_xmit = cpsw_ndo_xdp_xmit, 2428df828598SMugunthan V N }; 2429df828598SMugunthan V N 2430df828598SMugunthan V N static void cpsw_get_drvinfo(struct net_device *ndev, 2431df828598SMugunthan V N struct ethtool_drvinfo *info) 2432df828598SMugunthan V N { 2433649a1688SIvan Khoronzhuk struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 243456e31bd8SIvan Khoronzhuk struct platform_device *pdev = to_platform_device(cpsw->dev); 24357826d43fSJiri Pirko 243652c4f0ecSMugunthan V N strlcpy(info->driver, "cpsw", sizeof(info->driver)); 24377826d43fSJiri Pirko strlcpy(info->version, "1.0", sizeof(info->version)); 243856e31bd8SIvan Khoronzhuk strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); 2439df828598SMugunthan V N } 2440df828598SMugunthan V N 24411923d6e4SMugunthan V N static int cpsw_set_pauseparam(struct net_device *ndev, 24421923d6e4SMugunthan V N struct ethtool_pauseparam *pause) 24431923d6e4SMugunthan V N { 24441923d6e4SMugunthan V N struct cpsw_priv *priv = netdev_priv(ndev); 24451923d6e4SMugunthan V N bool link; 24461923d6e4SMugunthan V N 24471923d6e4SMugunthan V N priv->rx_pause = pause->rx_pause ? true : false; 24481923d6e4SMugunthan V N priv->tx_pause = pause->tx_pause ? 
true : false; 24491923d6e4SMugunthan V N 24501923d6e4SMugunthan V N for_each_slave(priv, _cpsw_adjust_link, priv, &link); 24511923d6e4SMugunthan V N return 0; 24521923d6e4SMugunthan V N } 24531923d6e4SMugunthan V N 2454022d7ad7SIvan Khoronzhuk static int cpsw_set_channels(struct net_device *ndev, 2455022d7ad7SIvan Khoronzhuk struct ethtool_channels *chs) 2456022d7ad7SIvan Khoronzhuk { 2457c24eef28SGrygorii Strashko return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler); 2458be034fc1SGrygorii Strashko } 2459be034fc1SGrygorii Strashko 2460df828598SMugunthan V N static const struct ethtool_ops cpsw_ethtool_ops = { 2461df828598SMugunthan V N .get_drvinfo = cpsw_get_drvinfo, 2462df828598SMugunthan V N .get_msglevel = cpsw_get_msglevel, 2463df828598SMugunthan V N .set_msglevel = cpsw_set_msglevel, 2464df828598SMugunthan V N .get_link = ethtool_op_get_link, 24652e5b38abSRichard Cochran .get_ts_info = cpsw_get_ts_info, 2466ff5b8ef2SMugunthan V N .get_coalesce = cpsw_get_coalesce, 2467ff5b8ef2SMugunthan V N .set_coalesce = cpsw_set_coalesce, 2468d9718546SMugunthan V N .get_sset_count = cpsw_get_sset_count, 2469d9718546SMugunthan V N .get_strings = cpsw_get_strings, 2470d9718546SMugunthan V N .get_ethtool_stats = cpsw_get_ethtool_stats, 24711923d6e4SMugunthan V N .get_pauseparam = cpsw_get_pauseparam, 24721923d6e4SMugunthan V N .set_pauseparam = cpsw_set_pauseparam, 2473d8a64420SMatus Ujhelyi .get_wol = cpsw_get_wol, 2474d8a64420SMatus Ujhelyi .set_wol = cpsw_set_wol, 247552c4f0ecSMugunthan V N .get_regs_len = cpsw_get_regs_len, 247652c4f0ecSMugunthan V N .get_regs = cpsw_get_regs, 24777898b1daSGrygorii Strashko .begin = cpsw_ethtool_op_begin, 24787898b1daSGrygorii Strashko .complete = cpsw_ethtool_op_complete, 2479ce52c744SIvan Khoronzhuk .get_channels = cpsw_get_channels, 2480ce52c744SIvan Khoronzhuk .set_channels = cpsw_set_channels, 24812479876dSPhilippe Reynes .get_link_ksettings = cpsw_get_link_ksettings, 24822479876dSPhilippe Reynes .set_link_ksettings = cpsw_set_link_ksettings, 2483a0909949SYegor Yefremov .get_eee = cpsw_get_eee, 2484a0909949SYegor Yefremov .set_eee = cpsw_set_eee, 24856bb10c2bSYegor Yefremov .nway_reset = cpsw_nway_reset, 2486be034fc1SGrygorii Strashko .get_ringparam = cpsw_get_ringparam, 2487be034fc1SGrygorii Strashko .set_ringparam = cpsw_set_ringparam, 2488df828598SMugunthan V N }; 2489df828598SMugunthan V N 2490552165bcSDavid Rivshin static int cpsw_probe_dt(struct cpsw_platform_data *data, 24912eb32b0aSMugunthan V N struct platform_device *pdev) 24922eb32b0aSMugunthan V N { 24932eb32b0aSMugunthan V N struct device_node *node = pdev->dev.of_node; 24942eb32b0aSMugunthan V N struct device_node *slave_node; 24952eb32b0aSMugunthan V N int i = 0, ret; 24962eb32b0aSMugunthan V N u32 prop; 24972eb32b0aSMugunthan V N 24982eb32b0aSMugunthan V N if (!node) 24992eb32b0aSMugunthan V N return -EINVAL; 25002eb32b0aSMugunthan V N 25012eb32b0aSMugunthan V N if (of_property_read_u32(node, "slaves", &prop)) { 250288c99ff6SGeorge Cherian dev_err(&pdev->dev, "Missing slaves property in the DT.\n"); 25032eb32b0aSMugunthan V N return -EINVAL; 25042eb32b0aSMugunthan V N } 25052eb32b0aSMugunthan V N data->slaves = prop; 25062eb32b0aSMugunthan V N 2507e86ac13bSMugunthan V N if (of_property_read_u32(node, "active_slave", &prop)) { 250888c99ff6SGeorge Cherian dev_err(&pdev->dev, "Missing active_slave property in the DT.\n"); 2509aa1a15e2SDaniel Mack return -EINVAL; 251078ca0b28SRichard Cochran } 2511e86ac13bSMugunthan V N data->active_slave = prop; 251278ca0b28SRichard Cochran 
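	/* For illustration only: the properties consumed by this function map
	 * roughly onto a devicetree node shaped like the sketch below. Labels,
	 * node names and values here are made up and not authoritative; the DT
	 * binding documentation is the reference.
	 *
	 *	mac: ethernet {
	 *		compatible = "ti,cpsw";
	 *		slaves = <2>;
	 *		active_slave = <0>;
	 *		cpdma_channels = <8>;
	 *		ale_entries = <1024>;
	 *		bd_ram_size = <0x2000>;
	 *		mac_control = <0x20>;
	 *		dual_emac;
	 *
	 *		cpsw_emac0: slave@200 {
	 *			phy-handle = <&ethphy0>;
	 *			phy-mode = "rgmii-txid";
	 *			dual_emac_res_vlan = <1>;
	 *		};
	 *	};
	 */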
2513a86854d0SKees Cook data->slave_data = devm_kcalloc(&pdev->dev, 2514a86854d0SKees Cook data->slaves, 2515a86854d0SKees Cook sizeof(struct cpsw_slave_data), 2516b2adaca9SJoe Perches GFP_KERNEL); 2517b2adaca9SJoe Perches if (!data->slave_data) 2518aa1a15e2SDaniel Mack return -ENOMEM; 25192eb32b0aSMugunthan V N 25202eb32b0aSMugunthan V N if (of_property_read_u32(node, "cpdma_channels", &prop)) { 252188c99ff6SGeorge Cherian dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n"); 2522aa1a15e2SDaniel Mack return -EINVAL; 25232eb32b0aSMugunthan V N } 25242eb32b0aSMugunthan V N data->channels = prop; 25252eb32b0aSMugunthan V N 25262eb32b0aSMugunthan V N if (of_property_read_u32(node, "ale_entries", &prop)) { 252788c99ff6SGeorge Cherian dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n"); 2528aa1a15e2SDaniel Mack return -EINVAL; 25292eb32b0aSMugunthan V N } 25302eb32b0aSMugunthan V N data->ale_entries = prop; 25312eb32b0aSMugunthan V N 25322eb32b0aSMugunthan V N if (of_property_read_u32(node, "bd_ram_size", &prop)) { 253388c99ff6SGeorge Cherian dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n"); 2534aa1a15e2SDaniel Mack return -EINVAL; 25352eb32b0aSMugunthan V N } 25362eb32b0aSMugunthan V N data->bd_ram_size = prop; 25372eb32b0aSMugunthan V N 25382eb32b0aSMugunthan V N if (of_property_read_u32(node, "mac_control", &prop)) { 253988c99ff6SGeorge Cherian dev_err(&pdev->dev, "Missing mac_control property in the DT.\n"); 2540aa1a15e2SDaniel Mack return -EINVAL; 25412eb32b0aSMugunthan V N } 25422eb32b0aSMugunthan V N data->mac_control = prop; 25432eb32b0aSMugunthan V N 2544281abd96SMarkus Pargmann if (of_property_read_bool(node, "dual_emac")) 2545281abd96SMarkus Pargmann data->dual_emac = 1; 2546d9ba8f9eSMugunthan V N 25471fb19aa7SVaibhav Hiremath /* 25481fb19aa7SVaibhav Hiremath * Populate all the child nodes here... 
25491fb19aa7SVaibhav Hiremath */ 25501fb19aa7SVaibhav Hiremath ret = of_platform_populate(node, NULL, NULL, &pdev->dev); 25511fb19aa7SVaibhav Hiremath /* We do not want to force this, as in some cases the node may not have any child nodes */ 25521fb19aa7SVaibhav Hiremath if (ret) 255388c99ff6SGeorge Cherian dev_warn(&pdev->dev, "Doesn't have any child node\n"); 25541fb19aa7SVaibhav Hiremath 25558658aaf2SBen Hutchings for_each_available_child_of_node(node, slave_node) { 2556549985eeSRichard Cochran struct cpsw_slave_data *slave_data = data->slave_data + i; 2557549985eeSRichard Cochran const void *mac_addr = NULL; 2558549985eeSRichard Cochran int lenp; 2559549985eeSRichard Cochran const __be32 *parp; 2560549985eeSRichard Cochran 2561f468b10eSMarkus Pargmann /* If this is not a slave child node, continue */ 2562bf5849f1SRob Herring if (!of_node_name_eq(slave_node, "slave")) 2563f468b10eSMarkus Pargmann continue; 2564f468b10eSMarkus Pargmann 25653ff18849SGrygorii Strashko slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node, 25663ff18849SGrygorii Strashko NULL); 25673ff18849SGrygorii Strashko if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) && 25683ff18849SGrygorii Strashko IS_ERR(slave_data->ifphy)) { 25693ff18849SGrygorii Strashko ret = PTR_ERR(slave_data->ifphy); 25703ff18849SGrygorii Strashko dev_err(&pdev->dev, 25713ff18849SGrygorii Strashko "%d: Error retrieving port phy: %d\n", i, ret); 25723cd6e20fSNishka Dasgupta goto err_node_put; 25733ff18849SGrygorii Strashko } 25743ff18849SGrygorii Strashko 2575337d1727SMarek Vasut slave_data->slave_node = slave_node; 2576552165bcSDavid Rivshin slave_data->phy_node = of_parse_phandle(slave_node, 2577552165bcSDavid Rivshin "phy-handle", 0); 2578f1eea5c1SDavid Rivshin parp = of_get_property(slave_node, "phy_id", &lenp); 2579ae092b5bSDavid Rivshin if (slave_data->phy_node) { 2580ae092b5bSDavid Rivshin dev_dbg(&pdev->dev, 2581f7ce9103SRob Herring "slave[%d] using phy-handle=\"%pOF\"\n", 2582f7ce9103SRob Herring i, slave_data->phy_node); 2583ae092b5bSDavid Rivshin } else if (of_phy_is_fixed_link(slave_node)) { 2584dfc0a6d3SDavid Rivshin /* In the case of a fixed PHY, the DT node associated 2585dfc0a6d3SDavid Rivshin * to the PHY is the Ethernet MAC DT node.
2586dfc0a6d3SDavid Rivshin */ 25871f71e8c9SMarkus Brunner ret = of_phy_register_fixed_link(slave_node); 258823a09873SJohan Hovold if (ret) { 258923a09873SJohan Hovold if (ret != -EPROBE_DEFER) 259023a09873SJohan Hovold dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret); 25913cd6e20fSNishka Dasgupta goto err_node_put; 259223a09873SJohan Hovold } 259306cd6d6eSDavid Rivshin slave_data->phy_node = of_node_get(slave_node); 2594f1eea5c1SDavid Rivshin } else if (parp) { 2595f1eea5c1SDavid Rivshin u32 phyid; 2596f1eea5c1SDavid Rivshin struct device_node *mdio_node; 2597f1eea5c1SDavid Rivshin struct platform_device *mdio; 2598f1eea5c1SDavid Rivshin 2599f1eea5c1SDavid Rivshin if (lenp != (sizeof(__be32) * 2)) { 2600f1eea5c1SDavid Rivshin dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i); 260147276fccSMugunthan V N goto no_phy_slave; 2602549985eeSRichard Cochran } 2603549985eeSRichard Cochran mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 2604549985eeSRichard Cochran phyid = be32_to_cpup(parp+1); 2605549985eeSRichard Cochran mdio = of_find_device_by_node(mdio_node); 260660e71ab5SJohan Hovold of_node_put(mdio_node); 26076954cc1fSJohan Hovold if (!mdio) { 260856fdb2e0SMarkus Pargmann dev_err(&pdev->dev, "Missing mdio platform device\n"); 26093cd6e20fSNishka Dasgupta ret = -EINVAL; 26103cd6e20fSNishka Dasgupta goto err_node_put; 26116954cc1fSJohan Hovold } 2612549985eeSRichard Cochran snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2613549985eeSRichard Cochran PHY_ID_FMT, mdio->name, phyid); 261486e1d5adSJohan Hovold put_device(&mdio->dev); 2615f1eea5c1SDavid Rivshin } else { 2616ae092b5bSDavid Rivshin dev_err(&pdev->dev, 2617ae092b5bSDavid Rivshin "No slave[%d] phy_id, phy-handle, or fixed-link property\n", 2618ae092b5bSDavid Rivshin i); 2619f1eea5c1SDavid Rivshin goto no_phy_slave; 2620f1eea5c1SDavid Rivshin } 26210c65b2b9SAndrew Lunn ret = of_get_phy_mode(slave_node, &slave_data->phy_if); 26220c65b2b9SAndrew Lunn if (ret) { 262347276fccSMugunthan V N dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", 262447276fccSMugunthan V N i); 26253cd6e20fSNishka Dasgupta goto err_node_put; 262647276fccSMugunthan V N } 262747276fccSMugunthan V N 262847276fccSMugunthan V N no_phy_slave: 2629549985eeSRichard Cochran mac_addr = of_get_mac_address(slave_node); 2630a51645f7SPetr Štetiar if (!IS_ERR(mac_addr)) { 26312d2924afSPetr Štetiar ether_addr_copy(slave_data->mac_addr, mac_addr); 26320ba517b1SMarkus Pargmann } else { 2633b6745f6eSMugunthan V N ret = ti_cm_get_macid(&pdev->dev, i, 26340ba517b1SMarkus Pargmann slave_data->mac_addr); 26350ba517b1SMarkus Pargmann if (ret) 26363cd6e20fSNishka Dasgupta goto err_node_put; 26370ba517b1SMarkus Pargmann } 2638d9ba8f9eSMugunthan V N if (data->dual_emac) { 263991c4166cSMugunthan V N if (of_property_read_u32(slave_node, "dual_emac_res_vlan", 2640d9ba8f9eSMugunthan V N &prop)) { 264188c99ff6SGeorge Cherian dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n"); 2642d9ba8f9eSMugunthan V N slave_data->dual_emac_res_vlan = i+1; 264388c99ff6SGeorge Cherian dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n", 2644d9ba8f9eSMugunthan V N slave_data->dual_emac_res_vlan, i); 2645d9ba8f9eSMugunthan V N } else { 2646d9ba8f9eSMugunthan V N slave_data->dual_emac_res_vlan = prop; 2647d9ba8f9eSMugunthan V N } 2648d9ba8f9eSMugunthan V N } 2649d9ba8f9eSMugunthan V N 2650549985eeSRichard Cochran i++; 26513cd6e20fSNishka Dasgupta if (i == data->slaves) { 26523cd6e20fSNishka Dasgupta ret = 0; 26533cd6e20fSNishka 
Dasgupta goto err_node_put; 26543cd6e20fSNishka Dasgupta } 2655549985eeSRichard Cochran } 2656549985eeSRichard Cochran 26572eb32b0aSMugunthan V N return 0; 26583cd6e20fSNishka Dasgupta 26593cd6e20fSNishka Dasgupta err_node_put: 26603cd6e20fSNishka Dasgupta of_node_put(slave_node); 26613cd6e20fSNishka Dasgupta return ret; 26622eb32b0aSMugunthan V N } 26632eb32b0aSMugunthan V N 2664a4e32b0dSJohan Hovold static void cpsw_remove_dt(struct platform_device *pdev) 2665a4e32b0dSJohan Hovold { 2666bfe59032SIvan Khoronzhuk struct cpsw_common *cpsw = platform_get_drvdata(pdev); 26678cbcc466SJohan Hovold struct cpsw_platform_data *data = &cpsw->data; 26688cbcc466SJohan Hovold struct device_node *node = pdev->dev.of_node; 26698cbcc466SJohan Hovold struct device_node *slave_node; 26708cbcc466SJohan Hovold int i = 0; 26718cbcc466SJohan Hovold 26728cbcc466SJohan Hovold for_each_available_child_of_node(node, slave_node) { 26738cbcc466SJohan Hovold struct cpsw_slave_data *slave_data = &data->slave_data[i]; 26748cbcc466SJohan Hovold 2675bf5849f1SRob Herring if (!of_node_name_eq(slave_node, "slave")) 26768cbcc466SJohan Hovold continue; 26778cbcc466SJohan Hovold 26783f65047cSJohan Hovold if (of_phy_is_fixed_link(slave_node)) 26793f65047cSJohan Hovold of_phy_deregister_fixed_link(slave_node); 26808cbcc466SJohan Hovold 26818cbcc466SJohan Hovold of_node_put(slave_data->phy_node); 26828cbcc466SJohan Hovold 26838cbcc466SJohan Hovold i++; 26843cd6e20fSNishka Dasgupta if (i == data->slaves) { 26853cd6e20fSNishka Dasgupta of_node_put(slave_node); 26868cbcc466SJohan Hovold break; 26878cbcc466SJohan Hovold } 26883cd6e20fSNishka Dasgupta } 26898cbcc466SJohan Hovold 2690a4e32b0dSJohan Hovold of_platform_depopulate(&pdev->dev); 2691a4e32b0dSJohan Hovold } 2692a4e32b0dSJohan Hovold 269356e31bd8SIvan Khoronzhuk static int cpsw_probe_dual_emac(struct cpsw_priv *priv) 2694d9ba8f9eSMugunthan V N { 2695606f3993SIvan Khoronzhuk struct cpsw_common *cpsw = priv->cpsw; 2696606f3993SIvan Khoronzhuk struct cpsw_platform_data *data = &cpsw->data; 2697d9ba8f9eSMugunthan V N struct net_device *ndev; 2698d9ba8f9eSMugunthan V N struct cpsw_priv *priv_sl2; 2699e38b5a3dSIvan Khoronzhuk int ret = 0; 2700d9ba8f9eSMugunthan V N 2701d183a942SGrygorii Strashko ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv), 2702d183a942SGrygorii Strashko CPSW_MAX_QUEUES, CPSW_MAX_QUEUES); 2703d9ba8f9eSMugunthan V N if (!ndev) { 270456e31bd8SIvan Khoronzhuk dev_err(cpsw->dev, "cpsw: error allocating net_device\n"); 2705d9ba8f9eSMugunthan V N return -ENOMEM; 2706d9ba8f9eSMugunthan V N } 2707d9ba8f9eSMugunthan V N 2708d9ba8f9eSMugunthan V N priv_sl2 = netdev_priv(ndev); 2709606f3993SIvan Khoronzhuk priv_sl2->cpsw = cpsw; 2710d9ba8f9eSMugunthan V N priv_sl2->ndev = ndev; 2711d9ba8f9eSMugunthan V N priv_sl2->dev = &ndev->dev; 2712d9ba8f9eSMugunthan V N priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 2713d9ba8f9eSMugunthan V N 2714d9ba8f9eSMugunthan V N if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { 2715d9ba8f9eSMugunthan V N memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, 2716d9ba8f9eSMugunthan V N ETH_ALEN); 271756e31bd8SIvan Khoronzhuk dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n", 271856e31bd8SIvan Khoronzhuk priv_sl2->mac_addr); 2719d9ba8f9eSMugunthan V N } else { 27206c1f0a1fSJoe Perches eth_random_addr(priv_sl2->mac_addr); 272156e31bd8SIvan Khoronzhuk dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n", 272256e31bd8SIvan Khoronzhuk priv_sl2->mac_addr); 2723d9ba8f9eSMugunthan V N } 2724d9ba8f9eSMugunthan V 
N memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); 2725d9ba8f9eSMugunthan V N 2726d9ba8f9eSMugunthan V N priv_sl2->emac_port = 1; 2727606f3993SIvan Khoronzhuk cpsw->slaves[1].ndev = ndev; 2728193736c8SIvan Khoronzhuk ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; 2729d9ba8f9eSMugunthan V N 2730d9ba8f9eSMugunthan V N ndev->netdev_ops = &cpsw_netdev_ops; 27317ad24ea4SWilfried Klaebe ndev->ethtool_ops = &cpsw_ethtool_ops; 2732d9ba8f9eSMugunthan V N 2733d9ba8f9eSMugunthan V N /* register the network device */ 273456e31bd8SIvan Khoronzhuk SET_NETDEV_DEV(ndev, cpsw->dev); 2735337d1727SMarek Vasut ndev->dev.of_node = cpsw->slaves[1].data->slave_node; 2736d9ba8f9eSMugunthan V N ret = register_netdev(ndev); 2737d183a942SGrygorii Strashko if (ret) 273856e31bd8SIvan Khoronzhuk dev_err(cpsw->dev, "cpsw: error registering net device\n"); 2739d9ba8f9eSMugunthan V N 2740d9ba8f9eSMugunthan V N return ret; 2741d9ba8f9eSMugunthan V N } 2742d9ba8f9eSMugunthan V N 27437da11600SMugunthan V N static const struct of_device_id cpsw_of_mtable[] = { 27449611d6d6SIvan Khoronzhuk { .compatible = "ti,cpsw"}, 27459611d6d6SIvan Khoronzhuk { .compatible = "ti,am335x-cpsw"}, 27469611d6d6SIvan Khoronzhuk { .compatible = "ti,am4372-cpsw"}, 27479611d6d6SIvan Khoronzhuk { .compatible = "ti,dra7-cpsw"}, 27487da11600SMugunthan V N { /* sentinel */ }, 27497da11600SMugunthan V N }; 27507da11600SMugunthan V N MODULE_DEVICE_TABLE(of, cpsw_of_mtable); 27517da11600SMugunthan V N 27529611d6d6SIvan Khoronzhuk static const struct soc_device_attribute cpsw_soc_devices[] = { 27539611d6d6SIvan Khoronzhuk { .family = "AM33xx", .revision = "ES1.0"}, 27549611d6d6SIvan Khoronzhuk { /* sentinel */ } 27559611d6d6SIvan Khoronzhuk }; 27569611d6d6SIvan Khoronzhuk 2757663e12e6SBill Pemberton static int cpsw_probe(struct platform_device *pdev) 2758df828598SMugunthan V N { 2759c8fb5668SGrygorii Strashko struct device *dev = &pdev->dev; 2760ef4183a1SIvan Khoronzhuk struct clk *clk; 2761d1bd9acfSSebastian Siewior struct cpsw_platform_data *data; 2762df828598SMugunthan V N struct net_device *ndev; 2763df828598SMugunthan V N struct cpsw_priv *priv; 2764aa1a15e2SDaniel Mack void __iomem *ss_regs; 2765c8ace62fSYueHaibing struct resource *ss_res; 27661d147ccbSMugunthan V N struct gpio_descs *mode; 27679611d6d6SIvan Khoronzhuk const struct soc_device_attribute *soc; 2768649a1688SIvan Khoronzhuk struct cpsw_common *cpsw; 2769e6a84624SGrygorii Strashko int ret = 0, ch; 27705087b915SFelipe Balbi int irq; 2771df828598SMugunthan V N 2772c8fb5668SGrygorii Strashko cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL); 27733420ea88SJohan Hovold if (!cpsw) 27743420ea88SJohan Hovold return -ENOMEM; 27753420ea88SJohan Hovold 27762d683eaaSAntoine Tenart platform_set_drvdata(pdev, cpsw); 2777c8fb5668SGrygorii Strashko cpsw->dev = dev; 2778649a1688SIvan Khoronzhuk 2779c8fb5668SGrygorii Strashko mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); 27801d147ccbSMugunthan V N if (IS_ERR(mode)) { 27811d147ccbSMugunthan V N ret = PTR_ERR(mode); 2782c8fb5668SGrygorii Strashko dev_err(dev, "gpio request failed, ret %d\n", ret); 2783d183a942SGrygorii Strashko return ret; 27841d147ccbSMugunthan V N } 27851d147ccbSMugunthan V N 278683a8471bSGrygorii Strashko clk = devm_clk_get(dev, "fck"); 278783a8471bSGrygorii Strashko if (IS_ERR(clk)) { 2788ac97a359SYueHaibing ret = PTR_ERR(clk); 278983a8471bSGrygorii Strashko dev_err(dev, "fck is not found %d\n", ret); 279083a8471bSGrygorii Strashko return ret; 279183a8471bSGrygorii Strashko 
} 279283a8471bSGrygorii Strashko cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; 279383a8471bSGrygorii Strashko 279483a8471bSGrygorii Strashko ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 279583a8471bSGrygorii Strashko ss_regs = devm_ioremap_resource(dev, ss_res); 279683a8471bSGrygorii Strashko if (IS_ERR(ss_regs)) 279783a8471bSGrygorii Strashko return PTR_ERR(ss_regs); 279883a8471bSGrygorii Strashko cpsw->regs = ss_regs; 279983a8471bSGrygorii Strashko 2800c8ace62fSYueHaibing cpsw->wr_regs = devm_platform_ioremap_resource(pdev, 1); 280183a8471bSGrygorii Strashko if (IS_ERR(cpsw->wr_regs)) 280283a8471bSGrygorii Strashko return PTR_ERR(cpsw->wr_regs); 280383a8471bSGrygorii Strashko 280483a8471bSGrygorii Strashko /* RX IRQ */ 280583a8471bSGrygorii Strashko irq = platform_get_irq(pdev, 1); 280683a8471bSGrygorii Strashko if (irq < 0) 280783a8471bSGrygorii Strashko return irq; 280883a8471bSGrygorii Strashko cpsw->irqs_table[0] = irq; 280983a8471bSGrygorii Strashko 281083a8471bSGrygorii Strashko /* TX IRQ */ 281183a8471bSGrygorii Strashko irq = platform_get_irq(pdev, 2); 281283a8471bSGrygorii Strashko if (irq < 0) 281383a8471bSGrygorii Strashko return irq; 281483a8471bSGrygorii Strashko cpsw->irqs_table[1] = irq; 281583a8471bSGrygorii Strashko 28161fb19aa7SVaibhav Hiremath /* 28171fb19aa7SVaibhav Hiremath * This may be required here for child devices. 28181fb19aa7SVaibhav Hiremath */ 2819c8fb5668SGrygorii Strashko pm_runtime_enable(dev); 28201fb19aa7SVaibhav Hiremath 2821a4e32b0dSJohan Hovold /* Need to enable clocks with runtime PM api to access module 2822a4e32b0dSJohan Hovold * registers 2823a4e32b0dSJohan Hovold */ 2824c8fb5668SGrygorii Strashko ret = pm_runtime_get_sync(dev); 2825a4e32b0dSJohan Hovold if (ret < 0) { 2826c8fb5668SGrygorii Strashko pm_runtime_put_noidle(dev); 2827aa1a15e2SDaniel Mack goto clean_runtime_disable_ret; 28282eb32b0aSMugunthan V N } 2829a4e32b0dSJohan Hovold 283023a09873SJohan Hovold ret = cpsw_probe_dt(&cpsw->data, pdev); 283123a09873SJohan Hovold if (ret) 2832a4e32b0dSJohan Hovold goto clean_dt_ret; 283323a09873SJohan Hovold 283483a8471bSGrygorii Strashko soc = soc_device_match(cpsw_soc_devices); 283583a8471bSGrygorii Strashko if (soc) 283683a8471bSGrygorii Strashko cpsw->quirk_irq = 1; 283783a8471bSGrygorii Strashko 2838606f3993SIvan Khoronzhuk data = &cpsw->data; 2839c8fb5668SGrygorii Strashko cpsw->slaves = devm_kcalloc(dev, 2840a86854d0SKees Cook data->slaves, sizeof(struct cpsw_slave), 2841df828598SMugunthan V N GFP_KERNEL); 2842606f3993SIvan Khoronzhuk if (!cpsw->slaves) { 2843aa1a15e2SDaniel Mack ret = -ENOMEM; 2844a4e32b0dSJohan Hovold goto clean_dt_ret; 2845df828598SMugunthan V N } 2846df828598SMugunthan V N 284783a8471bSGrygorii Strashko cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE); 2848c24eef28SGrygorii Strashko cpsw->descs_pool_size = descs_pool_size; 2849df828598SMugunthan V N 2850e6a84624SGrygorii Strashko ret = cpsw_init_common(cpsw, ss_regs, ale_ageout, 2851e6a84624SGrygorii Strashko ss_res->start + CPSW2_BD_OFFSET, 2852e6a84624SGrygorii Strashko descs_pool_size); 2853e6a84624SGrygorii Strashko if (ret) 2854a4e32b0dSJohan Hovold goto clean_dt_ret; 28558a2c9a5aSGrygorii Strashko 285683a8471bSGrygorii Strashko ch = cpsw->quirk_irq ? 
0 : 7; 285783a8471bSGrygorii Strashko cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0); 285883a8471bSGrygorii Strashko if (IS_ERR(cpsw->txv[0].ch)) { 285983a8471bSGrygorii Strashko dev_err(dev, "error initializing tx dma channel\n"); 286083a8471bSGrygorii Strashko ret = PTR_ERR(cpsw->txv[0].ch); 286183a8471bSGrygorii Strashko goto clean_cpts; 2862df828598SMugunthan V N } 2863df828598SMugunthan V N 286483a8471bSGrygorii Strashko cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1); 286583a8471bSGrygorii Strashko if (IS_ERR(cpsw->rxv[0].ch)) { 286683a8471bSGrygorii Strashko dev_err(dev, "error initializing rx dma channel\n"); 286783a8471bSGrygorii Strashko ret = PTR_ERR(cpsw->rxv[0].ch); 286883a8471bSGrygorii Strashko goto clean_cpts; 286983a8471bSGrygorii Strashko } 287083a8471bSGrygorii Strashko cpsw_split_res(cpsw); 287183a8471bSGrygorii Strashko 287283a8471bSGrygorii Strashko /* setup netdev */ 287383a8471bSGrygorii Strashko ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv), 287483a8471bSGrygorii Strashko CPSW_MAX_QUEUES, CPSW_MAX_QUEUES); 287583a8471bSGrygorii Strashko if (!ndev) { 287683a8471bSGrygorii Strashko dev_err(dev, "error allocating net_device\n"); ret = -ENOMEM; 287783a8471bSGrygorii Strashko goto clean_cpts; 287883a8471bSGrygorii Strashko } 287983a8471bSGrygorii Strashko 288083a8471bSGrygorii Strashko priv = netdev_priv(ndev); 288183a8471bSGrygorii Strashko priv->cpsw = cpsw; 288283a8471bSGrygorii Strashko priv->ndev = ndev; 288383a8471bSGrygorii Strashko priv->dev = dev; 288483a8471bSGrygorii Strashko priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 288583a8471bSGrygorii Strashko priv->emac_port = 0; 288683a8471bSGrygorii Strashko 288783a8471bSGrygorii Strashko if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { 288883a8471bSGrygorii Strashko memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); 288983a8471bSGrygorii Strashko dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr); 289083a8471bSGrygorii Strashko } else { 289183a8471bSGrygorii Strashko eth_random_addr(priv->mac_addr); 289283a8471bSGrygorii Strashko dev_info(dev, "Random MACID = %pM\n", priv->mac_addr); 289383a8471bSGrygorii Strashko } 289483a8471bSGrygorii Strashko 289583a8471bSGrygorii Strashko memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); 289683a8471bSGrygorii Strashko 289783a8471bSGrygorii Strashko cpsw->slaves[0].ndev = ndev; 289883a8471bSGrygorii Strashko 2899a3a41d2fSGrygorii Strashko ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; 2900070f9c65SKeerthy 2901070f9c65SKeerthy ndev->netdev_ops = &cpsw_netdev_ops; 2902070f9c65SKeerthy ndev->ethtool_ops = &cpsw_ethtool_ops; 29039611d6d6SIvan Khoronzhuk netif_napi_add(ndev, &cpsw->napi_rx, 29049611d6d6SIvan Khoronzhuk cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll, 29059611d6d6SIvan Khoronzhuk CPSW_POLL_WEIGHT); 29069611d6d6SIvan Khoronzhuk netif_tx_napi_add(ndev, &cpsw->napi_tx, 29079611d6d6SIvan Khoronzhuk cpsw->quirk_irq ?
cpsw_tx_poll : cpsw_tx_mq_poll, 29089611d6d6SIvan Khoronzhuk CPSW_POLL_WEIGHT); 2909070f9c65SKeerthy 2910070f9c65SKeerthy /* register the network device */ 2911c8fb5668SGrygorii Strashko SET_NETDEV_DEV(ndev, dev); 2912337d1727SMarek Vasut ndev->dev.of_node = cpsw->slaves[0].data->slave_node; 2913070f9c65SKeerthy ret = register_netdev(ndev); 2914070f9c65SKeerthy if (ret) { 2915c8fb5668SGrygorii Strashko dev_err(dev, "error registering net device\n"); 2916070f9c65SKeerthy ret = -ENODEV; 291783a8471bSGrygorii Strashko goto clean_cpts; 2918070f9c65SKeerthy } 2919070f9c65SKeerthy 2920070f9c65SKeerthy if (cpsw->data.dual_emac) { 2921070f9c65SKeerthy ret = cpsw_probe_dual_emac(priv); 2922070f9c65SKeerthy if (ret) { 2923070f9c65SKeerthy cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); 2924070f9c65SKeerthy goto clean_unregister_netdev_ret; 2925070f9c65SKeerthy } 2926070f9c65SKeerthy } 2927070f9c65SKeerthy 2928c03abd84SFelipe Balbi /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and 2929c03abd84SFelipe Balbi * MISC IRQs which are always kept disabled with this driver so 2930c03abd84SFelipe Balbi * we will not request them. 2931c03abd84SFelipe Balbi * 2932c03abd84SFelipe Balbi * If anyone wants to implement support for those, make sure to 2933c03abd84SFelipe Balbi * first request and append them to irqs_table array. 2934c03abd84SFelipe Balbi */ 293583a8471bSGrygorii Strashko ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt, 2936c8fb5668SGrygorii Strashko 0, dev_name(dev), cpsw); 29375087b915SFelipe Balbi if (ret < 0) { 2938c8fb5668SGrygorii Strashko dev_err(dev, "error attaching irq (%d)\n", ret); 293983a8471bSGrygorii Strashko goto clean_unregister_netdev_ret; 2940df828598SMugunthan V N } 2941df828598SMugunthan V N 29425087b915SFelipe Balbi 294383a8471bSGrygorii Strashko ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt, 2944dbc4ec52SIvan Khoronzhuk 0, dev_name(&pdev->dev), cpsw); 29455087b915SFelipe Balbi if (ret < 0) { 2946c8fb5668SGrygorii Strashko dev_err(dev, "error attaching irq (%d)\n", ret); 294783a8471bSGrygorii Strashko goto clean_unregister_netdev_ret; 29485087b915SFelipe Balbi } 2949c2b32e58SDaniel Mack 295090225bf0SGrygorii Strashko cpsw_notice(priv, probe, 295190225bf0SGrygorii Strashko "initialized device (regs %pa, irq %d, pool size %d)\n", 295283a8471bSGrygorii Strashko &ss_res->start, cpsw->irqs_table[0], descs_pool_size); 2953d9ba8f9eSMugunthan V N 2954c46ab7e0SJohan Hovold pm_runtime_put(&pdev->dev); 2955c46ab7e0SJohan Hovold 2956df828598SMugunthan V N return 0; 2957df828598SMugunthan V N 2958a7fe9d46SJohan Hovold clean_unregister_netdev_ret: 2959a7fe9d46SJohan Hovold unregister_netdev(ndev); 296083a8471bSGrygorii Strashko clean_cpts: 296183a8471bSGrygorii Strashko cpts_release(cpsw->cpts); 29622c836bd9SIvan Khoronzhuk cpdma_ctlr_destroy(cpsw->dma); 2963a4e32b0dSJohan Hovold clean_dt_ret: 2964a4e32b0dSJohan Hovold cpsw_remove_dt(pdev); 2965c46ab7e0SJohan Hovold pm_runtime_put_sync(&pdev->dev); 2966aa1a15e2SDaniel Mack clean_runtime_disable_ret: 2967f150bd7fSMugunthan V N pm_runtime_disable(&pdev->dev); 2968df828598SMugunthan V N return ret; 2969df828598SMugunthan V N } 2970df828598SMugunthan V N 2971663e12e6SBill Pemberton static int cpsw_remove(struct platform_device *pdev) 2972df828598SMugunthan V N { 2973bfe59032SIvan Khoronzhuk struct cpsw_common *cpsw = platform_get_drvdata(pdev); 2974bfe59032SIvan Khoronzhuk int i, ret; 29758a0b6dc9SGrygorii Strashko 29768a0b6dc9SGrygorii Strashko ret = 
pm_runtime_get_sync(&pdev->dev); 29778a0b6dc9SGrygorii Strashko if (ret < 0) { 29788a0b6dc9SGrygorii Strashko pm_runtime_put_noidle(&pdev->dev); 29798a0b6dc9SGrygorii Strashko return ret; 29808a0b6dc9SGrygorii Strashko } 2981df828598SMugunthan V N 2982bfe59032SIvan Khoronzhuk for (i = 0; i < cpsw->data.slaves; i++) 2983bfe59032SIvan Khoronzhuk if (cpsw->slaves[i].ndev) 2984bfe59032SIvan Khoronzhuk unregister_netdev(cpsw->slaves[i].ndev); 2985df828598SMugunthan V N 29868a2c9a5aSGrygorii Strashko cpts_release(cpsw->cpts); 29872c836bd9SIvan Khoronzhuk cpdma_ctlr_destroy(cpsw->dma); 2988a4e32b0dSJohan Hovold cpsw_remove_dt(pdev); 29898a0b6dc9SGrygorii Strashko pm_runtime_put_sync(&pdev->dev); 29908a0b6dc9SGrygorii Strashko pm_runtime_disable(&pdev->dev); 2991df828598SMugunthan V N return 0; 2992df828598SMugunthan V N } 2993df828598SMugunthan V N 29948963a504SGrygorii Strashko #ifdef CONFIG_PM_SLEEP 2995df828598SMugunthan V N static int cpsw_suspend(struct device *dev) 2996df828598SMugunthan V N { 29972f9b0d93SKeerthy struct cpsw_common *cpsw = dev_get_drvdata(dev); 2998618073e3SMugunthan V N int i; 2999618073e3SMugunthan V N 30002f9b0d93SKeerthy for (i = 0; i < cpsw->data.slaves; i++) 30012f9b0d93SKeerthy if (cpsw->slaves[i].ndev) 3002606f3993SIvan Khoronzhuk if (netif_running(cpsw->slaves[i].ndev)) 3003606f3993SIvan Khoronzhuk cpsw_ndo_stop(cpsw->slaves[i].ndev); 30041e7a2e21SDaniel Mack 3005739683b4SMugunthan V N /* Select sleep pin state */ 300656e31bd8SIvan Khoronzhuk pinctrl_pm_select_sleep_state(dev); 3007739683b4SMugunthan V N 3008df828598SMugunthan V N return 0; 3009df828598SMugunthan V N } 3010df828598SMugunthan V N 3011df828598SMugunthan V N static int cpsw_resume(struct device *dev) 3012df828598SMugunthan V N { 30132f9b0d93SKeerthy struct cpsw_common *cpsw = dev_get_drvdata(dev); 30142f9b0d93SKeerthy int i; 3015df828598SMugunthan V N 3016739683b4SMugunthan V N /* Select default pin state */ 301756e31bd8SIvan Khoronzhuk pinctrl_pm_select_default_state(dev); 3018739683b4SMugunthan V N 30194ccfd638SGrygorii Strashko /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */ 30204ccfd638SGrygorii Strashko rtnl_lock(); 3021618073e3SMugunthan V N 30222f9b0d93SKeerthy for (i = 0; i < cpsw->data.slaves; i++) 30232f9b0d93SKeerthy if (cpsw->slaves[i].ndev) 3024606f3993SIvan Khoronzhuk if (netif_running(cpsw->slaves[i].ndev)) 3025606f3993SIvan Khoronzhuk cpsw_ndo_open(cpsw->slaves[i].ndev); 30262f9b0d93SKeerthy 30274ccfd638SGrygorii Strashko rtnl_unlock(); 30284ccfd638SGrygorii Strashko 3029df828598SMugunthan V N return 0; 3030df828598SMugunthan V N } 30318963a504SGrygorii Strashko #endif 3032df828598SMugunthan V N 30338963a504SGrygorii Strashko static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); 3034df828598SMugunthan V N 3035df828598SMugunthan V N static struct platform_driver cpsw_driver = { 3036df828598SMugunthan V N .driver = { 3037df828598SMugunthan V N .name = "cpsw", 3038df828598SMugunthan V N .pm = &cpsw_pm_ops, 30391e5c76d4SSachin Kamat .of_match_table = cpsw_of_mtable, 3040df828598SMugunthan V N }, 3041df828598SMugunthan V N .probe = cpsw_probe, 3042663e12e6SBill Pemberton .remove = cpsw_remove, 3043df828598SMugunthan V N }; 3044df828598SMugunthan V N 30456fb3b6b5SGrygorii Strashko module_platform_driver(cpsw_driver); 3046df828598SMugunthan V N 3047df828598SMugunthan V N MODULE_LICENSE("GPL"); 3048df828598SMugunthan V N MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>"); 3049df828598SMugunthan V N MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>"); 
3050df828598SMugunthan V N MODULE_DESCRIPTION("TI CPSW Ethernet driver"); 3051
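/* Usage sketch (illustrative only; interface names and numeric values below
 * are examples, not taken from this driver):
 *
 * - cpsw_ndo_set_tx_maxrate() is reached through the per-queue sysfs knob,
 *   with the rate given in Mbps, e.g.:
 *	echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * - cpsw_ndo_setup_tc() handles the mqprio and CBS qdisc offloads, e.g.:
 *	tc qdisc add dev eth0 parent root handle 100 mqprio num_tc 3 \
 *		map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 1
 *
 * - cpsw_set_channels() backs "ethtool -L eth0 rx <n> tx <n>".
 */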