14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 27ac6653aSJeff Kirsher /******************************************************************************* 37ac6653aSJeff Kirsher This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. 47ac6653aSJeff Kirsher ST Ethernet IPs are built around a Synopsys IP Core. 57ac6653aSJeff Kirsher 6286a8372SGiuseppe CAVALLARO Copyright(C) 2007-2011 STMicroelectronics Ltd 77ac6653aSJeff Kirsher 87ac6653aSJeff Kirsher 97ac6653aSJeff Kirsher Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 107ac6653aSJeff Kirsher 117ac6653aSJeff Kirsher Documentation available at: 127ac6653aSJeff Kirsher http://www.stlinux.com 137ac6653aSJeff Kirsher Support available at: 147ac6653aSJeff Kirsher https://bugzilla.stlinux.com/ 157ac6653aSJeff Kirsher *******************************************************************************/ 167ac6653aSJeff Kirsher 176a81c26fSViresh Kumar #include <linux/clk.h> 187ac6653aSJeff Kirsher #include <linux/kernel.h> 197ac6653aSJeff Kirsher #include <linux/interrupt.h> 207ac6653aSJeff Kirsher #include <linux/ip.h> 217ac6653aSJeff Kirsher #include <linux/tcp.h> 227ac6653aSJeff Kirsher #include <linux/skbuff.h> 237ac6653aSJeff Kirsher #include <linux/ethtool.h> 247ac6653aSJeff Kirsher #include <linux/if_ether.h> 257ac6653aSJeff Kirsher #include <linux/crc32.h> 267ac6653aSJeff Kirsher #include <linux/mii.h> 2701789349SJiri Pirko #include <linux/if.h> 287ac6653aSJeff Kirsher #include <linux/if_vlan.h> 297ac6653aSJeff Kirsher #include <linux/dma-mapping.h> 307ac6653aSJeff Kirsher #include <linux/slab.h> 315ec55823SJoakim Zhang #include <linux/pm_runtime.h> 327ac6653aSJeff Kirsher #include <linux/prefetch.h> 33db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h> 3450fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 357ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h> 367ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h> 3750fb4f74SGiuseppe CAVALLARO #endif /* 
CONFIG_DEBUG_FS */ 38891434b1SRayagond Kokatanur #include <linux/net_tstamp.h> 39eeef2f6bSJose Abreu #include <linux/phylink.h> 40b7766206SJose Abreu #include <linux/udp.h> 415fabb012SOng Boon Leong #include <linux/bpf_trace.h> 424dbbe8ddSJose Abreu #include <net/pkt_cls.h> 43bba2556eSOng Boon Leong #include <net/xdp_sock_drv.h> 44891434b1SRayagond Kokatanur #include "stmmac_ptp.h" 45286a8372SGiuseppe CAVALLARO #include "stmmac.h" 465fabb012SOng Boon Leong #include "stmmac_xdp.h" 47c5e4ddbdSChen-Yu Tsai #include <linux/reset.h> 485790cf3cSMathieu Olivari #include <linux/of_mdio.h> 4919d857c9SPhil Reid #include "dwmac1000.h" 507d9e6c5aSJose Abreu #include "dwxgmac2.h" 5142de047dSJose Abreu #include "hwif.h" 527ac6653aSJeff Kirsher 53a6da2bbbSHolger Assmann /* As long as the interface is active, we keep the timestamping counter enabled 54a6da2bbbSHolger Assmann * with fine resolution and binary rollover. This avoid non-monotonic behavior 55a6da2bbbSHolger Assmann * (clock jumps) when changing timestamping settings at runtime. 
56a6da2bbbSHolger Assmann */ 57a6da2bbbSHolger Assmann #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ 58a6da2bbbSHolger Assmann PTP_TCR_TSCTRLSSR) 59a6da2bbbSHolger Assmann 608d558f02SJose Abreu #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) 61f748be53SAlexandre TORGUE #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) 627ac6653aSJeff Kirsher 637ac6653aSJeff Kirsher /* Module parameters */ 6432ceabcaSGiuseppe CAVALLARO #define TX_TIMEO 5000 657ac6653aSJeff Kirsher static int watchdog = TX_TIMEO; 66d3757ba4SJoe Perches module_param(watchdog, int, 0644); 6732ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); 687ac6653aSJeff Kirsher 6932ceabcaSGiuseppe CAVALLARO static int debug = -1; 70d3757ba4SJoe Perches module_param(debug, int, 0644); 7132ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); 727ac6653aSJeff Kirsher 7347d1f71fSstephen hemminger static int phyaddr = -1; 74d3757ba4SJoe Perches module_param(phyaddr, int, 0444); 757ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address"); 767ac6653aSJeff Kirsher 778531c808SChristian Marangi #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) 788531c808SChristian Marangi #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) 797ac6653aSJeff Kirsher 80132c32eeSOng Boon Leong /* Limit to make sure XDP TX and slow path can coexist */ 81132c32eeSOng Boon Leong #define STMMAC_XSK_TX_BUDGET_MAX 256 82132c32eeSOng Boon Leong #define STMMAC_TX_XSK_AVAIL 16 83bba2556eSOng Boon Leong #define STMMAC_RX_FILL_BATCH 16 84bba2556eSOng Boon Leong 855fabb012SOng Boon Leong #define STMMAC_XDP_PASS 0 865fabb012SOng Boon Leong #define STMMAC_XDP_CONSUMED BIT(0) 87be8b38a7SOng Boon Leong #define STMMAC_XDP_TX BIT(1) 888b278a5bSOng Boon Leong #define STMMAC_XDP_REDIRECT BIT(2) 895fabb012SOng Boon Leong 90e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO; 91d3757ba4SJoe Perches 
module_param(flow_ctrl, int, 0644); 927ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); 937ac6653aSJeff Kirsher 947ac6653aSJeff Kirsher static int pause = PAUSE_TIME; 95d3757ba4SJoe Perches module_param(pause, int, 0644); 967ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time"); 977ac6653aSJeff Kirsher 987ac6653aSJeff Kirsher #define TC_DEFAULT 64 997ac6653aSJeff Kirsher static int tc = TC_DEFAULT; 100d3757ba4SJoe Perches module_param(tc, int, 0644); 1017ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value"); 1027ac6653aSJeff Kirsher 103d916701cSGiuseppe CAVALLARO #define DEFAULT_BUFSIZE 1536 104d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE; 105d3757ba4SJoe Perches module_param(buf_sz, int, 0644); 1067ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size"); 1077ac6653aSJeff Kirsher 10822ad3838SGiuseppe Cavallaro #define STMMAC_RX_COPYBREAK 256 10922ad3838SGiuseppe Cavallaro 1107ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 1117ac6653aSJeff Kirsher NETIF_MSG_LINK | NETIF_MSG_IFUP | 1127ac6653aSJeff Kirsher NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 1137ac6653aSJeff Kirsher 114d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER 1000 115d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; 116d3757ba4SJoe Perches module_param(eee_timer, int, 0644); 117d765955dSGiuseppe CAVALLARO MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 118388e201dSVineetha G. 
Jaya Kumaran #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) 119d765955dSGiuseppe CAVALLARO 12022d3efe5SPavel Machek /* By default the driver will use the ring mode to manage tx and rx descriptors, 12122d3efe5SPavel Machek * but allow user to force to use the chain instead of the ring 1224a7d666aSGiuseppe CAVALLARO */ 1234a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode; 124d3757ba4SJoe Perches module_param(chain_mode, int, 0444); 1254a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); 1264a7d666aSGiuseppe CAVALLARO 1277ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 1288532f613SOng Boon Leong /* For MSI interrupts handling */ 1298532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); 1308532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); 1318532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); 1328532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); 133f9ec5723SChristian Marangi static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue); 134f9ec5723SChristian Marangi static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue); 135f9ec5723SChristian Marangi static void stmmac_reset_queues_param(struct stmmac_priv *priv); 136132c32eeSOng Boon Leong static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); 137132c32eeSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); 1383a6c12a0SXiaoliang Yang static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 1393a6c12a0SXiaoliang Yang u32 rxmode, u32 chan); 1407ac6653aSJeff Kirsher 14150fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 142481a7d15SJiping Ma static const struct net_device_ops stmmac_netdev_ops; 1438d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev); 
144466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev); 145bfab27a1SGiuseppe CAVALLARO #endif 146bfab27a1SGiuseppe CAVALLARO 147d5a05e69SVincent Whitchurch #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) 1489125cdd1SGiuseppe CAVALLARO 1495ec55823SJoakim Zhang int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) 1505ec55823SJoakim Zhang { 1515ec55823SJoakim Zhang int ret = 0; 1525ec55823SJoakim Zhang 1535ec55823SJoakim Zhang if (enabled) { 1545ec55823SJoakim Zhang ret = clk_prepare_enable(priv->plat->stmmac_clk); 1555ec55823SJoakim Zhang if (ret) 1565ec55823SJoakim Zhang return ret; 1575ec55823SJoakim Zhang ret = clk_prepare_enable(priv->plat->pclk); 1585ec55823SJoakim Zhang if (ret) { 1595ec55823SJoakim Zhang clk_disable_unprepare(priv->plat->stmmac_clk); 1605ec55823SJoakim Zhang return ret; 1615ec55823SJoakim Zhang } 162b4d45aeeSJoakim Zhang if (priv->plat->clks_config) { 163b4d45aeeSJoakim Zhang ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); 164b4d45aeeSJoakim Zhang if (ret) { 165b4d45aeeSJoakim Zhang clk_disable_unprepare(priv->plat->stmmac_clk); 166b4d45aeeSJoakim Zhang clk_disable_unprepare(priv->plat->pclk); 167b4d45aeeSJoakim Zhang return ret; 168b4d45aeeSJoakim Zhang } 169b4d45aeeSJoakim Zhang } 1705ec55823SJoakim Zhang } else { 1715ec55823SJoakim Zhang clk_disable_unprepare(priv->plat->stmmac_clk); 1725ec55823SJoakim Zhang clk_disable_unprepare(priv->plat->pclk); 173b4d45aeeSJoakim Zhang if (priv->plat->clks_config) 174b4d45aeeSJoakim Zhang priv->plat->clks_config(priv->plat->bsp_priv, enabled); 1755ec55823SJoakim Zhang } 1765ec55823SJoakim Zhang 1775ec55823SJoakim Zhang return ret; 1785ec55823SJoakim Zhang } 1795ec55823SJoakim Zhang EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); 1805ec55823SJoakim Zhang 1817ac6653aSJeff Kirsher /** 1827ac6653aSJeff Kirsher * stmmac_verify_args - verify the driver parameters. 
183732fdf0eSGiuseppe CAVALLARO * Description: it checks the driver parameters and set a default in case of 184732fdf0eSGiuseppe CAVALLARO * errors. 1857ac6653aSJeff Kirsher */ 1867ac6653aSJeff Kirsher static void stmmac_verify_args(void) 1877ac6653aSJeff Kirsher { 1887ac6653aSJeff Kirsher if (unlikely(watchdog < 0)) 1897ac6653aSJeff Kirsher watchdog = TX_TIMEO; 190d916701cSGiuseppe CAVALLARO if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) 191d916701cSGiuseppe CAVALLARO buf_sz = DEFAULT_BUFSIZE; 1927ac6653aSJeff Kirsher if (unlikely(flow_ctrl > 1)) 1937ac6653aSJeff Kirsher flow_ctrl = FLOW_AUTO; 1947ac6653aSJeff Kirsher else if (likely(flow_ctrl < 0)) 1957ac6653aSJeff Kirsher flow_ctrl = FLOW_OFF; 1967ac6653aSJeff Kirsher if (unlikely((pause < 0) || (pause > 0xffff))) 1977ac6653aSJeff Kirsher pause = PAUSE_TIME; 198d765955dSGiuseppe CAVALLARO if (eee_timer < 0) 199d765955dSGiuseppe CAVALLARO eee_timer = STMMAC_DEFAULT_LPI_TIMER; 2007ac6653aSJeff Kirsher } 2017ac6653aSJeff Kirsher 202bba2556eSOng Boon Leong static void __stmmac_disable_all_queues(struct stmmac_priv *priv) 203c22a3f48SJoao Pinto { 204c22a3f48SJoao Pinto u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 2058fce3331SJose Abreu u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 2068fce3331SJose Abreu u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 207c22a3f48SJoao Pinto u32 queue; 208c22a3f48SJoao Pinto 2098fce3331SJose Abreu for (queue = 0; queue < maxq; queue++) { 2108fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 211c22a3f48SJoao Pinto 212132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 213132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) { 214132c32eeSOng Boon Leong napi_disable(&ch->rxtx_napi); 215132c32eeSOng Boon Leong continue; 216132c32eeSOng Boon Leong } 217132c32eeSOng Boon Leong 2184ccb4585SJose Abreu if (queue < rx_queues_cnt) 2194ccb4585SJose Abreu napi_disable(&ch->rx_napi); 2204ccb4585SJose Abreu if (queue < tx_queues_cnt) 
2214ccb4585SJose Abreu napi_disable(&ch->tx_napi); 222c22a3f48SJoao Pinto } 223c22a3f48SJoao Pinto } 224c22a3f48SJoao Pinto 225c22a3f48SJoao Pinto /** 226bba2556eSOng Boon Leong * stmmac_disable_all_queues - Disable all queues 227bba2556eSOng Boon Leong * @priv: driver private structure 228bba2556eSOng Boon Leong */ 229bba2556eSOng Boon Leong static void stmmac_disable_all_queues(struct stmmac_priv *priv) 230bba2556eSOng Boon Leong { 231bba2556eSOng Boon Leong u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 232bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q; 233bba2556eSOng Boon Leong u32 queue; 234bba2556eSOng Boon Leong 235bba2556eSOng Boon Leong /* synchronize_rcu() needed for pending XDP buffers to drain */ 236bba2556eSOng Boon Leong for (queue = 0; queue < rx_queues_cnt; queue++) { 2378531c808SChristian Marangi rx_q = &priv->dma_conf.rx_queue[queue]; 238bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 239bba2556eSOng Boon Leong synchronize_rcu(); 240bba2556eSOng Boon Leong break; 241bba2556eSOng Boon Leong } 242bba2556eSOng Boon Leong } 243bba2556eSOng Boon Leong 244bba2556eSOng Boon Leong __stmmac_disable_all_queues(priv); 245bba2556eSOng Boon Leong } 246bba2556eSOng Boon Leong 247bba2556eSOng Boon Leong /** 248c22a3f48SJoao Pinto * stmmac_enable_all_queues - Enable all queues 249c22a3f48SJoao Pinto * @priv: driver private structure 250c22a3f48SJoao Pinto */ 251c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv) 252c22a3f48SJoao Pinto { 253c22a3f48SJoao Pinto u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 2548fce3331SJose Abreu u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 2558fce3331SJose Abreu u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 256c22a3f48SJoao Pinto u32 queue; 257c22a3f48SJoao Pinto 2588fce3331SJose Abreu for (queue = 0; queue < maxq; queue++) { 2598fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 260c22a3f48SJoao Pinto 261132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 
262132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) { 263132c32eeSOng Boon Leong napi_enable(&ch->rxtx_napi); 264132c32eeSOng Boon Leong continue; 265132c32eeSOng Boon Leong } 266132c32eeSOng Boon Leong 2674ccb4585SJose Abreu if (queue < rx_queues_cnt) 2684ccb4585SJose Abreu napi_enable(&ch->rx_napi); 2694ccb4585SJose Abreu if (queue < tx_queues_cnt) 2704ccb4585SJose Abreu napi_enable(&ch->tx_napi); 271c22a3f48SJoao Pinto } 272c22a3f48SJoao Pinto } 273c22a3f48SJoao Pinto 27434877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv) 27534877a15SJose Abreu { 27634877a15SJose Abreu if (!test_bit(STMMAC_DOWN, &priv->state) && 27734877a15SJose Abreu !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) 27834877a15SJose Abreu queue_work(priv->wq, &priv->service_task); 27934877a15SJose Abreu } 28034877a15SJose Abreu 28134877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv) 28234877a15SJose Abreu { 28334877a15SJose Abreu netif_carrier_off(priv->dev); 28434877a15SJose Abreu set_bit(STMMAC_RESET_REQUESTED, &priv->state); 28534877a15SJose Abreu stmmac_service_event_schedule(priv); 28634877a15SJose Abreu } 28734877a15SJose Abreu 288c22a3f48SJoao Pinto /** 28932ceabcaSGiuseppe CAVALLARO * stmmac_clk_csr_set - dynamically set the MDC clock 29032ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 29132ceabcaSGiuseppe CAVALLARO * Description: this is to dynamically set the MDC clock according to the csr 29232ceabcaSGiuseppe CAVALLARO * clock input. 29332ceabcaSGiuseppe CAVALLARO * Note: 29432ceabcaSGiuseppe CAVALLARO * If a specific clk_csr value is passed from the platform 29532ceabcaSGiuseppe CAVALLARO * this means that the CSR Clock Range selection cannot be 29632ceabcaSGiuseppe CAVALLARO * changed at run-time and it is fixed (as reported in the driver 29732ceabcaSGiuseppe CAVALLARO * documentation). 
Viceversa the driver will try to set the MDC 29832ceabcaSGiuseppe CAVALLARO * clock dynamically according to the actual clock input. 29932ceabcaSGiuseppe CAVALLARO */ 300cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv) 301cd7201f4SGiuseppe CAVALLARO { 302cd7201f4SGiuseppe CAVALLARO u32 clk_rate; 303cd7201f4SGiuseppe CAVALLARO 304f573c0b9Sjpinto clk_rate = clk_get_rate(priv->plat->stmmac_clk); 305cd7201f4SGiuseppe CAVALLARO 306cd7201f4SGiuseppe CAVALLARO /* Platform provided default clk_csr would be assumed valid 307ceb69499SGiuseppe CAVALLARO * for all other cases except for the below mentioned ones. 308ceb69499SGiuseppe CAVALLARO * For values higher than the IEEE 802.3 specified frequency 309ceb69499SGiuseppe CAVALLARO * we can not estimate the proper divider as it is not known 310ceb69499SGiuseppe CAVALLARO * the frequency of clk_csr_i. So we do not change the default 311ceb69499SGiuseppe CAVALLARO * divider. 312ceb69499SGiuseppe CAVALLARO */ 313cd7201f4SGiuseppe CAVALLARO if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { 314cd7201f4SGiuseppe CAVALLARO if (clk_rate < CSR_F_35M) 315cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_20_35M; 316cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) 317cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_35_60M; 318cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) 319cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_60_100M; 320cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) 321cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_100_150M; 322cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) 323cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_150_250M; 32408dad2f4SJesper Nilsson else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) 325cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_250_300M; 
326ceb69499SGiuseppe CAVALLARO } 3279f93ac8dSLABBE Corentin 3289f93ac8dSLABBE Corentin if (priv->plat->has_sun8i) { 3299f93ac8dSLABBE Corentin if (clk_rate > 160000000) 3309f93ac8dSLABBE Corentin priv->clk_csr = 0x03; 3319f93ac8dSLABBE Corentin else if (clk_rate > 80000000) 3329f93ac8dSLABBE Corentin priv->clk_csr = 0x02; 3339f93ac8dSLABBE Corentin else if (clk_rate > 40000000) 3349f93ac8dSLABBE Corentin priv->clk_csr = 0x01; 3359f93ac8dSLABBE Corentin else 3369f93ac8dSLABBE Corentin priv->clk_csr = 0; 3379f93ac8dSLABBE Corentin } 3387d9e6c5aSJose Abreu 3397d9e6c5aSJose Abreu if (priv->plat->has_xgmac) { 3407d9e6c5aSJose Abreu if (clk_rate > 400000000) 3417d9e6c5aSJose Abreu priv->clk_csr = 0x5; 3427d9e6c5aSJose Abreu else if (clk_rate > 350000000) 3437d9e6c5aSJose Abreu priv->clk_csr = 0x4; 3447d9e6c5aSJose Abreu else if (clk_rate > 300000000) 3457d9e6c5aSJose Abreu priv->clk_csr = 0x3; 3467d9e6c5aSJose Abreu else if (clk_rate > 250000000) 3477d9e6c5aSJose Abreu priv->clk_csr = 0x2; 3487d9e6c5aSJose Abreu else if (clk_rate > 150000000) 3497d9e6c5aSJose Abreu priv->clk_csr = 0x1; 3507d9e6c5aSJose Abreu else 3517d9e6c5aSJose Abreu priv->clk_csr = 0x0; 3527d9e6c5aSJose Abreu } 353cd7201f4SGiuseppe CAVALLARO } 354cd7201f4SGiuseppe CAVALLARO 3557ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len) 3567ac6653aSJeff Kirsher { 357424c4f78SAndy Shevchenko pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); 358424c4f78SAndy Shevchenko print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); 3597ac6653aSJeff Kirsher } 3607ac6653aSJeff Kirsher 361ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) 3627ac6653aSJeff Kirsher { 3638531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 364a6a3e026SLABBE Corentin u32 avail; 365e3ad57c9SGiuseppe Cavallaro 366ce736788SJoao Pinto if (tx_q->dirty_tx > tx_q->cur_tx) 367ce736788SJoao Pinto avail = tx_q->dirty_tx - tx_q->cur_tx - 1; 
368e3ad57c9SGiuseppe Cavallaro else 3698531c808SChristian Marangi avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; 370e3ad57c9SGiuseppe Cavallaro 371e3ad57c9SGiuseppe Cavallaro return avail; 372e3ad57c9SGiuseppe Cavallaro } 373e3ad57c9SGiuseppe Cavallaro 37454139cf3SJoao Pinto /** 37554139cf3SJoao Pinto * stmmac_rx_dirty - Get RX queue dirty 37654139cf3SJoao Pinto * @priv: driver private structure 37754139cf3SJoao Pinto * @queue: RX queue index 37854139cf3SJoao Pinto */ 37954139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) 380e3ad57c9SGiuseppe Cavallaro { 3818531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 382a6a3e026SLABBE Corentin u32 dirty; 383e3ad57c9SGiuseppe Cavallaro 38454139cf3SJoao Pinto if (rx_q->dirty_rx <= rx_q->cur_rx) 38554139cf3SJoao Pinto dirty = rx_q->cur_rx - rx_q->dirty_rx; 386e3ad57c9SGiuseppe Cavallaro else 3878531c808SChristian Marangi dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; 388e3ad57c9SGiuseppe Cavallaro 389e3ad57c9SGiuseppe Cavallaro return dirty; 3907ac6653aSJeff Kirsher } 3917ac6653aSJeff Kirsher 392be1c7eaeSVineetha G. Jaya Kumaran static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) 393be1c7eaeSVineetha G. Jaya Kumaran { 394be1c7eaeSVineetha G. Jaya Kumaran int tx_lpi_timer; 395be1c7eaeSVineetha G. Jaya Kumaran 396be1c7eaeSVineetha G. Jaya Kumaran /* Clear/set the SW EEE timer flag based on LPI ET enablement */ 397be1c7eaeSVineetha G. Jaya Kumaran priv->eee_sw_timer_en = en ? 0 : 1; 398be1c7eaeSVineetha G. Jaya Kumaran tx_lpi_timer = en ? priv->tx_lpi_timer : 0; 399be1c7eaeSVineetha G. Jaya Kumaran stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); 400be1c7eaeSVineetha G. Jaya Kumaran } 401be1c7eaeSVineetha G. 
Jaya Kumaran 40232ceabcaSGiuseppe CAVALLARO /** 403732fdf0eSGiuseppe CAVALLARO * stmmac_enable_eee_mode - check and enter in LPI mode 40432ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 405732fdf0eSGiuseppe CAVALLARO * Description: this function is to verify and enter in LPI mode in case of 406732fdf0eSGiuseppe CAVALLARO * EEE. 40732ceabcaSGiuseppe CAVALLARO */ 408c74ead22SJisheng Zhang static int stmmac_enable_eee_mode(struct stmmac_priv *priv) 409d765955dSGiuseppe CAVALLARO { 410ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 411ce736788SJoao Pinto u32 queue; 412ce736788SJoao Pinto 413ce736788SJoao Pinto /* check if all TX queues have the work finished */ 414ce736788SJoao Pinto for (queue = 0; queue < tx_cnt; queue++) { 4158531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 416ce736788SJoao Pinto 417ce736788SJoao Pinto if (tx_q->dirty_tx != tx_q->cur_tx) 418c74ead22SJisheng Zhang return -EBUSY; /* still unfinished work */ 419ce736788SJoao Pinto } 420ce736788SJoao Pinto 421d765955dSGiuseppe CAVALLARO /* Check and enter in LPI mode */ 422ce736788SJoao Pinto if (!priv->tx_path_in_lpi_mode) 423c10d4c82SJose Abreu stmmac_set_eee_mode(priv, priv->hw, 424b4b7b772Sjpinto priv->plat->en_tx_lpi_clockgating); 425c74ead22SJisheng Zhang return 0; 426d765955dSGiuseppe CAVALLARO } 427d765955dSGiuseppe CAVALLARO 42832ceabcaSGiuseppe CAVALLARO /** 429732fdf0eSGiuseppe CAVALLARO * stmmac_disable_eee_mode - disable and exit from LPI mode 43032ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 43132ceabcaSGiuseppe CAVALLARO * Description: this function is to exit and disable EEE in case of 43232ceabcaSGiuseppe CAVALLARO * LPI state is true. This is called by the xmit. 43332ceabcaSGiuseppe CAVALLARO */ 434d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv) 435d765955dSGiuseppe CAVALLARO { 436be1c7eaeSVineetha G. Jaya Kumaran if (!priv->eee_sw_timer_en) { 437be1c7eaeSVineetha G. 
Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 438be1c7eaeSVineetha G. Jaya Kumaran return; 439be1c7eaeSVineetha G. Jaya Kumaran } 440be1c7eaeSVineetha G. Jaya Kumaran 441c10d4c82SJose Abreu stmmac_reset_eee_mode(priv, priv->hw); 442d765955dSGiuseppe CAVALLARO del_timer_sync(&priv->eee_ctrl_timer); 443d765955dSGiuseppe CAVALLARO priv->tx_path_in_lpi_mode = false; 444d765955dSGiuseppe CAVALLARO } 445d765955dSGiuseppe CAVALLARO 446d765955dSGiuseppe CAVALLARO /** 447732fdf0eSGiuseppe CAVALLARO * stmmac_eee_ctrl_timer - EEE TX SW timer. 448d0ea5cbdSJesse Brandeburg * @t: timer_list struct containing private info 449d765955dSGiuseppe CAVALLARO * Description: 45032ceabcaSGiuseppe CAVALLARO * if there is no data transfer and if we are not in LPI state, 451d765955dSGiuseppe CAVALLARO * then MAC Transmitter can be moved to LPI state. 452d765955dSGiuseppe CAVALLARO */ 453e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t) 454d765955dSGiuseppe CAVALLARO { 455e99e88a9SKees Cook struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); 456d765955dSGiuseppe CAVALLARO 457c74ead22SJisheng Zhang if (stmmac_enable_eee_mode(priv)) 458388e201dSVineetha G. Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 459d765955dSGiuseppe CAVALLARO } 460d765955dSGiuseppe CAVALLARO 461d765955dSGiuseppe CAVALLARO /** 462732fdf0eSGiuseppe CAVALLARO * stmmac_eee_init - init EEE 46332ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 464d765955dSGiuseppe CAVALLARO * Description: 465732fdf0eSGiuseppe CAVALLARO * if the GMAC supports the EEE (from the HW cap reg) and the phy device 466732fdf0eSGiuseppe CAVALLARO * can also manage EEE, this function enable the LPI state and start related 467732fdf0eSGiuseppe CAVALLARO * timer. 468d765955dSGiuseppe CAVALLARO */ 469d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv) 470d765955dSGiuseppe CAVALLARO { 471388e201dSVineetha G. 
Jaya Kumaran int eee_tw_timer = priv->eee_tw_timer; 472879626e3SJerome Brunet 473f5351ef7SGiuseppe CAVALLARO /* Using PCS we cannot dial with the phy registers at this stage 474f5351ef7SGiuseppe CAVALLARO * so we do not support extra feature like EEE. 475f5351ef7SGiuseppe CAVALLARO */ 476a47b9e15SDejin Zheng if (priv->hw->pcs == STMMAC_PCS_TBI || 477a47b9e15SDejin Zheng priv->hw->pcs == STMMAC_PCS_RTBI) 47874371272SJose Abreu return false; 479f5351ef7SGiuseppe CAVALLARO 48074371272SJose Abreu /* Check if MAC core supports the EEE feature. */ 48174371272SJose Abreu if (!priv->dma_cap.eee) 48274371272SJose Abreu return false; 483d765955dSGiuseppe CAVALLARO 48429555fa3SThierry Reding mutex_lock(&priv->lock); 48574371272SJose Abreu 48674371272SJose Abreu /* Check if it needs to be deactivated */ 487177d935aSJon Hunter if (!priv->eee_active) { 488177d935aSJon Hunter if (priv->eee_enabled) { 48938ddc59dSLABBE Corentin netdev_dbg(priv->dev, "disable EEE\n"); 490be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 49183bf79b6SGiuseppe CAVALLARO del_timer_sync(&priv->eee_ctrl_timer); 492388e201dSVineetha G. Jaya Kumaran stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); 493d4aeaed8SWong Vee Khee if (priv->hw->xpcs) 494d4aeaed8SWong Vee Khee xpcs_config_eee(priv->hw->xpcs, 495d4aeaed8SWong Vee Khee priv->plat->mult_fact_100ns, 496d4aeaed8SWong Vee Khee false); 497177d935aSJon Hunter } 4980867bb97SJon Hunter mutex_unlock(&priv->lock); 49974371272SJose Abreu return false; 50074371272SJose Abreu } 50174371272SJose Abreu 50274371272SJose Abreu if (priv->eee_active && !priv->eee_enabled) { 50374371272SJose Abreu timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); 50474371272SJose Abreu stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, 505388e201dSVineetha G. 
Jaya Kumaran eee_tw_timer); 506656ed8b0SWong Vee Khee if (priv->hw->xpcs) 507656ed8b0SWong Vee Khee xpcs_config_eee(priv->hw->xpcs, 508656ed8b0SWong Vee Khee priv->plat->mult_fact_100ns, 509656ed8b0SWong Vee Khee true); 51083bf79b6SGiuseppe CAVALLARO } 51174371272SJose Abreu 512be1c7eaeSVineetha G. Jaya Kumaran if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { 513be1c7eaeSVineetha G. Jaya Kumaran del_timer_sync(&priv->eee_ctrl_timer); 514be1c7eaeSVineetha G. Jaya Kumaran priv->tx_path_in_lpi_mode = false; 515be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 1); 516be1c7eaeSVineetha G. Jaya Kumaran } else { 517be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 518be1c7eaeSVineetha G. Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, 519be1c7eaeSVineetha G. Jaya Kumaran STMMAC_LPI_T(priv->tx_lpi_timer)); 520be1c7eaeSVineetha G. Jaya Kumaran } 521388e201dSVineetha G. Jaya Kumaran 52229555fa3SThierry Reding mutex_unlock(&priv->lock); 52338ddc59dSLABBE Corentin netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); 52474371272SJose Abreu return true; 525d765955dSGiuseppe CAVALLARO } 526d765955dSGiuseppe CAVALLARO 527732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps 52832ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 529ba1ffd74SGiuseppe CAVALLARO * @p : descriptor pointer 530891434b1SRayagond Kokatanur * @skb : the socket buffer 531891434b1SRayagond Kokatanur * Description : 532891434b1SRayagond Kokatanur * This function will read timestamp from the descriptor & pass it to stack. 533891434b1SRayagond Kokatanur * and also perform some sanity checks. 
534891434b1SRayagond Kokatanur */ 535891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, 536ba1ffd74SGiuseppe CAVALLARO struct dma_desc *p, struct sk_buff *skb) 537891434b1SRayagond Kokatanur { 538891434b1SRayagond Kokatanur struct skb_shared_hwtstamps shhwtstamp; 53925e80cd0SJose Abreu bool found = false; 540df103170SNathan Chancellor u64 ns = 0; 541891434b1SRayagond Kokatanur 542891434b1SRayagond Kokatanur if (!priv->hwts_tx_en) 543891434b1SRayagond Kokatanur return; 544891434b1SRayagond Kokatanur 545ceb69499SGiuseppe CAVALLARO /* exit if skb doesn't support hw tstamp */ 54675e4364fSdamuzi000 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) 547891434b1SRayagond Kokatanur return; 548891434b1SRayagond Kokatanur 549891434b1SRayagond Kokatanur /* check tx tstamp status */ 55042de047dSJose Abreu if (stmmac_get_tx_timestamp_status(priv, p)) { 55142de047dSJose Abreu stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); 55225e80cd0SJose Abreu found = true; 55325e80cd0SJose Abreu } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { 55425e80cd0SJose Abreu found = true; 55525e80cd0SJose Abreu } 556891434b1SRayagond Kokatanur 55725e80cd0SJose Abreu if (found) { 558c6d5f193SKurt Kanzenbach ns -= priv->plat->cdc_error_adj; 5593600be5fSVoon Weifeng 560891434b1SRayagond Kokatanur memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 561891434b1SRayagond Kokatanur shhwtstamp.hwtstamp = ns_to_ktime(ns); 562ba1ffd74SGiuseppe CAVALLARO 56333d4c482SMario Molitor netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); 564891434b1SRayagond Kokatanur /* pass tstamp to stack */ 565891434b1SRayagond Kokatanur skb_tstamp_tx(skb, &shhwtstamp); 566ba1ffd74SGiuseppe CAVALLARO } 567891434b1SRayagond Kokatanur } 568891434b1SRayagond Kokatanur 569732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps 57032ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 571ba1ffd74SGiuseppe CAVALLARO * 
@p : descriptor pointer
 * @np : next descriptor pointer (on GMAC4/xgmac the timestamp lives in the
 *       context descriptor that follows the data descriptor)
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to stack. It also perform some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		/* Compensate the constant clock-domain-crossing error */
		ns -= priv->plat->cdc_error_adj;

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	/* Individual PTP_TCR_* bits accumulated by the rx_filter switch below
	 * and OR-ed into priv->systime_flags at the end.
	 */
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			/* Cores older than dwmac 4.10 also need TSEVNTENA here */
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* No advanced timestamping: only "none" or "all v1 events" */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	/* Keep the base counter running even when timestamping is off, so
	 * the clock stays monotonic across reconfiguration.
	 */
	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* Remember the config so stmmac_hwtstamp_get() can report it back */
	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtain the current hardware timestamping settings
 * as requested.
811d6228b7cSArtem Panfilov */ 812d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 813d6228b7cSArtem Panfilov { 814d6228b7cSArtem Panfilov struct stmmac_priv *priv = netdev_priv(dev); 815d6228b7cSArtem Panfilov struct hwtstamp_config *config = &priv->tstamp_config; 816d6228b7cSArtem Panfilov 817d6228b7cSArtem Panfilov if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 818d6228b7cSArtem Panfilov return -EOPNOTSUPP; 819d6228b7cSArtem Panfilov 820d6228b7cSArtem Panfilov return copy_to_user(ifr->ifr_data, config, 821d6228b7cSArtem Panfilov sizeof(*config)) ? -EFAULT : 0; 822891434b1SRayagond Kokatanur } 823891434b1SRayagond Kokatanur 82432ceabcaSGiuseppe CAVALLARO /** 825a6da2bbbSHolger Assmann * stmmac_init_tstamp_counter - init hardware timestamping counter 826a6da2bbbSHolger Assmann * @priv: driver private structure 827a6da2bbbSHolger Assmann * @systime_flags: timestamping flags 828a6da2bbbSHolger Assmann * Description: 829a6da2bbbSHolger Assmann * Initialize hardware counter for packet timestamping. 830a6da2bbbSHolger Assmann * This is valid as long as the interface is open and not suspended. 831a6da2bbbSHolger Assmann * Will be rerun after resuming from suspend, case in which the timestamping 832a6da2bbbSHolger Assmann * flags updated by stmmac_hwtstamp_set() also need to be restored. 
833a6da2bbbSHolger Assmann */ 834a6da2bbbSHolger Assmann int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) 835a6da2bbbSHolger Assmann { 836a6da2bbbSHolger Assmann bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 837a6da2bbbSHolger Assmann struct timespec64 now; 838a6da2bbbSHolger Assmann u32 sec_inc = 0; 839a6da2bbbSHolger Assmann u64 temp = 0; 840a6da2bbbSHolger Assmann 841a6da2bbbSHolger Assmann if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 842a6da2bbbSHolger Assmann return -EOPNOTSUPP; 843a6da2bbbSHolger Assmann 844a6da2bbbSHolger Assmann stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); 845a6da2bbbSHolger Assmann priv->systime_flags = systime_flags; 846a6da2bbbSHolger Assmann 847a6da2bbbSHolger Assmann /* program Sub Second Increment reg */ 848a6da2bbbSHolger Assmann stmmac_config_sub_second_increment(priv, priv->ptpaddr, 849a6da2bbbSHolger Assmann priv->plat->clk_ptp_rate, 850a6da2bbbSHolger Assmann xmac, &sec_inc); 851a6da2bbbSHolger Assmann temp = div_u64(1000000000ULL, sec_inc); 852a6da2bbbSHolger Assmann 853a6da2bbbSHolger Assmann /* Store sub second increment for later use */ 854a6da2bbbSHolger Assmann priv->sub_second_inc = sec_inc; 855a6da2bbbSHolger Assmann 856a6da2bbbSHolger Assmann /* calculate default added value: 857a6da2bbbSHolger Assmann * formula is : 858a6da2bbbSHolger Assmann * addend = (2^32)/freq_div_ratio; 859a6da2bbbSHolger Assmann * where, freq_div_ratio = 1e9ns/sec_inc 860a6da2bbbSHolger Assmann */ 861a6da2bbbSHolger Assmann temp = (u64)(temp << 32); 862a6da2bbbSHolger Assmann priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); 863a6da2bbbSHolger Assmann stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); 864a6da2bbbSHolger Assmann 865a6da2bbbSHolger Assmann /* initialize system time */ 866a6da2bbbSHolger Assmann ktime_get_real_ts64(&now); 867a6da2bbbSHolger Assmann 868a6da2bbbSHolger Assmann /* lower 32 bits of tv_sec are safe until y2106 */ 
869a6da2bbbSHolger Assmann stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); 870a6da2bbbSHolger Assmann 871a6da2bbbSHolger Assmann return 0; 872a6da2bbbSHolger Assmann } 873a6da2bbbSHolger Assmann EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); 874a6da2bbbSHolger Assmann 875a6da2bbbSHolger Assmann /** 876732fdf0eSGiuseppe CAVALLARO * stmmac_init_ptp - init PTP 87732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 878732fdf0eSGiuseppe CAVALLARO * Description: this is to verify if the HW supports the PTPv1 or PTPv2. 87932ceabcaSGiuseppe CAVALLARO * This is done by looking at the HW cap. register. 880732fdf0eSGiuseppe CAVALLARO * This function also registers the ptp driver. 88132ceabcaSGiuseppe CAVALLARO */ 88292ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv) 883891434b1SRayagond Kokatanur { 8847d9e6c5aSJose Abreu bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 885a6da2bbbSHolger Assmann int ret; 8867d9e6c5aSJose Abreu 88794c82de4SMohammad Athari Bin Ismail if (priv->plat->ptp_clk_freq_config) 88894c82de4SMohammad Athari Bin Ismail priv->plat->ptp_clk_freq_config(priv); 88994c82de4SMohammad Athari Bin Ismail 890a6da2bbbSHolger Assmann ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); 891a6da2bbbSHolger Assmann if (ret) 892a6da2bbbSHolger Assmann return ret; 89392ba6888SRayagond Kokatanur 894891434b1SRayagond Kokatanur priv->adv_ts = 0; 8957d9e6c5aSJose Abreu /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ 8967d9e6c5aSJose Abreu if (xmac && priv->dma_cap.atime_stamp) 897be9b3174SGiuseppe CAVALLARO priv->adv_ts = 1; 898be9b3174SGiuseppe CAVALLARO /* Dwmac 3.x core with extend_desc can support adv_ts */ 899be9b3174SGiuseppe CAVALLARO else if (priv->extend_desc && priv->dma_cap.atime_stamp) 900891434b1SRayagond Kokatanur priv->adv_ts = 1; 9017cd01399SVince Bridgers 902be9b3174SGiuseppe CAVALLARO if (priv->dma_cap.time_stamp) 903be9b3174SGiuseppe CAVALLARO 
netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); 9047cd01399SVince Bridgers 905be9b3174SGiuseppe CAVALLARO if (priv->adv_ts) 906be9b3174SGiuseppe CAVALLARO netdev_info(priv->dev, 907be9b3174SGiuseppe CAVALLARO "IEEE 1588-2008 Advanced Timestamp supported\n"); 908891434b1SRayagond Kokatanur 909891434b1SRayagond Kokatanur priv->hwts_tx_en = 0; 910891434b1SRayagond Kokatanur priv->hwts_rx_en = 0; 91192ba6888SRayagond Kokatanur 912c30a70d3SGiuseppe CAVALLARO return 0; 91392ba6888SRayagond Kokatanur } 91492ba6888SRayagond Kokatanur 91592ba6888SRayagond Kokatanur static void stmmac_release_ptp(struct stmmac_priv *priv) 91692ba6888SRayagond Kokatanur { 917f573c0b9Sjpinto clk_disable_unprepare(priv->plat->clk_ptp_ref); 91892ba6888SRayagond Kokatanur stmmac_ptp_unregister(priv); 919891434b1SRayagond Kokatanur } 920891434b1SRayagond Kokatanur 9217ac6653aSJeff Kirsher /** 92229feff39SJoao Pinto * stmmac_mac_flow_ctrl - Configure flow control in all queues 92329feff39SJoao Pinto * @priv: driver private structure 924d0ea5cbdSJesse Brandeburg * @duplex: duplex passed to the next function 92529feff39SJoao Pinto * Description: It is used for configuring the flow control in all queues 92629feff39SJoao Pinto */ 92729feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) 92829feff39SJoao Pinto { 92929feff39SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 93029feff39SJoao Pinto 931c10d4c82SJose Abreu stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, 93229feff39SJoao Pinto priv->pause, tx_cnt); 93329feff39SJoao Pinto } 93429feff39SJoao Pinto 93572e94511SRussell King (Oracle) static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, 93672e94511SRussell King (Oracle) phy_interface_t interface) 93772e94511SRussell King (Oracle) { 93872e94511SRussell King (Oracle) struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 93972e94511SRussell King (Oracle) 94072e94511SRussell King (Oracle) if 
(!priv->hw->xpcs) 94172e94511SRussell King (Oracle) return NULL; 94272e94511SRussell King (Oracle) 94372e94511SRussell King (Oracle) return &priv->hw->xpcs->pcs; 94472e94511SRussell King (Oracle) } 94572e94511SRussell King (Oracle) 94674371272SJose Abreu static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, 94774371272SJose Abreu const struct phylink_link_state *state) 9489ad372fcSJose Abreu { 94911059740SVladimir Oltean /* Nothing to do, xpcs_config() handles everything */ 950eeef2f6bSJose Abreu } 951eeef2f6bSJose Abreu 9525a558611SOng Boon Leong static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) 9535a558611SOng Boon Leong { 9545a558611SOng Boon Leong struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 9555a558611SOng Boon Leong enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 9565a558611SOng Boon Leong enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 9575a558611SOng Boon Leong bool *hs_enable = &fpe_cfg->hs_enable; 9585a558611SOng Boon Leong 9595a558611SOng Boon Leong if (is_up && *hs_enable) { 9605a558611SOng Boon Leong stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY); 9615a558611SOng Boon Leong } else { 9621f7096f0SWong Vee Khee *lo_state = FPE_STATE_OFF; 9631f7096f0SWong Vee Khee *lp_state = FPE_STATE_OFF; 9645a558611SOng Boon Leong } 9655a558611SOng Boon Leong } 9665a558611SOng Boon Leong 96774371272SJose Abreu static void stmmac_mac_link_down(struct phylink_config *config, 96874371272SJose Abreu unsigned int mode, phy_interface_t interface) 9699ad372fcSJose Abreu { 97074371272SJose Abreu struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 9719ad372fcSJose Abreu 9729ad372fcSJose Abreu stmmac_mac_set(priv, priv->ioaddr, false); 97374371272SJose Abreu priv->eee_active = false; 974388e201dSVineetha G. 
Jaya Kumaran priv->tx_lpi_enabled = false; 975d4aeaed8SWong Vee Khee priv->eee_enabled = stmmac_eee_init(priv); 97674371272SJose Abreu stmmac_set_eee_pls(priv, priv->hw, false); 9775a558611SOng Boon Leong 97863c173ffSMohammad Athari Bin Ismail if (priv->dma_cap.fpesel) 9795a558611SOng Boon Leong stmmac_fpe_link_state_handle(priv, false); 9809ad372fcSJose Abreu } 9819ad372fcSJose Abreu 98274371272SJose Abreu static void stmmac_mac_link_up(struct phylink_config *config, 98391a208f2SRussell King struct phy_device *phy, 98474371272SJose Abreu unsigned int mode, phy_interface_t interface, 98591a208f2SRussell King int speed, int duplex, 98691a208f2SRussell King bool tx_pause, bool rx_pause) 9879ad372fcSJose Abreu { 98874371272SJose Abreu struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 989a3a57bf0SHeiner Kallweit u32 old_ctrl, ctrl; 99046f69dedSJose Abreu 991a3a57bf0SHeiner Kallweit old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); 992a3a57bf0SHeiner Kallweit ctrl = old_ctrl & ~priv->hw->link.speed_mask; 99346f69dedSJose Abreu 99446f69dedSJose Abreu if (interface == PHY_INTERFACE_MODE_USXGMII) { 99546f69dedSJose Abreu switch (speed) { 99646f69dedSJose Abreu case SPEED_10000: 99746f69dedSJose Abreu ctrl |= priv->hw->link.xgmii.speed10000; 99846f69dedSJose Abreu break; 99946f69dedSJose Abreu case SPEED_5000: 100046f69dedSJose Abreu ctrl |= priv->hw->link.xgmii.speed5000; 100146f69dedSJose Abreu break; 100246f69dedSJose Abreu case SPEED_2500: 100346f69dedSJose Abreu ctrl |= priv->hw->link.xgmii.speed2500; 100446f69dedSJose Abreu break; 100546f69dedSJose Abreu default: 100646f69dedSJose Abreu return; 100746f69dedSJose Abreu } 10088a880936SJose Abreu } else if (interface == PHY_INTERFACE_MODE_XLGMII) { 10098a880936SJose Abreu switch (speed) { 10108a880936SJose Abreu case SPEED_100000: 10118a880936SJose Abreu ctrl |= priv->hw->link.xlgmii.speed100000; 10128a880936SJose Abreu break; 10138a880936SJose Abreu case SPEED_50000: 10148a880936SJose Abreu ctrl |= 
priv->hw->link.xlgmii.speed50000; 10158a880936SJose Abreu break; 10168a880936SJose Abreu case SPEED_40000: 10178a880936SJose Abreu ctrl |= priv->hw->link.xlgmii.speed40000; 10188a880936SJose Abreu break; 10198a880936SJose Abreu case SPEED_25000: 10208a880936SJose Abreu ctrl |= priv->hw->link.xlgmii.speed25000; 10218a880936SJose Abreu break; 10228a880936SJose Abreu case SPEED_10000: 10238a880936SJose Abreu ctrl |= priv->hw->link.xgmii.speed10000; 10248a880936SJose Abreu break; 10258a880936SJose Abreu case SPEED_2500: 10268a880936SJose Abreu ctrl |= priv->hw->link.speed2500; 10278a880936SJose Abreu break; 10288a880936SJose Abreu case SPEED_1000: 10298a880936SJose Abreu ctrl |= priv->hw->link.speed1000; 10308a880936SJose Abreu break; 10318a880936SJose Abreu default: 10328a880936SJose Abreu return; 10338a880936SJose Abreu } 103446f69dedSJose Abreu } else { 103546f69dedSJose Abreu switch (speed) { 103646f69dedSJose Abreu case SPEED_2500: 103746f69dedSJose Abreu ctrl |= priv->hw->link.speed2500; 103846f69dedSJose Abreu break; 103946f69dedSJose Abreu case SPEED_1000: 104046f69dedSJose Abreu ctrl |= priv->hw->link.speed1000; 104146f69dedSJose Abreu break; 104246f69dedSJose Abreu case SPEED_100: 104346f69dedSJose Abreu ctrl |= priv->hw->link.speed100; 104446f69dedSJose Abreu break; 104546f69dedSJose Abreu case SPEED_10: 104646f69dedSJose Abreu ctrl |= priv->hw->link.speed10; 104746f69dedSJose Abreu break; 104846f69dedSJose Abreu default: 104946f69dedSJose Abreu return; 105046f69dedSJose Abreu } 105146f69dedSJose Abreu } 105246f69dedSJose Abreu 105346f69dedSJose Abreu priv->speed = speed; 105446f69dedSJose Abreu 105546f69dedSJose Abreu if (priv->plat->fix_mac_speed) 105646f69dedSJose Abreu priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); 105746f69dedSJose Abreu 105846f69dedSJose Abreu if (!duplex) 105946f69dedSJose Abreu ctrl &= ~priv->hw->link.duplex; 106046f69dedSJose Abreu else 106146f69dedSJose Abreu ctrl |= priv->hw->link.duplex; 106246f69dedSJose Abreu 
106346f69dedSJose Abreu /* Flow Control operation */ 1064cc3d2b5fSGoh, Wei Sheng if (rx_pause && tx_pause) 1065cc3d2b5fSGoh, Wei Sheng priv->flow_ctrl = FLOW_AUTO; 1066cc3d2b5fSGoh, Wei Sheng else if (rx_pause && !tx_pause) 1067cc3d2b5fSGoh, Wei Sheng priv->flow_ctrl = FLOW_RX; 1068cc3d2b5fSGoh, Wei Sheng else if (!rx_pause && tx_pause) 1069cc3d2b5fSGoh, Wei Sheng priv->flow_ctrl = FLOW_TX; 1070cc3d2b5fSGoh, Wei Sheng else 1071cc3d2b5fSGoh, Wei Sheng priv->flow_ctrl = FLOW_OFF; 1072cc3d2b5fSGoh, Wei Sheng 107346f69dedSJose Abreu stmmac_mac_flow_ctrl(priv, duplex); 107446f69dedSJose Abreu 1075a3a57bf0SHeiner Kallweit if (ctrl != old_ctrl) 107646f69dedSJose Abreu writel(ctrl, priv->ioaddr + MAC_CTRL_REG); 10779ad372fcSJose Abreu 10789ad372fcSJose Abreu stmmac_mac_set(priv, priv->ioaddr, true); 10795b111770SJose Abreu if (phy && priv->dma_cap.eee) { 108074371272SJose Abreu priv->eee_active = phy_init_eee(phy, 1) >= 0; 108174371272SJose Abreu priv->eee_enabled = stmmac_eee_init(priv); 1082388e201dSVineetha G. 
Jaya Kumaran priv->tx_lpi_enabled = priv->eee_enabled; 108374371272SJose Abreu stmmac_set_eee_pls(priv, priv->hw, true); 108474371272SJose Abreu } 10855a558611SOng Boon Leong 108663c173ffSMohammad Athari Bin Ismail if (priv->dma_cap.fpesel) 10875a558611SOng Boon Leong stmmac_fpe_link_state_handle(priv, true); 10889ad372fcSJose Abreu } 10899ad372fcSJose Abreu 109074371272SJose Abreu static const struct phylink_mac_ops stmmac_phylink_mac_ops = { 109104a0683fSRussell King (Oracle) .validate = phylink_generic_validate, 109272e94511SRussell King (Oracle) .mac_select_pcs = stmmac_mac_select_pcs, 109374371272SJose Abreu .mac_config = stmmac_mac_config, 109474371272SJose Abreu .mac_link_down = stmmac_mac_link_down, 109574371272SJose Abreu .mac_link_up = stmmac_mac_link_up, 1096eeef2f6bSJose Abreu }; 1097eeef2f6bSJose Abreu 109829feff39SJoao Pinto /** 1099732fdf0eSGiuseppe CAVALLARO * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported 110032ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 110132ceabcaSGiuseppe CAVALLARO * Description: this is to verify if the HW supports the PCS. 110232ceabcaSGiuseppe CAVALLARO * Physical Coding Sublayer (PCS) interface that can be used when the MAC is 110332ceabcaSGiuseppe CAVALLARO * configured for the TBI, RTBI, or SGMII PHY interface. 
110432ceabcaSGiuseppe CAVALLARO */ 1105e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv) 1106e58bb43fSGiuseppe CAVALLARO { 1107e58bb43fSGiuseppe CAVALLARO int interface = priv->plat->interface; 1108e58bb43fSGiuseppe CAVALLARO 1109e58bb43fSGiuseppe CAVALLARO if (priv->dma_cap.pcs) { 11100d909dcdSByungho An if ((interface == PHY_INTERFACE_MODE_RGMII) || 11110d909dcdSByungho An (interface == PHY_INTERFACE_MODE_RGMII_ID) || 11120d909dcdSByungho An (interface == PHY_INTERFACE_MODE_RGMII_RXID) || 11130d909dcdSByungho An (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { 111438ddc59dSLABBE Corentin netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); 11153fe5cadbSGiuseppe CAVALLARO priv->hw->pcs = STMMAC_PCS_RGMII; 11160d909dcdSByungho An } else if (interface == PHY_INTERFACE_MODE_SGMII) { 111738ddc59dSLABBE Corentin netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); 11183fe5cadbSGiuseppe CAVALLARO priv->hw->pcs = STMMAC_PCS_SGMII; 1119e58bb43fSGiuseppe CAVALLARO } 1120e58bb43fSGiuseppe CAVALLARO } 1121e58bb43fSGiuseppe CAVALLARO } 1122e58bb43fSGiuseppe CAVALLARO 11237ac6653aSJeff Kirsher /** 11247ac6653aSJeff Kirsher * stmmac_init_phy - PHY initialization 11257ac6653aSJeff Kirsher * @dev: net device structure 11267ac6653aSJeff Kirsher * Description: it initializes the driver's PHY state, and attaches the PHY 11277ac6653aSJeff Kirsher * to the mac driver. 
11287ac6653aSJeff Kirsher * Return value: 11297ac6653aSJeff Kirsher * 0 on success 11307ac6653aSJeff Kirsher */ 11317ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev) 11327ac6653aSJeff Kirsher { 11337ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 1134ab21cf92SOng Boon Leong struct fwnode_handle *fwnode; 113574371272SJose Abreu int ret; 11367ac6653aSJeff Kirsher 1137ab21cf92SOng Boon Leong fwnode = of_fwnode_handle(priv->plat->phylink_node); 1138ab21cf92SOng Boon Leong if (!fwnode) 1139ab21cf92SOng Boon Leong fwnode = dev_fwnode(priv->device); 114074371272SJose Abreu 1141ab21cf92SOng Boon Leong if (fwnode) 1142ab21cf92SOng Boon Leong ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); 114342e87024SJose Abreu 114442e87024SJose Abreu /* Some DT bindings do not set-up the PHY handle. Let's try to 114542e87024SJose Abreu * manually parse it 114642e87024SJose Abreu */ 1147ab21cf92SOng Boon Leong if (!fwnode || ret) { 114874371272SJose Abreu int addr = priv->plat->phy_addr; 114974371272SJose Abreu struct phy_device *phydev; 1150f142af2eSSrinivas Kandagatla 115174371272SJose Abreu phydev = mdiobus_get_phy(priv->mii, addr); 115274371272SJose Abreu if (!phydev) { 115374371272SJose Abreu netdev_err(priv->dev, "no phy at addr %d\n", addr); 11547ac6653aSJeff Kirsher return -ENODEV; 11557ac6653aSJeff Kirsher } 11568e99fc5fSGiuseppe Cavallaro 115774371272SJose Abreu ret = phylink_connect_phy(priv->phylink, phydev); 115874371272SJose Abreu } 1159c51e424dSFlorian Fainelli 1160576f9eacSJoakim Zhang if (!priv->plat->pmt) { 1161576f9eacSJoakim Zhang struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 1162576f9eacSJoakim Zhang 11631d8e5b0fSJisheng Zhang phylink_ethtool_get_wol(priv->phylink, &wol); 11641d8e5b0fSJisheng Zhang device_set_wakeup_capable(priv->device, !!wol.supported); 1165576f9eacSJoakim Zhang } 11661d8e5b0fSJisheng Zhang 116774371272SJose Abreu return ret; 116874371272SJose Abreu } 116974371272SJose Abreu 
117074371272SJose Abreu static int stmmac_phy_setup(struct stmmac_priv *priv) 117174371272SJose Abreu { 117211059740SVladimir Oltean struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; 1173c63d1e5cSArnd Bergmann struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); 117492c3807bSRussell King (Oracle) int max_speed = priv->plat->max_speed; 11750060c878SAlexandru Ardelean int mode = priv->plat->phy_interface; 117674371272SJose Abreu struct phylink *phylink; 117774371272SJose Abreu 117874371272SJose Abreu priv->phylink_config.dev = &priv->dev->dev; 117974371272SJose Abreu priv->phylink_config.type = PHYLINK_NETDEV; 1180593f555fSSriranjani P if (priv->plat->mdio_bus_data) 1181e5e5b771SOng Boon Leong priv->phylink_config.ovr_an_inband = 118212628565SDavid S. Miller mdio_bus_data->xpcs_an_inband; 118374371272SJose Abreu 11848dc6051cSJose Abreu if (!fwnode) 11858dc6051cSJose Abreu fwnode = dev_fwnode(priv->device); 11868dc6051cSJose Abreu 1187d194923dSRussell King (Oracle) /* Set the platform/firmware specified interface mode */ 1188d194923dSRussell King (Oracle) __set_bit(mode, priv->phylink_config.supported_interfaces); 1189d194923dSRussell King (Oracle) 1190d194923dSRussell King (Oracle) /* If we have an xpcs, it defines which PHY interfaces are supported. 
*/ 1191d194923dSRussell King (Oracle) if (priv->hw->xpcs) 1192d194923dSRussell King (Oracle) xpcs_get_interfaces(priv->hw->xpcs, 1193d194923dSRussell King (Oracle) priv->phylink_config.supported_interfaces); 1194d194923dSRussell King (Oracle) 119592c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | 119692c3807bSRussell King (Oracle) MAC_10 | MAC_100; 119792c3807bSRussell King (Oracle) 119892c3807bSRussell King (Oracle) if (!max_speed || max_speed >= 1000) 119992c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities |= MAC_1000; 120092c3807bSRussell King (Oracle) 120192c3807bSRussell King (Oracle) if (priv->plat->has_gmac4) { 120292c3807bSRussell King (Oracle) if (!max_speed || max_speed >= 2500) 120392c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities |= MAC_2500FD; 120492c3807bSRussell King (Oracle) } else if (priv->plat->has_xgmac) { 120592c3807bSRussell King (Oracle) if (!max_speed || max_speed >= 2500) 120692c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities |= MAC_2500FD; 120792c3807bSRussell King (Oracle) if (!max_speed || max_speed >= 5000) 120892c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities |= MAC_5000FD; 120992c3807bSRussell King (Oracle) if (!max_speed || max_speed >= 10000) 121092c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities |= MAC_10000FD; 121192c3807bSRussell King (Oracle) if (!max_speed || max_speed >= 25000) 121292c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities |= MAC_25000FD; 121392c3807bSRussell King (Oracle) if (!max_speed || max_speed >= 40000) 121492c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities |= MAC_40000FD; 121592c3807bSRussell King (Oracle) if (!max_speed || max_speed >= 50000) 121692c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities |= MAC_50000FD; 121792c3807bSRussell King (Oracle) if (!max_speed || max_speed >= 100000) 121892c3807bSRussell King 
(Oracle) priv->phylink_config.mac_capabilities |= MAC_100000FD; 121992c3807bSRussell King (Oracle) } 122092c3807bSRussell King (Oracle) 122192c3807bSRussell King (Oracle) /* Half-Duplex can only work with single queue */ 122292c3807bSRussell King (Oracle) if (priv->plat->tx_queues_to_use > 1) 122392c3807bSRussell King (Oracle) priv->phylink_config.mac_capabilities &= 122492c3807bSRussell King (Oracle) ~(MAC_10HD | MAC_100HD | MAC_1000HD); 1225f151c147SShenwei Wang priv->phylink_config.mac_managed_pm = true; 122692c3807bSRussell King (Oracle) 1227c63d1e5cSArnd Bergmann phylink = phylink_create(&priv->phylink_config, fwnode, 122874371272SJose Abreu mode, &stmmac_phylink_mac_ops); 122974371272SJose Abreu if (IS_ERR(phylink)) 123074371272SJose Abreu return PTR_ERR(phylink); 123174371272SJose Abreu 123274371272SJose Abreu priv->phylink = phylink; 12337ac6653aSJeff Kirsher return 0; 12347ac6653aSJeff Kirsher } 12357ac6653aSJeff Kirsher 1236ba39b344SChristian Marangi static void stmmac_display_rx_rings(struct stmmac_priv *priv, 1237ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 1238c24602efSGiuseppe CAVALLARO { 123954139cf3SJoao Pinto u32 rx_cnt = priv->plat->rx_queues_to_use; 1240bfaf91caSJoakim Zhang unsigned int desc_size; 124171fedb01SJoao Pinto void *head_rx; 124254139cf3SJoao Pinto u32 queue; 124354139cf3SJoao Pinto 124454139cf3SJoao Pinto /* Display RX rings */ 124554139cf3SJoao Pinto for (queue = 0; queue < rx_cnt; queue++) { 1246ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 124754139cf3SJoao Pinto 124854139cf3SJoao Pinto pr_info("\tRX Queue %u rings\n", queue); 1249d0225e7dSAlexandre TORGUE 1250bfaf91caSJoakim Zhang if (priv->extend_desc) { 125154139cf3SJoao Pinto head_rx = (void *)rx_q->dma_erx; 1252bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_extended_desc); 1253bfaf91caSJoakim Zhang } else { 125454139cf3SJoao Pinto head_rx = (void *)rx_q->dma_rx; 1255bfaf91caSJoakim Zhang desc_size = sizeof(struct 
dma_desc); 1256bfaf91caSJoakim Zhang } 125771fedb01SJoao Pinto 125871fedb01SJoao Pinto /* Display RX ring */ 1259ba39b344SChristian Marangi stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, 1260bfaf91caSJoakim Zhang rx_q->dma_rx_phy, desc_size); 12615bacd778SLABBE Corentin } 126254139cf3SJoao Pinto } 1263d0225e7dSAlexandre TORGUE 1264ba39b344SChristian Marangi static void stmmac_display_tx_rings(struct stmmac_priv *priv, 1265ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 126671fedb01SJoao Pinto { 1267ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 1268bfaf91caSJoakim Zhang unsigned int desc_size; 126971fedb01SJoao Pinto void *head_tx; 1270ce736788SJoao Pinto u32 queue; 1271ce736788SJoao Pinto 1272ce736788SJoao Pinto /* Display TX rings */ 1273ce736788SJoao Pinto for (queue = 0; queue < tx_cnt; queue++) { 1274ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1275ce736788SJoao Pinto 1276ce736788SJoao Pinto pr_info("\tTX Queue %d rings\n", queue); 127771fedb01SJoao Pinto 1278bfaf91caSJoakim Zhang if (priv->extend_desc) { 1279ce736788SJoao Pinto head_tx = (void *)tx_q->dma_etx; 1280bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_extended_desc); 1281bfaf91caSJoakim Zhang } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1282579a25a8SJose Abreu head_tx = (void *)tx_q->dma_entx; 1283bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_edesc); 1284bfaf91caSJoakim Zhang } else { 1285ce736788SJoao Pinto head_tx = (void *)tx_q->dma_tx; 1286bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_desc); 1287bfaf91caSJoakim Zhang } 128871fedb01SJoao Pinto 1289ba39b344SChristian Marangi stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, 1290bfaf91caSJoakim Zhang tx_q->dma_tx_phy, desc_size); 1291c24602efSGiuseppe CAVALLARO } 1292ce736788SJoao Pinto } 1293c24602efSGiuseppe CAVALLARO 1294ba39b344SChristian Marangi static void stmmac_display_rings(struct stmmac_priv *priv, 1295ba39b344SChristian 
Marangi struct stmmac_dma_conf *dma_conf) 129671fedb01SJoao Pinto { 129771fedb01SJoao Pinto /* Display RX ring */ 1298ba39b344SChristian Marangi stmmac_display_rx_rings(priv, dma_conf); 129971fedb01SJoao Pinto 130071fedb01SJoao Pinto /* Display TX ring */ 1301ba39b344SChristian Marangi stmmac_display_tx_rings(priv, dma_conf); 130271fedb01SJoao Pinto } 130371fedb01SJoao Pinto 1304286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize) 1305286a8372SGiuseppe CAVALLARO { 1306286a8372SGiuseppe CAVALLARO int ret = bufsize; 1307286a8372SGiuseppe CAVALLARO 1308b2f3a481SJose Abreu if (mtu >= BUF_SIZE_8KiB) 1309b2f3a481SJose Abreu ret = BUF_SIZE_16KiB; 1310b2f3a481SJose Abreu else if (mtu >= BUF_SIZE_4KiB) 1311286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_8KiB; 1312286a8372SGiuseppe CAVALLARO else if (mtu >= BUF_SIZE_2KiB) 1313286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_4KiB; 1314d916701cSGiuseppe CAVALLARO else if (mtu > DEFAULT_BUFSIZE) 1315286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_2KiB; 1316286a8372SGiuseppe CAVALLARO else 1317d916701cSGiuseppe CAVALLARO ret = DEFAULT_BUFSIZE; 1318286a8372SGiuseppe CAVALLARO 1319286a8372SGiuseppe CAVALLARO return ret; 1320286a8372SGiuseppe CAVALLARO } 1321286a8372SGiuseppe CAVALLARO 132232ceabcaSGiuseppe CAVALLARO /** 132371fedb01SJoao Pinto * stmmac_clear_rx_descriptors - clear RX descriptors 132432ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 1325ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 132654139cf3SJoao Pinto * @queue: RX queue index 132771fedb01SJoao Pinto * Description: this function is called to clear the RX descriptors 132832ceabcaSGiuseppe CAVALLARO * in case of both basic and extended descriptors are used. 
132932ceabcaSGiuseppe CAVALLARO */ 1330ba39b344SChristian Marangi static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, 1331ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1332ba39b344SChristian Marangi u32 queue) 1333c24602efSGiuseppe CAVALLARO { 1334ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 13355bacd778SLABBE Corentin int i; 1336c24602efSGiuseppe CAVALLARO 133771fedb01SJoao Pinto /* Clear the RX descriptors */ 1338ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_rx_size; i++) 13395bacd778SLABBE Corentin if (priv->extend_desc) 134042de047dSJose Abreu stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, 13415bacd778SLABBE Corentin priv->use_riwt, priv->mode, 1342ba39b344SChristian Marangi (i == dma_conf->dma_rx_size - 1), 1343ba39b344SChristian Marangi dma_conf->dma_buf_sz); 13445bacd778SLABBE Corentin else 134542de047dSJose Abreu stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], 13465bacd778SLABBE Corentin priv->use_riwt, priv->mode, 1347ba39b344SChristian Marangi (i == dma_conf->dma_rx_size - 1), 1348ba39b344SChristian Marangi dma_conf->dma_buf_sz); 134971fedb01SJoao Pinto } 135071fedb01SJoao Pinto 135171fedb01SJoao Pinto /** 135271fedb01SJoao Pinto * stmmac_clear_tx_descriptors - clear tx descriptors 135371fedb01SJoao Pinto * @priv: driver private structure 1354ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1355ce736788SJoao Pinto * @queue: TX queue index. 135671fedb01SJoao Pinto * Description: this function is called to clear the TX descriptors 135771fedb01SJoao Pinto * in case of both basic and extended descriptors are used. 
135871fedb01SJoao Pinto */ 1359ba39b344SChristian Marangi static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, 1360ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1361ba39b344SChristian Marangi u32 queue) 136271fedb01SJoao Pinto { 1363ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 136471fedb01SJoao Pinto int i; 136571fedb01SJoao Pinto 136671fedb01SJoao Pinto /* Clear the TX descriptors */ 1367ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_tx_size; i++) { 1368ba39b344SChristian Marangi int last = (i == (dma_conf->dma_tx_size - 1)); 1369579a25a8SJose Abreu struct dma_desc *p; 1370579a25a8SJose Abreu 13715bacd778SLABBE Corentin if (priv->extend_desc) 1372579a25a8SJose Abreu p = &tx_q->dma_etx[i].basic; 1373579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1374579a25a8SJose Abreu p = &tx_q->dma_entx[i].basic; 13755bacd778SLABBE Corentin else 1376579a25a8SJose Abreu p = &tx_q->dma_tx[i]; 1377579a25a8SJose Abreu 1378579a25a8SJose Abreu stmmac_init_tx_desc(priv, p, priv->mode, last); 1379579a25a8SJose Abreu } 1380c24602efSGiuseppe CAVALLARO } 1381c24602efSGiuseppe CAVALLARO 1382732fdf0eSGiuseppe CAVALLARO /** 138371fedb01SJoao Pinto * stmmac_clear_descriptors - clear descriptors 138471fedb01SJoao Pinto * @priv: driver private structure 1385ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 138671fedb01SJoao Pinto * Description: this function is called to clear the TX and RX descriptors 138771fedb01SJoao Pinto * in case of both basic and extended descriptors are used. 
138871fedb01SJoao Pinto */ 1389ba39b344SChristian Marangi static void stmmac_clear_descriptors(struct stmmac_priv *priv, 1390ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 139171fedb01SJoao Pinto { 139254139cf3SJoao Pinto u32 rx_queue_cnt = priv->plat->rx_queues_to_use; 1393ce736788SJoao Pinto u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 139454139cf3SJoao Pinto u32 queue; 139554139cf3SJoao Pinto 139671fedb01SJoao Pinto /* Clear the RX descriptors */ 139754139cf3SJoao Pinto for (queue = 0; queue < rx_queue_cnt; queue++) 1398ba39b344SChristian Marangi stmmac_clear_rx_descriptors(priv, dma_conf, queue); 139971fedb01SJoao Pinto 140071fedb01SJoao Pinto /* Clear the TX descriptors */ 1401ce736788SJoao Pinto for (queue = 0; queue < tx_queue_cnt; queue++) 1402ba39b344SChristian Marangi stmmac_clear_tx_descriptors(priv, dma_conf, queue); 140371fedb01SJoao Pinto } 140471fedb01SJoao Pinto 140571fedb01SJoao Pinto /** 1406732fdf0eSGiuseppe CAVALLARO * stmmac_init_rx_buffers - init the RX descriptor buffer. 1407732fdf0eSGiuseppe CAVALLARO * @priv: driver private structure 1408ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1409732fdf0eSGiuseppe CAVALLARO * @p: descriptor pointer 1410732fdf0eSGiuseppe CAVALLARO * @i: descriptor index 141154139cf3SJoao Pinto * @flags: gfp flag 141254139cf3SJoao Pinto * @queue: RX queue index 1413732fdf0eSGiuseppe CAVALLARO * Description: this function is called to allocate a receive buffer, perform 1414732fdf0eSGiuseppe CAVALLARO * the DMA mapping and init the descriptor. 
1415732fdf0eSGiuseppe CAVALLARO */ 1416ba39b344SChristian Marangi static int stmmac_init_rx_buffers(struct stmmac_priv *priv, 1417ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1418ba39b344SChristian Marangi struct dma_desc *p, 141954139cf3SJoao Pinto int i, gfp_t flags, u32 queue) 1420c24602efSGiuseppe CAVALLARO { 1421ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 14222af6106aSJose Abreu struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 1423884d2b84SDavid Wu gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 1424884d2b84SDavid Wu 1425884d2b84SDavid Wu if (priv->dma_cap.addr64 <= 32) 1426884d2b84SDavid Wu gfp |= GFP_DMA32; 1427c24602efSGiuseppe CAVALLARO 1428da5ec7f2SOng Boon Leong if (!buf->page) { 1429884d2b84SDavid Wu buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); 14302af6106aSJose Abreu if (!buf->page) 143156329137SBartlomiej Zolnierkiewicz return -ENOMEM; 14325fabb012SOng Boon Leong buf->page_offset = stmmac_rx_offset(priv); 1433da5ec7f2SOng Boon Leong } 1434c24602efSGiuseppe CAVALLARO 1435da5ec7f2SOng Boon Leong if (priv->sph && !buf->sec_page) { 1436884d2b84SDavid Wu buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); 143767afd6d1SJose Abreu if (!buf->sec_page) 143867afd6d1SJose Abreu return -ENOMEM; 143967afd6d1SJose Abreu 144067afd6d1SJose Abreu buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 1441396e13e1SJoakim Zhang stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 144267afd6d1SJose Abreu } else { 144367afd6d1SJose Abreu buf->sec_page = NULL; 1444396e13e1SJoakim Zhang stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 144567afd6d1SJose Abreu } 144667afd6d1SJose Abreu 14475fabb012SOng Boon Leong buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; 14485fabb012SOng Boon Leong 14492af6106aSJose Abreu stmmac_set_desc_addr(priv, p, buf->addr); 1450ba39b344SChristian Marangi if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) 14512c520b1cSJose Abreu stmmac_init_desc3(priv, 
p); 1452c24602efSGiuseppe CAVALLARO 1453c24602efSGiuseppe CAVALLARO return 0; 1454c24602efSGiuseppe CAVALLARO } 1455c24602efSGiuseppe CAVALLARO 145671fedb01SJoao Pinto /** 145771fedb01SJoao Pinto * stmmac_free_rx_buffer - free RX dma buffers 145871fedb01SJoao Pinto * @priv: private structure 1459ba39b344SChristian Marangi * @rx_q: RX queue 146071fedb01SJoao Pinto * @i: buffer index. 146171fedb01SJoao Pinto */ 1462ba39b344SChristian Marangi static void stmmac_free_rx_buffer(struct stmmac_priv *priv, 1463ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q, 1464ba39b344SChristian Marangi int i) 146556329137SBartlomiej Zolnierkiewicz { 14662af6106aSJose Abreu struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 146754139cf3SJoao Pinto 14682af6106aSJose Abreu if (buf->page) 1469458de8a9SIlias Apalodimas page_pool_put_full_page(rx_q->page_pool, buf->page, false); 14702af6106aSJose Abreu buf->page = NULL; 147167afd6d1SJose Abreu 147267afd6d1SJose Abreu if (buf->sec_page) 1473458de8a9SIlias Apalodimas page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); 147467afd6d1SJose Abreu buf->sec_page = NULL; 147556329137SBartlomiej Zolnierkiewicz } 147656329137SBartlomiej Zolnierkiewicz 14777ac6653aSJeff Kirsher /** 147871fedb01SJoao Pinto * stmmac_free_tx_buffer - free RX dma buffers 147971fedb01SJoao Pinto * @priv: private structure 1480ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1481ce736788SJoao Pinto * @queue: RX queue index 148271fedb01SJoao Pinto * @i: buffer index. 
148371fedb01SJoao Pinto */ 1484ba39b344SChristian Marangi static void stmmac_free_tx_buffer(struct stmmac_priv *priv, 1485ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1486ba39b344SChristian Marangi u32 queue, int i) 148771fedb01SJoao Pinto { 1488ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1489ce736788SJoao Pinto 1490be8b38a7SOng Boon Leong if (tx_q->tx_skbuff_dma[i].buf && 1491be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { 1492ce736788SJoao Pinto if (tx_q->tx_skbuff_dma[i].map_as_page) 149371fedb01SJoao Pinto dma_unmap_page(priv->device, 1494ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].buf, 1495ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].len, 149671fedb01SJoao Pinto DMA_TO_DEVICE); 149771fedb01SJoao Pinto else 149871fedb01SJoao Pinto dma_unmap_single(priv->device, 1499ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].buf, 1500ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].len, 150171fedb01SJoao Pinto DMA_TO_DEVICE); 150271fedb01SJoao Pinto } 150371fedb01SJoao Pinto 1504be8b38a7SOng Boon Leong if (tx_q->xdpf[i] && 15058b278a5bSOng Boon Leong (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || 15068b278a5bSOng Boon Leong tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { 1507be8b38a7SOng Boon Leong xdp_return_frame(tx_q->xdpf[i]); 1508be8b38a7SOng Boon Leong tx_q->xdpf[i] = NULL; 1509be8b38a7SOng Boon Leong } 1510be8b38a7SOng Boon Leong 1511132c32eeSOng Boon Leong if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) 1512132c32eeSOng Boon Leong tx_q->xsk_frames_done++; 1513132c32eeSOng Boon Leong 1514be8b38a7SOng Boon Leong if (tx_q->tx_skbuff[i] && 1515be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { 1516ce736788SJoao Pinto dev_kfree_skb_any(tx_q->tx_skbuff[i]); 1517ce736788SJoao Pinto tx_q->tx_skbuff[i] = NULL; 1518be8b38a7SOng Boon Leong } 1519be8b38a7SOng Boon Leong 1520ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].buf = 0; 
1521ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].map_as_page = false; 152271fedb01SJoao Pinto } 152371fedb01SJoao Pinto 152471fedb01SJoao Pinto /** 15254298255fSOng Boon Leong * dma_free_rx_skbufs - free RX dma buffers 15264298255fSOng Boon Leong * @priv: private structure 1527ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 15284298255fSOng Boon Leong * @queue: RX queue index 15294298255fSOng Boon Leong */ 1530ba39b344SChristian Marangi static void dma_free_rx_skbufs(struct stmmac_priv *priv, 1531ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1532ba39b344SChristian Marangi u32 queue) 15334298255fSOng Boon Leong { 1534ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 15354298255fSOng Boon Leong int i; 15364298255fSOng Boon Leong 1537ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_rx_size; i++) 1538ba39b344SChristian Marangi stmmac_free_rx_buffer(priv, rx_q, i); 15394298255fSOng Boon Leong } 15404298255fSOng Boon Leong 1541ba39b344SChristian Marangi static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, 1542ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1543ba39b344SChristian Marangi u32 queue, gfp_t flags) 15444298255fSOng Boon Leong { 1545ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 15464298255fSOng Boon Leong int i; 15474298255fSOng Boon Leong 1548ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_rx_size; i++) { 15494298255fSOng Boon Leong struct dma_desc *p; 15504298255fSOng Boon Leong int ret; 15514298255fSOng Boon Leong 15524298255fSOng Boon Leong if (priv->extend_desc) 15534298255fSOng Boon Leong p = &((rx_q->dma_erx + i)->basic); 15544298255fSOng Boon Leong else 15554298255fSOng Boon Leong p = rx_q->dma_rx + i; 15564298255fSOng Boon Leong 1557ba39b344SChristian Marangi ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags, 15584298255fSOng Boon Leong queue); 15594298255fSOng Boon Leong if (ret) 15604298255fSOng 
Boon Leong return ret; 1561bba2556eSOng Boon Leong 1562bba2556eSOng Boon Leong rx_q->buf_alloc_num++; 15634298255fSOng Boon Leong } 15644298255fSOng Boon Leong 15654298255fSOng Boon Leong return 0; 15664298255fSOng Boon Leong } 15674298255fSOng Boon Leong 15684298255fSOng Boon Leong /** 1569bba2556eSOng Boon Leong * dma_free_rx_xskbufs - free RX dma buffers from XSK pool 1570bba2556eSOng Boon Leong * @priv: private structure 1571ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1572bba2556eSOng Boon Leong * @queue: RX queue index 1573bba2556eSOng Boon Leong */ 1574ba39b344SChristian Marangi static void dma_free_rx_xskbufs(struct stmmac_priv *priv, 1575ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1576ba39b344SChristian Marangi u32 queue) 1577bba2556eSOng Boon Leong { 1578ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1579bba2556eSOng Boon Leong int i; 1580bba2556eSOng Boon Leong 1581ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_rx_size; i++) { 1582bba2556eSOng Boon Leong struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 1583bba2556eSOng Boon Leong 1584bba2556eSOng Boon Leong if (!buf->xdp) 1585bba2556eSOng Boon Leong continue; 1586bba2556eSOng Boon Leong 1587bba2556eSOng Boon Leong xsk_buff_free(buf->xdp); 1588bba2556eSOng Boon Leong buf->xdp = NULL; 1589bba2556eSOng Boon Leong } 1590bba2556eSOng Boon Leong } 1591bba2556eSOng Boon Leong 1592ba39b344SChristian Marangi static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, 1593ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1594ba39b344SChristian Marangi u32 queue) 1595bba2556eSOng Boon Leong { 1596ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1597bba2556eSOng Boon Leong int i; 1598bba2556eSOng Boon Leong 1599ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_rx_size; i++) { 1600bba2556eSOng Boon Leong struct stmmac_rx_buffer *buf; 1601bba2556eSOng Boon Leong 
dma_addr_t dma_addr; 1602bba2556eSOng Boon Leong struct dma_desc *p; 1603bba2556eSOng Boon Leong 1604bba2556eSOng Boon Leong if (priv->extend_desc) 1605bba2556eSOng Boon Leong p = (struct dma_desc *)(rx_q->dma_erx + i); 1606bba2556eSOng Boon Leong else 1607bba2556eSOng Boon Leong p = rx_q->dma_rx + i; 1608bba2556eSOng Boon Leong 1609bba2556eSOng Boon Leong buf = &rx_q->buf_pool[i]; 1610bba2556eSOng Boon Leong 1611bba2556eSOng Boon Leong buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 1612bba2556eSOng Boon Leong if (!buf->xdp) 1613bba2556eSOng Boon Leong return -ENOMEM; 1614bba2556eSOng Boon Leong 1615bba2556eSOng Boon Leong dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 1616bba2556eSOng Boon Leong stmmac_set_desc_addr(priv, p, dma_addr); 1617bba2556eSOng Boon Leong rx_q->buf_alloc_num++; 1618bba2556eSOng Boon Leong } 1619bba2556eSOng Boon Leong 1620bba2556eSOng Boon Leong return 0; 1621bba2556eSOng Boon Leong } 1622bba2556eSOng Boon Leong 1623bba2556eSOng Boon Leong static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) 1624bba2556eSOng Boon Leong { 1625bba2556eSOng Boon Leong if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) 1626bba2556eSOng Boon Leong return NULL; 1627bba2556eSOng Boon Leong 1628bba2556eSOng Boon Leong return xsk_get_pool_from_qid(priv->dev, queue); 1629bba2556eSOng Boon Leong } 1630bba2556eSOng Boon Leong 16319c63faaaSJoakim Zhang /** 1632de0b90e5SOng Boon Leong * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) 1633de0b90e5SOng Boon Leong * @priv: driver private structure 1634ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1635de0b90e5SOng Boon Leong * @queue: RX queue index 16365bacd778SLABBE Corentin * @flags: gfp flag. 163771fedb01SJoao Pinto * Description: this function initializes the DMA RX descriptors 16385bacd778SLABBE Corentin * and allocates the socket buffers. It supports the chained and ring 1639286a8372SGiuseppe CAVALLARO * modes. 
16407ac6653aSJeff Kirsher */ 1641ba39b344SChristian Marangi static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, 1642ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1643ba39b344SChristian Marangi u32 queue, gfp_t flags) 16447ac6653aSJeff Kirsher { 1645ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1646de0b90e5SOng Boon Leong int ret; 164754139cf3SJoao Pinto 164854139cf3SJoao Pinto netif_dbg(priv, probe, priv->dev, 164954139cf3SJoao Pinto "(%s) dma_rx_phy=0x%08x\n", __func__, 165054139cf3SJoao Pinto (u32)rx_q->dma_rx_phy); 165154139cf3SJoao Pinto 1652ba39b344SChristian Marangi stmmac_clear_rx_descriptors(priv, dma_conf, queue); 1653cbcf0999SJose Abreu 1654bba2556eSOng Boon Leong xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); 1655bba2556eSOng Boon Leong 1656bba2556eSOng Boon Leong rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); 1657bba2556eSOng Boon Leong 1658bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 1659bba2556eSOng Boon Leong WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, 1660bba2556eSOng Boon Leong MEM_TYPE_XSK_BUFF_POOL, 1661bba2556eSOng Boon Leong NULL)); 1662bba2556eSOng Boon Leong netdev_info(priv->dev, 1663bba2556eSOng Boon Leong "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", 1664bba2556eSOng Boon Leong rx_q->queue_index); 1665bba2556eSOng Boon Leong xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); 1666bba2556eSOng Boon Leong } else { 1667be8b38a7SOng Boon Leong WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, 1668be8b38a7SOng Boon Leong MEM_TYPE_PAGE_POOL, 1669be8b38a7SOng Boon Leong rx_q->page_pool)); 1670be8b38a7SOng Boon Leong netdev_info(priv->dev, 1671be8b38a7SOng Boon Leong "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", 1672be8b38a7SOng Boon Leong rx_q->queue_index); 1673bba2556eSOng Boon Leong } 1674be8b38a7SOng Boon Leong 1675bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 1676bba2556eSOng Boon Leong /* RX XDP ZC buffer pool may not be populated, e.g. 
1677bba2556eSOng Boon Leong * xdpsock TX-only. 1678bba2556eSOng Boon Leong */ 1679ba39b344SChristian Marangi stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); 1680bba2556eSOng Boon Leong } else { 1681ba39b344SChristian Marangi ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); 16824298255fSOng Boon Leong if (ret < 0) 1683de0b90e5SOng Boon Leong return -ENOMEM; 1684bba2556eSOng Boon Leong } 168554139cf3SJoao Pinto 1686c24602efSGiuseppe CAVALLARO /* Setup the chained descriptor addresses */ 1687c24602efSGiuseppe CAVALLARO if (priv->mode == STMMAC_CHAIN_MODE) { 168871fedb01SJoao Pinto if (priv->extend_desc) 16892c520b1cSJose Abreu stmmac_mode_init(priv, rx_q->dma_erx, 1690aa042f60SSong, Yoong Siang rx_q->dma_rx_phy, 1691ba39b344SChristian Marangi dma_conf->dma_rx_size, 1); 169271fedb01SJoao Pinto else 16932c520b1cSJose Abreu stmmac_mode_init(priv, rx_q->dma_rx, 1694aa042f60SSong, Yoong Siang rx_q->dma_rx_phy, 1695ba39b344SChristian Marangi dma_conf->dma_rx_size, 0); 169671fedb01SJoao Pinto } 1697de0b90e5SOng Boon Leong 1698de0b90e5SOng Boon Leong return 0; 1699de0b90e5SOng Boon Leong } 1700de0b90e5SOng Boon Leong 1701ba39b344SChristian Marangi static int init_dma_rx_desc_rings(struct net_device *dev, 1702ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1703ba39b344SChristian Marangi gfp_t flags) 1704de0b90e5SOng Boon Leong { 1705de0b90e5SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 1706de0b90e5SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 170758e06d05SDan Carpenter int queue; 1708de0b90e5SOng Boon Leong int ret; 1709de0b90e5SOng Boon Leong 1710de0b90e5SOng Boon Leong /* RX INITIALIZATION */ 1711de0b90e5SOng Boon Leong netif_dbg(priv, probe, priv->dev, 1712de0b90e5SOng Boon Leong "SKB addresses:\nskb\t\tskb data\tdma data\n"); 1713de0b90e5SOng Boon Leong 1714de0b90e5SOng Boon Leong for (queue = 0; queue < rx_count; queue++) { 1715ba39b344SChristian Marangi ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); 
1716de0b90e5SOng Boon Leong if (ret) 1717de0b90e5SOng Boon Leong goto err_init_rx_buffers; 171854139cf3SJoao Pinto } 171954139cf3SJoao Pinto 172071fedb01SJoao Pinto return 0; 172154139cf3SJoao Pinto 172271fedb01SJoao Pinto err_init_rx_buffers: 172354139cf3SJoao Pinto while (queue >= 0) { 1724ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1725bba2556eSOng Boon Leong 1726bba2556eSOng Boon Leong if (rx_q->xsk_pool) 1727ba39b344SChristian Marangi dma_free_rx_xskbufs(priv, dma_conf, queue); 1728bba2556eSOng Boon Leong else 1729ba39b344SChristian Marangi dma_free_rx_skbufs(priv, dma_conf, queue); 173054139cf3SJoao Pinto 1731bba2556eSOng Boon Leong rx_q->buf_alloc_num = 0; 1732bba2556eSOng Boon Leong rx_q->xsk_pool = NULL; 1733bba2556eSOng Boon Leong 173454139cf3SJoao Pinto queue--; 173554139cf3SJoao Pinto } 173654139cf3SJoao Pinto 173771fedb01SJoao Pinto return ret; 173871fedb01SJoao Pinto } 173971fedb01SJoao Pinto 174071fedb01SJoao Pinto /** 1741de0b90e5SOng Boon Leong * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) 1742de0b90e5SOng Boon Leong * @priv: driver private structure 1743ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1744de0b90e5SOng Boon Leong * @queue: TX queue index 174571fedb01SJoao Pinto * Description: this function initializes the DMA TX descriptors 174671fedb01SJoao Pinto * and allocates the socket buffers. It supports the chained and ring 174771fedb01SJoao Pinto * modes. 
174871fedb01SJoao Pinto */ 1749ba39b344SChristian Marangi static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, 1750ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1751ba39b344SChristian Marangi u32 queue) 175271fedb01SJoao Pinto { 1753ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1754de0b90e5SOng Boon Leong int i; 1755ce736788SJoao Pinto 175671fedb01SJoao Pinto netif_dbg(priv, probe, priv->dev, 1757ce736788SJoao Pinto "(%s) dma_tx_phy=0x%08x\n", __func__, 1758ce736788SJoao Pinto (u32)tx_q->dma_tx_phy); 175971fedb01SJoao Pinto 176071fedb01SJoao Pinto /* Setup the chained descriptor addresses */ 176171fedb01SJoao Pinto if (priv->mode == STMMAC_CHAIN_MODE) { 176271fedb01SJoao Pinto if (priv->extend_desc) 17632c520b1cSJose Abreu stmmac_mode_init(priv, tx_q->dma_etx, 1764aa042f60SSong, Yoong Siang tx_q->dma_tx_phy, 1765ba39b344SChristian Marangi dma_conf->dma_tx_size, 1); 1766579a25a8SJose Abreu else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) 17672c520b1cSJose Abreu stmmac_mode_init(priv, tx_q->dma_tx, 1768aa042f60SSong, Yoong Siang tx_q->dma_tx_phy, 1769ba39b344SChristian Marangi dma_conf->dma_tx_size, 0); 1770c24602efSGiuseppe CAVALLARO } 1771286a8372SGiuseppe CAVALLARO 1772132c32eeSOng Boon Leong tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); 1773132c32eeSOng Boon Leong 1774ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_tx_size; i++) { 1775c24602efSGiuseppe CAVALLARO struct dma_desc *p; 1776de0b90e5SOng Boon Leong 1777c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 1778ce736788SJoao Pinto p = &((tx_q->dma_etx + i)->basic); 1779579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1780579a25a8SJose Abreu p = &((tx_q->dma_entx + i)->basic); 1781c24602efSGiuseppe CAVALLARO else 1782ce736788SJoao Pinto p = tx_q->dma_tx + i; 1783f748be53SAlexandre TORGUE 178444c67f85SJose Abreu stmmac_clear_desc(priv, p); 1785f748be53SAlexandre TORGUE 1786ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].buf = 0; 
1787ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].map_as_page = false; 1788ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].len = 0; 1789ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].last_segment = false; 1790ce736788SJoao Pinto tx_q->tx_skbuff[i] = NULL; 17914a7d666aSGiuseppe CAVALLARO } 1792c24602efSGiuseppe CAVALLARO 1793de0b90e5SOng Boon Leong return 0; 1794c22a3f48SJoao Pinto } 17957ac6653aSJeff Kirsher 1796ba39b344SChristian Marangi static int init_dma_tx_desc_rings(struct net_device *dev, 1797ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 1798de0b90e5SOng Boon Leong { 1799de0b90e5SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 1800de0b90e5SOng Boon Leong u32 tx_queue_cnt; 1801de0b90e5SOng Boon Leong u32 queue; 1802de0b90e5SOng Boon Leong 1803de0b90e5SOng Boon Leong tx_queue_cnt = priv->plat->tx_queues_to_use; 1804de0b90e5SOng Boon Leong 1805de0b90e5SOng Boon Leong for (queue = 0; queue < tx_queue_cnt; queue++) 1806ba39b344SChristian Marangi __init_dma_tx_desc_rings(priv, dma_conf, queue); 1807de0b90e5SOng Boon Leong 180871fedb01SJoao Pinto return 0; 180971fedb01SJoao Pinto } 181071fedb01SJoao Pinto 181171fedb01SJoao Pinto /** 181271fedb01SJoao Pinto * init_dma_desc_rings - init the RX/TX descriptor rings 181371fedb01SJoao Pinto * @dev: net device structure 1814ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 181571fedb01SJoao Pinto * @flags: gfp flag. 181671fedb01SJoao Pinto * Description: this function initializes the DMA RX/TX descriptors 181771fedb01SJoao Pinto * and allocates the socket buffers. It supports the chained and ring 181871fedb01SJoao Pinto * modes. 
181971fedb01SJoao Pinto */ 1820ba39b344SChristian Marangi static int init_dma_desc_rings(struct net_device *dev, 1821ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1822ba39b344SChristian Marangi gfp_t flags) 182371fedb01SJoao Pinto { 182471fedb01SJoao Pinto struct stmmac_priv *priv = netdev_priv(dev); 182571fedb01SJoao Pinto int ret; 182671fedb01SJoao Pinto 1827ba39b344SChristian Marangi ret = init_dma_rx_desc_rings(dev, dma_conf, flags); 182871fedb01SJoao Pinto if (ret) 182971fedb01SJoao Pinto return ret; 183071fedb01SJoao Pinto 1831ba39b344SChristian Marangi ret = init_dma_tx_desc_rings(dev, dma_conf); 183271fedb01SJoao Pinto 1833ba39b344SChristian Marangi stmmac_clear_descriptors(priv, dma_conf); 18347ac6653aSJeff Kirsher 1835c24602efSGiuseppe CAVALLARO if (netif_msg_hw(priv)) 1836ba39b344SChristian Marangi stmmac_display_rings(priv, dma_conf); 183756329137SBartlomiej Zolnierkiewicz 183856329137SBartlomiej Zolnierkiewicz return ret; 18397ac6653aSJeff Kirsher } 18407ac6653aSJeff Kirsher 184171fedb01SJoao Pinto /** 184271fedb01SJoao Pinto * dma_free_tx_skbufs - free TX dma buffers 184371fedb01SJoao Pinto * @priv: private structure 1844ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1845ce736788SJoao Pinto * @queue: TX queue index 184671fedb01SJoao Pinto */ 1847ba39b344SChristian Marangi static void dma_free_tx_skbufs(struct stmmac_priv *priv, 1848ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1849ba39b344SChristian Marangi u32 queue) 18507ac6653aSJeff Kirsher { 1851ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 18527ac6653aSJeff Kirsher int i; 18537ac6653aSJeff Kirsher 1854132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 1855132c32eeSOng Boon Leong 1856ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_tx_size; i++) 1857ba39b344SChristian Marangi stmmac_free_tx_buffer(priv, dma_conf, queue, i); 1858132c32eeSOng Boon Leong 1859132c32eeSOng Boon Leong if (tx_q->xsk_pool 
&& tx_q->xsk_frames_done) { 1860132c32eeSOng Boon Leong xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 1861132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 1862132c32eeSOng Boon Leong tx_q->xsk_pool = NULL; 1863132c32eeSOng Boon Leong } 18647ac6653aSJeff Kirsher } 18657ac6653aSJeff Kirsher 1866732fdf0eSGiuseppe CAVALLARO /** 18674ec236c7SFugang Duan * stmmac_free_tx_skbufs - free TX skb buffers 18684ec236c7SFugang Duan * @priv: private structure 18694ec236c7SFugang Duan */ 18704ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 18714ec236c7SFugang Duan { 18724ec236c7SFugang Duan u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 18734ec236c7SFugang Duan u32 queue; 18744ec236c7SFugang Duan 18754ec236c7SFugang Duan for (queue = 0; queue < tx_queue_cnt; queue++) 1876ba39b344SChristian Marangi dma_free_tx_skbufs(priv, &priv->dma_conf, queue); 18774ec236c7SFugang Duan } 18784ec236c7SFugang Duan 18794ec236c7SFugang Duan /** 1880da5ec7f2SOng Boon Leong * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) 188154139cf3SJoao Pinto * @priv: private structure 1882ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1883da5ec7f2SOng Boon Leong * @queue: RX queue index 188454139cf3SJoao Pinto */ 1885ba39b344SChristian Marangi static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, 1886ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1887ba39b344SChristian Marangi u32 queue) 188854139cf3SJoao Pinto { 1889ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 189054139cf3SJoao Pinto 189154139cf3SJoao Pinto /* Release the DMA RX socket buffers */ 1892bba2556eSOng Boon Leong if (rx_q->xsk_pool) 1893ba39b344SChristian Marangi dma_free_rx_xskbufs(priv, dma_conf, queue); 1894bba2556eSOng Boon Leong else 1895ba39b344SChristian Marangi dma_free_rx_skbufs(priv, dma_conf, queue); 189654139cf3SJoao Pinto 1897bba2556eSOng Boon Leong rx_q->buf_alloc_num = 0; 
1898bba2556eSOng Boon Leong rx_q->xsk_pool = NULL; 1899bba2556eSOng Boon Leong 190054139cf3SJoao Pinto /* Free DMA regions of consistent memory previously allocated */ 190154139cf3SJoao Pinto if (!priv->extend_desc) 1902ba39b344SChristian Marangi dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1903aa042f60SSong, Yoong Siang sizeof(struct dma_desc), 190454139cf3SJoao Pinto rx_q->dma_rx, rx_q->dma_rx_phy); 190554139cf3SJoao Pinto else 1906ba39b344SChristian Marangi dma_free_coherent(priv->device, dma_conf->dma_rx_size * 190754139cf3SJoao Pinto sizeof(struct dma_extended_desc), 190854139cf3SJoao Pinto rx_q->dma_erx, rx_q->dma_rx_phy); 190954139cf3SJoao Pinto 1910be8b38a7SOng Boon Leong if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) 1911be8b38a7SOng Boon Leong xdp_rxq_info_unreg(&rx_q->xdp_rxq); 1912be8b38a7SOng Boon Leong 19132af6106aSJose Abreu kfree(rx_q->buf_pool); 1914c3f812ceSJonathan Lemon if (rx_q->page_pool) 19152af6106aSJose Abreu page_pool_destroy(rx_q->page_pool); 19162af6106aSJose Abreu } 1917da5ec7f2SOng Boon Leong 1918ba39b344SChristian Marangi static void free_dma_rx_desc_resources(struct stmmac_priv *priv, 1919ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 1920da5ec7f2SOng Boon Leong { 1921da5ec7f2SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 1922da5ec7f2SOng Boon Leong u32 queue; 1923da5ec7f2SOng Boon Leong 1924da5ec7f2SOng Boon Leong /* Free RX queue resources */ 1925da5ec7f2SOng Boon Leong for (queue = 0; queue < rx_count; queue++) 1926ba39b344SChristian Marangi __free_dma_rx_desc_resources(priv, dma_conf, queue); 192754139cf3SJoao Pinto } 192854139cf3SJoao Pinto 192954139cf3SJoao Pinto /** 1930da5ec7f2SOng Boon Leong * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) 1931ce736788SJoao Pinto * @priv: private structure 1932ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1933da5ec7f2SOng Boon Leong * @queue: TX queue index 1934ce736788SJoao Pinto */ 1935ba39b344SChristian 
Marangi static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, 1936ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1937ba39b344SChristian Marangi u32 queue) 1938ce736788SJoao Pinto { 1939ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1940579a25a8SJose Abreu size_t size; 1941579a25a8SJose Abreu void *addr; 1942ce736788SJoao Pinto 1943ce736788SJoao Pinto /* Release the DMA TX socket buffers */ 1944ba39b344SChristian Marangi dma_free_tx_skbufs(priv, dma_conf, queue); 1945ce736788SJoao Pinto 1946579a25a8SJose Abreu if (priv->extend_desc) { 1947579a25a8SJose Abreu size = sizeof(struct dma_extended_desc); 1948579a25a8SJose Abreu addr = tx_q->dma_etx; 1949579a25a8SJose Abreu } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1950579a25a8SJose Abreu size = sizeof(struct dma_edesc); 1951579a25a8SJose Abreu addr = tx_q->dma_entx; 1952579a25a8SJose Abreu } else { 1953579a25a8SJose Abreu size = sizeof(struct dma_desc); 1954579a25a8SJose Abreu addr = tx_q->dma_tx; 1955579a25a8SJose Abreu } 1956579a25a8SJose Abreu 1957ba39b344SChristian Marangi size *= dma_conf->dma_tx_size; 1958579a25a8SJose Abreu 1959579a25a8SJose Abreu dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); 1960ce736788SJoao Pinto 1961ce736788SJoao Pinto kfree(tx_q->tx_skbuff_dma); 1962ce736788SJoao Pinto kfree(tx_q->tx_skbuff); 1963ce736788SJoao Pinto } 1964da5ec7f2SOng Boon Leong 1965ba39b344SChristian Marangi static void free_dma_tx_desc_resources(struct stmmac_priv *priv, 1966ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 1967da5ec7f2SOng Boon Leong { 1968da5ec7f2SOng Boon Leong u32 tx_count = priv->plat->tx_queues_to_use; 1969da5ec7f2SOng Boon Leong u32 queue; 1970da5ec7f2SOng Boon Leong 1971da5ec7f2SOng Boon Leong /* Free TX queue resources */ 1972da5ec7f2SOng Boon Leong for (queue = 0; queue < tx_count; queue++) 1973ba39b344SChristian Marangi __free_dma_tx_desc_resources(priv, dma_conf, queue); 1974ce736788SJoao Pinto } 
1975ce736788SJoao Pinto 1976ce736788SJoao Pinto /** 1977da5ec7f2SOng Boon Leong * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). 1978732fdf0eSGiuseppe CAVALLARO * @priv: private structure 1979ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1980da5ec7f2SOng Boon Leong * @queue: RX queue index 1981732fdf0eSGiuseppe CAVALLARO * Description: according to which descriptor can be used (extend or basic) 1982732fdf0eSGiuseppe CAVALLARO * this function allocates the resources for TX and RX paths. In case of 1983732fdf0eSGiuseppe CAVALLARO * reception, for example, it pre-allocated the RX socket buffer in order to 1984732fdf0eSGiuseppe CAVALLARO * allow zero-copy mechanism. 1985732fdf0eSGiuseppe CAVALLARO */ 1986ba39b344SChristian Marangi static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, 1987ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1988ba39b344SChristian Marangi u32 queue) 198909f8d696SSrinivas Kandagatla { 1990ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1991be8b38a7SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 1992da5ec7f2SOng Boon Leong bool xdp_prog = stmmac_xdp_is_enabled(priv); 19932af6106aSJose Abreu struct page_pool_params pp_params = { 0 }; 19944f28bd95SThierry Reding unsigned int num_pages; 1995132c32eeSOng Boon Leong unsigned int napi_id; 1996be8b38a7SOng Boon Leong int ret; 199754139cf3SJoao Pinto 199854139cf3SJoao Pinto rx_q->queue_index = queue; 199954139cf3SJoao Pinto rx_q->priv_data = priv; 200054139cf3SJoao Pinto 20015fabb012SOng Boon Leong pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 2002ba39b344SChristian Marangi pp_params.pool_size = dma_conf->dma_rx_size; 2003ba39b344SChristian Marangi num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); 20044f28bd95SThierry Reding pp_params.order = ilog2(num_pages); 20052af6106aSJose Abreu pp_params.nid = dev_to_node(priv->device); 20062af6106aSJose Abreu 
pp_params.dev = priv->device; 20075fabb012SOng Boon Leong pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 20085fabb012SOng Boon Leong pp_params.offset = stmmac_rx_offset(priv); 20095fabb012SOng Boon Leong pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); 20105bacd778SLABBE Corentin 20112af6106aSJose Abreu rx_q->page_pool = page_pool_create(&pp_params); 20122af6106aSJose Abreu if (IS_ERR(rx_q->page_pool)) { 20132af6106aSJose Abreu ret = PTR_ERR(rx_q->page_pool); 20142af6106aSJose Abreu rx_q->page_pool = NULL; 2015da5ec7f2SOng Boon Leong return ret; 20162af6106aSJose Abreu } 20172af6106aSJose Abreu 2018ba39b344SChristian Marangi rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, 2019aa042f60SSong, Yoong Siang sizeof(*rx_q->buf_pool), 20205bacd778SLABBE Corentin GFP_KERNEL); 20212af6106aSJose Abreu if (!rx_q->buf_pool) 2022da5ec7f2SOng Boon Leong return -ENOMEM; 20235bacd778SLABBE Corentin 20245bacd778SLABBE Corentin if (priv->extend_desc) { 2025750afb08SLuis Chamberlain rx_q->dma_erx = dma_alloc_coherent(priv->device, 2026ba39b344SChristian Marangi dma_conf->dma_rx_size * 2027aa042f60SSong, Yoong Siang sizeof(struct dma_extended_desc), 202854139cf3SJoao Pinto &rx_q->dma_rx_phy, 20295bacd778SLABBE Corentin GFP_KERNEL); 203054139cf3SJoao Pinto if (!rx_q->dma_erx) 2031da5ec7f2SOng Boon Leong return -ENOMEM; 20325bacd778SLABBE Corentin 203371fedb01SJoao Pinto } else { 2034750afb08SLuis Chamberlain rx_q->dma_rx = dma_alloc_coherent(priv->device, 2035ba39b344SChristian Marangi dma_conf->dma_rx_size * 2036aa042f60SSong, Yoong Siang sizeof(struct dma_desc), 203754139cf3SJoao Pinto &rx_q->dma_rx_phy, 203871fedb01SJoao Pinto GFP_KERNEL); 203954139cf3SJoao Pinto if (!rx_q->dma_rx) 2040da5ec7f2SOng Boon Leong return -ENOMEM; 204171fedb01SJoao Pinto } 2042be8b38a7SOng Boon Leong 2043132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 2044132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) 2045132c32eeSOng Boon Leong napi_id = 
ch->rxtx_napi.napi_id; 2046132c32eeSOng Boon Leong else 2047132c32eeSOng Boon Leong napi_id = ch->rx_napi.napi_id; 2048132c32eeSOng Boon Leong 2049be8b38a7SOng Boon Leong ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, 2050be8b38a7SOng Boon Leong rx_q->queue_index, 2051132c32eeSOng Boon Leong napi_id); 2052be8b38a7SOng Boon Leong if (ret) { 2053be8b38a7SOng Boon Leong netdev_err(priv->dev, "Failed to register xdp rxq info\n"); 2054da5ec7f2SOng Boon Leong return -EINVAL; 2055be8b38a7SOng Boon Leong } 2056da5ec7f2SOng Boon Leong 2057da5ec7f2SOng Boon Leong return 0; 2058da5ec7f2SOng Boon Leong } 2059da5ec7f2SOng Boon Leong 2060ba39b344SChristian Marangi static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, 2061ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 2062da5ec7f2SOng Boon Leong { 2063da5ec7f2SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 2064da5ec7f2SOng Boon Leong u32 queue; 2065da5ec7f2SOng Boon Leong int ret; 2066da5ec7f2SOng Boon Leong 2067da5ec7f2SOng Boon Leong /* RX queues buffers and DMA */ 2068da5ec7f2SOng Boon Leong for (queue = 0; queue < rx_count; queue++) { 2069ba39b344SChristian Marangi ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); 2070da5ec7f2SOng Boon Leong if (ret) 2071da5ec7f2SOng Boon Leong goto err_dma; 207254139cf3SJoao Pinto } 207371fedb01SJoao Pinto 207471fedb01SJoao Pinto return 0; 207571fedb01SJoao Pinto 207671fedb01SJoao Pinto err_dma: 2077ba39b344SChristian Marangi free_dma_rx_desc_resources(priv, dma_conf); 207854139cf3SJoao Pinto 207971fedb01SJoao Pinto return ret; 208071fedb01SJoao Pinto } 208171fedb01SJoao Pinto 208271fedb01SJoao Pinto /** 2083da5ec7f2SOng Boon Leong * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). 
208471fedb01SJoao Pinto * @priv: private structure 2085ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 2086da5ec7f2SOng Boon Leong * @queue: TX queue index 208771fedb01SJoao Pinto * Description: according to which descriptor can be used (extend or basic) 208871fedb01SJoao Pinto * this function allocates the resources for TX and RX paths. In case of 208971fedb01SJoao Pinto * reception, for example, it pre-allocated the RX socket buffer in order to 209071fedb01SJoao Pinto * allow zero-copy mechanism. 209171fedb01SJoao Pinto */ 2092ba39b344SChristian Marangi static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2093ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 2094ba39b344SChristian Marangi u32 queue) 209571fedb01SJoao Pinto { 2096ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 2097579a25a8SJose Abreu size_t size; 2098579a25a8SJose Abreu void *addr; 2099ce736788SJoao Pinto 2100ce736788SJoao Pinto tx_q->queue_index = queue; 2101ce736788SJoao Pinto tx_q->priv_data = priv; 2102ce736788SJoao Pinto 2103ba39b344SChristian Marangi tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, 2104ce736788SJoao Pinto sizeof(*tx_q->tx_skbuff_dma), 210571fedb01SJoao Pinto GFP_KERNEL); 2106ce736788SJoao Pinto if (!tx_q->tx_skbuff_dma) 2107da5ec7f2SOng Boon Leong return -ENOMEM; 210871fedb01SJoao Pinto 2109ba39b344SChristian Marangi tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, 2110ce736788SJoao Pinto sizeof(struct sk_buff *), 211171fedb01SJoao Pinto GFP_KERNEL); 2112ce736788SJoao Pinto if (!tx_q->tx_skbuff) 2113da5ec7f2SOng Boon Leong return -ENOMEM; 211471fedb01SJoao Pinto 2115579a25a8SJose Abreu if (priv->extend_desc) 2116579a25a8SJose Abreu size = sizeof(struct dma_extended_desc); 2117579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2118579a25a8SJose Abreu size = sizeof(struct dma_edesc); 2119579a25a8SJose Abreu else 2120579a25a8SJose Abreu size = sizeof(struct dma_desc); 
2121579a25a8SJose Abreu 2122ba39b344SChristian Marangi size *= dma_conf->dma_tx_size; 2123579a25a8SJose Abreu 2124579a25a8SJose Abreu addr = dma_alloc_coherent(priv->device, size, 2125579a25a8SJose Abreu &tx_q->dma_tx_phy, GFP_KERNEL); 2126579a25a8SJose Abreu if (!addr) 2127da5ec7f2SOng Boon Leong return -ENOMEM; 2128579a25a8SJose Abreu 2129579a25a8SJose Abreu if (priv->extend_desc) 2130579a25a8SJose Abreu tx_q->dma_etx = addr; 2131579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2132579a25a8SJose Abreu tx_q->dma_entx = addr; 2133579a25a8SJose Abreu else 2134579a25a8SJose Abreu tx_q->dma_tx = addr; 2135da5ec7f2SOng Boon Leong 2136da5ec7f2SOng Boon Leong return 0; 2137da5ec7f2SOng Boon Leong } 2138da5ec7f2SOng Boon Leong 2139ba39b344SChristian Marangi static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2140ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 2141da5ec7f2SOng Boon Leong { 2142da5ec7f2SOng Boon Leong u32 tx_count = priv->plat->tx_queues_to_use; 2143da5ec7f2SOng Boon Leong u32 queue; 2144da5ec7f2SOng Boon Leong int ret; 2145da5ec7f2SOng Boon Leong 2146da5ec7f2SOng Boon Leong /* TX queues buffers and DMA */ 2147da5ec7f2SOng Boon Leong for (queue = 0; queue < tx_count; queue++) { 2148ba39b344SChristian Marangi ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); 2149da5ec7f2SOng Boon Leong if (ret) 2150da5ec7f2SOng Boon Leong goto err_dma; 21515bacd778SLABBE Corentin } 21525bacd778SLABBE Corentin 21535bacd778SLABBE Corentin return 0; 21545bacd778SLABBE Corentin 215562242260SChristophe Jaillet err_dma: 2156ba39b344SChristian Marangi free_dma_tx_desc_resources(priv, dma_conf); 215709f8d696SSrinivas Kandagatla return ret; 21585bacd778SLABBE Corentin } 215909f8d696SSrinivas Kandagatla 216071fedb01SJoao Pinto /** 216171fedb01SJoao Pinto * alloc_dma_desc_resources - alloc TX/RX resources. 
216271fedb01SJoao Pinto * @priv: private structure 2163ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 216471fedb01SJoao Pinto * Description: according to which descriptor can be used (extend or basic) 216571fedb01SJoao Pinto * this function allocates the resources for TX and RX paths. In case of 216671fedb01SJoao Pinto * reception, for example, it pre-allocated the RX socket buffer in order to 216771fedb01SJoao Pinto * allow zero-copy mechanism. 216871fedb01SJoao Pinto */ 2169ba39b344SChristian Marangi static int alloc_dma_desc_resources(struct stmmac_priv *priv, 2170ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 21715bacd778SLABBE Corentin { 217254139cf3SJoao Pinto /* RX Allocation */ 2173ba39b344SChristian Marangi int ret = alloc_dma_rx_desc_resources(priv, dma_conf); 217471fedb01SJoao Pinto 217571fedb01SJoao Pinto if (ret) 217671fedb01SJoao Pinto return ret; 217771fedb01SJoao Pinto 2178ba39b344SChristian Marangi ret = alloc_dma_tx_desc_resources(priv, dma_conf); 217971fedb01SJoao Pinto 218071fedb01SJoao Pinto return ret; 218171fedb01SJoao Pinto } 218271fedb01SJoao Pinto 218371fedb01SJoao Pinto /** 218471fedb01SJoao Pinto * free_dma_desc_resources - free dma desc resources 218571fedb01SJoao Pinto * @priv: private structure 2186ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 218771fedb01SJoao Pinto */ 2188ba39b344SChristian Marangi static void free_dma_desc_resources(struct stmmac_priv *priv, 2189ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 219071fedb01SJoao Pinto { 219171fedb01SJoao Pinto /* Release the DMA TX socket buffers */ 2192ba39b344SChristian Marangi free_dma_tx_desc_resources(priv, dma_conf); 2193be8b38a7SOng Boon Leong 2194be8b38a7SOng Boon Leong /* Release the DMA RX socket buffers later 2195be8b38a7SOng Boon Leong * to ensure all pending XDP_TX buffers are returned. 
2196be8b38a7SOng Boon Leong */ 2197ba39b344SChristian Marangi free_dma_rx_desc_resources(priv, dma_conf); 219871fedb01SJoao Pinto } 219971fedb01SJoao Pinto 220071fedb01SJoao Pinto /** 22019eb12474Sjpinto * stmmac_mac_enable_rx_queues - Enable MAC rx queues 22029eb12474Sjpinto * @priv: driver private structure 22039eb12474Sjpinto * Description: It is used for enabling the rx queues in the MAC 22049eb12474Sjpinto */ 22059eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 22069eb12474Sjpinto { 22074f6046f5SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 22084f6046f5SJoao Pinto int queue; 22094f6046f5SJoao Pinto u8 mode; 22109eb12474Sjpinto 22114f6046f5SJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 22124f6046f5SJoao Pinto mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 2213c10d4c82SJose Abreu stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 22144f6046f5SJoao Pinto } 22159eb12474Sjpinto } 22169eb12474Sjpinto 22179eb12474Sjpinto /** 2218ae4f0d46SJoao Pinto * stmmac_start_rx_dma - start RX DMA channel 2219ae4f0d46SJoao Pinto * @priv: driver private structure 2220ae4f0d46SJoao Pinto * @chan: RX channel index 2221ae4f0d46SJoao Pinto * Description: 2222ae4f0d46SJoao Pinto * This starts a RX DMA channel 2223ae4f0d46SJoao Pinto */ 2224ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2225ae4f0d46SJoao Pinto { 2226ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2227a4e887faSJose Abreu stmmac_start_rx(priv, priv->ioaddr, chan); 2228ae4f0d46SJoao Pinto } 2229ae4f0d46SJoao Pinto 2230ae4f0d46SJoao Pinto /** 2231ae4f0d46SJoao Pinto * stmmac_start_tx_dma - start TX DMA channel 2232ae4f0d46SJoao Pinto * @priv: driver private structure 2233ae4f0d46SJoao Pinto * @chan: TX channel index 2234ae4f0d46SJoao Pinto * Description: 2235ae4f0d46SJoao Pinto * This starts a TX DMA channel 2236ae4f0d46SJoao Pinto */ 2237ae4f0d46SJoao Pinto static 
void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2238ae4f0d46SJoao Pinto { 2239ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2240a4e887faSJose Abreu stmmac_start_tx(priv, priv->ioaddr, chan); 2241ae4f0d46SJoao Pinto } 2242ae4f0d46SJoao Pinto 2243ae4f0d46SJoao Pinto /** 2244ae4f0d46SJoao Pinto * stmmac_stop_rx_dma - stop RX DMA channel 2245ae4f0d46SJoao Pinto * @priv: driver private structure 2246ae4f0d46SJoao Pinto * @chan: RX channel index 2247ae4f0d46SJoao Pinto * Description: 2248ae4f0d46SJoao Pinto * This stops a RX DMA channel 2249ae4f0d46SJoao Pinto */ 2250ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2251ae4f0d46SJoao Pinto { 2252ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2253a4e887faSJose Abreu stmmac_stop_rx(priv, priv->ioaddr, chan); 2254ae4f0d46SJoao Pinto } 2255ae4f0d46SJoao Pinto 2256ae4f0d46SJoao Pinto /** 2257ae4f0d46SJoao Pinto * stmmac_stop_tx_dma - stop TX DMA channel 2258ae4f0d46SJoao Pinto * @priv: driver private structure 2259ae4f0d46SJoao Pinto * @chan: TX channel index 2260ae4f0d46SJoao Pinto * Description: 2261ae4f0d46SJoao Pinto * This stops a TX DMA channel 2262ae4f0d46SJoao Pinto */ 2263ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2264ae4f0d46SJoao Pinto { 2265ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2266a4e887faSJose Abreu stmmac_stop_tx(priv, priv->ioaddr, chan); 2267ae4f0d46SJoao Pinto } 2268ae4f0d46SJoao Pinto 2269087a7b94SVincent Whitchurch static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) 2270087a7b94SVincent Whitchurch { 2271087a7b94SVincent Whitchurch u32 rx_channels_count = priv->plat->rx_queues_to_use; 2272087a7b94SVincent Whitchurch u32 tx_channels_count = priv->plat->tx_queues_to_use; 2273087a7b94SVincent Whitchurch u32 dma_csr_ch = max(rx_channels_count, 
tx_channels_count); 2274087a7b94SVincent Whitchurch u32 chan; 2275087a7b94SVincent Whitchurch 2276087a7b94SVincent Whitchurch for (chan = 0; chan < dma_csr_ch; chan++) { 2277087a7b94SVincent Whitchurch struct stmmac_channel *ch = &priv->channel[chan]; 2278087a7b94SVincent Whitchurch unsigned long flags; 2279087a7b94SVincent Whitchurch 2280087a7b94SVincent Whitchurch spin_lock_irqsave(&ch->lock, flags); 2281087a7b94SVincent Whitchurch stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2282087a7b94SVincent Whitchurch spin_unlock_irqrestore(&ch->lock, flags); 2283087a7b94SVincent Whitchurch } 2284087a7b94SVincent Whitchurch } 2285087a7b94SVincent Whitchurch 2286ae4f0d46SJoao Pinto /** 2287ae4f0d46SJoao Pinto * stmmac_start_all_dma - start all RX and TX DMA channels 2288ae4f0d46SJoao Pinto * @priv: driver private structure 2289ae4f0d46SJoao Pinto * Description: 2290ae4f0d46SJoao Pinto * This starts all the RX and TX DMA channels 2291ae4f0d46SJoao Pinto */ 2292ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv) 2293ae4f0d46SJoao Pinto { 2294ae4f0d46SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 2295ae4f0d46SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2296ae4f0d46SJoao Pinto u32 chan = 0; 2297ae4f0d46SJoao Pinto 2298ae4f0d46SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 2299ae4f0d46SJoao Pinto stmmac_start_rx_dma(priv, chan); 2300ae4f0d46SJoao Pinto 2301ae4f0d46SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 2302ae4f0d46SJoao Pinto stmmac_start_tx_dma(priv, chan); 2303ae4f0d46SJoao Pinto } 2304ae4f0d46SJoao Pinto 2305ae4f0d46SJoao Pinto /** 2306ae4f0d46SJoao Pinto * stmmac_stop_all_dma - stop all RX and TX DMA channels 2307ae4f0d46SJoao Pinto * @priv: driver private structure 2308ae4f0d46SJoao Pinto * Description: 2309ae4f0d46SJoao Pinto * This stops the RX and TX DMA channels 2310ae4f0d46SJoao Pinto */ 2311ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct 
stmmac_priv *priv) 2312ae4f0d46SJoao Pinto { 2313ae4f0d46SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 2314ae4f0d46SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2315ae4f0d46SJoao Pinto u32 chan = 0; 2316ae4f0d46SJoao Pinto 2317ae4f0d46SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 2318ae4f0d46SJoao Pinto stmmac_stop_rx_dma(priv, chan); 2319ae4f0d46SJoao Pinto 2320ae4f0d46SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 2321ae4f0d46SJoao Pinto stmmac_stop_tx_dma(priv, chan); 2322ae4f0d46SJoao Pinto } 2323ae4f0d46SJoao Pinto 2324ae4f0d46SJoao Pinto /** 23257ac6653aSJeff Kirsher * stmmac_dma_operation_mode - HW DMA operation mode 232632ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 2327732fdf0eSGiuseppe CAVALLARO * Description: it is used for configuring the DMA operation mode register in 2328732fdf0eSGiuseppe CAVALLARO * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 23297ac6653aSJeff Kirsher */ 23307ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 23317ac6653aSJeff Kirsher { 23326deee222SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 23336deee222SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2334f88203a2SVince Bridgers int rxfifosz = priv->plat->rx_fifo_size; 233552a76235SJose Abreu int txfifosz = priv->plat->tx_fifo_size; 23366deee222SJoao Pinto u32 txmode = 0; 23376deee222SJoao Pinto u32 rxmode = 0; 23386deee222SJoao Pinto u32 chan = 0; 2339a0daae13SJose Abreu u8 qmode = 0; 2340f88203a2SVince Bridgers 234111fbf811SThierry Reding if (rxfifosz == 0) 234211fbf811SThierry Reding rxfifosz = priv->dma_cap.rx_fifo_size; 234352a76235SJose Abreu if (txfifosz == 0) 234452a76235SJose Abreu txfifosz = priv->dma_cap.tx_fifo_size; 234552a76235SJose Abreu 234652a76235SJose Abreu /* Adjust for real per queue fifo size */ 234752a76235SJose Abreu rxfifosz /= rx_channels_count; 234852a76235SJose Abreu txfifosz 
/= tx_channels_count; 234911fbf811SThierry Reding 23506deee222SJoao Pinto if (priv->plat->force_thresh_dma_mode) { 23516deee222SJoao Pinto txmode = tc; 23526deee222SJoao Pinto rxmode = tc; 23536deee222SJoao Pinto } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 23547ac6653aSJeff Kirsher /* 23557ac6653aSJeff Kirsher * In case of GMAC, SF mode can be enabled 23567ac6653aSJeff Kirsher * to perform the TX COE in HW. This depends on: 23577ac6653aSJeff Kirsher * 1) TX COE if actually supported 23587ac6653aSJeff Kirsher * 2) There is no bugged Jumbo frame support 23597ac6653aSJeff Kirsher * that needs to not insert csum in the TDES. 23607ac6653aSJeff Kirsher */ 23616deee222SJoao Pinto txmode = SF_DMA_MODE; 23626deee222SJoao Pinto rxmode = SF_DMA_MODE; 2363b2dec116SSonic Zhang priv->xstats.threshold = SF_DMA_MODE; 23646deee222SJoao Pinto } else { 23656deee222SJoao Pinto txmode = tc; 23666deee222SJoao Pinto rxmode = SF_DMA_MODE; 23676deee222SJoao Pinto } 23686deee222SJoao Pinto 23696deee222SJoao Pinto /* configure all channels */ 2370a0daae13SJose Abreu for (chan = 0; chan < rx_channels_count; chan++) { 23718531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2372bba2556eSOng Boon Leong u32 buf_size; 2373bba2556eSOng Boon Leong 2374a0daae13SJose Abreu qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 23756deee222SJoao Pinto 2376a4e887faSJose Abreu stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2377a0daae13SJose Abreu rxfifosz, qmode); 2378bba2556eSOng Boon Leong 2379bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 2380bba2556eSOng Boon Leong buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 2381bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 2382bba2556eSOng Boon Leong buf_size, 23834205c88eSJose Abreu chan); 2384bba2556eSOng Boon Leong } else { 2385bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 23868531c808SChristian Marangi priv->dma_conf.dma_buf_sz, 2387bba2556eSOng Boon 
Leong chan); 2388bba2556eSOng Boon Leong } 2389a0daae13SJose Abreu } 2390a0daae13SJose Abreu 2391a0daae13SJose Abreu for (chan = 0; chan < tx_channels_count; chan++) { 2392a0daae13SJose Abreu qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2393a0daae13SJose Abreu 2394a4e887faSJose Abreu stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2395a0daae13SJose Abreu txfifosz, qmode); 2396a0daae13SJose Abreu } 23977ac6653aSJeff Kirsher } 23987ac6653aSJeff Kirsher 2399132c32eeSOng Boon Leong static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 2400132c32eeSOng Boon Leong { 2401132c32eeSOng Boon Leong struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); 24028531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2403132c32eeSOng Boon Leong struct xsk_buff_pool *pool = tx_q->xsk_pool; 2404132c32eeSOng Boon Leong unsigned int entry = tx_q->cur_tx; 2405132c32eeSOng Boon Leong struct dma_desc *tx_desc = NULL; 2406132c32eeSOng Boon Leong struct xdp_desc xdp_desc; 2407132c32eeSOng Boon Leong bool work_done = true; 2408132c32eeSOng Boon Leong 2409132c32eeSOng Boon Leong /* Avoids TX time-out as we are sharing with slow path */ 2410e92af33eSAlexander Lobakin txq_trans_cond_update(nq); 2411132c32eeSOng Boon Leong 2412132c32eeSOng Boon Leong budget = min(budget, stmmac_tx_avail(priv, queue)); 2413132c32eeSOng Boon Leong 2414132c32eeSOng Boon Leong while (budget-- > 0) { 2415132c32eeSOng Boon Leong dma_addr_t dma_addr; 2416132c32eeSOng Boon Leong bool set_ic; 2417132c32eeSOng Boon Leong 2418132c32eeSOng Boon Leong /* We are sharing with slow path and stop XSK TX desc submission when 2419132c32eeSOng Boon Leong * available TX ring is less than threshold. 
2420132c32eeSOng Boon Leong */ 2421132c32eeSOng Boon Leong if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || 2422132c32eeSOng Boon Leong !netif_carrier_ok(priv->dev)) { 2423132c32eeSOng Boon Leong work_done = false; 2424132c32eeSOng Boon Leong break; 2425132c32eeSOng Boon Leong } 2426132c32eeSOng Boon Leong 2427132c32eeSOng Boon Leong if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2428132c32eeSOng Boon Leong break; 2429132c32eeSOng Boon Leong 2430132c32eeSOng Boon Leong if (likely(priv->extend_desc)) 2431132c32eeSOng Boon Leong tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2432132c32eeSOng Boon Leong else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2433132c32eeSOng Boon Leong tx_desc = &tx_q->dma_entx[entry].basic; 2434132c32eeSOng Boon Leong else 2435132c32eeSOng Boon Leong tx_desc = tx_q->dma_tx + entry; 2436132c32eeSOng Boon Leong 2437132c32eeSOng Boon Leong dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2438132c32eeSOng Boon Leong xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); 2439132c32eeSOng Boon Leong 2440132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; 2441132c32eeSOng Boon Leong 2442132c32eeSOng Boon Leong /* To return XDP buffer to XSK pool, we simple call 2443132c32eeSOng Boon Leong * xsk_tx_completed(), so we don't need to fill up 2444132c32eeSOng Boon Leong * 'buf' and 'xdpf'. 
2445132c32eeSOng Boon Leong */ 2446132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf = 0; 2447132c32eeSOng Boon Leong tx_q->xdpf[entry] = NULL; 2448132c32eeSOng Boon Leong 2449132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].map_as_page = false; 2450132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; 2451132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].last_segment = true; 2452132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2453132c32eeSOng Boon Leong 2454132c32eeSOng Boon Leong stmmac_set_desc_addr(priv, tx_desc, dma_addr); 2455132c32eeSOng Boon Leong 2456132c32eeSOng Boon Leong tx_q->tx_count_frames++; 2457132c32eeSOng Boon Leong 2458132c32eeSOng Boon Leong if (!priv->tx_coal_frames[queue]) 2459132c32eeSOng Boon Leong set_ic = false; 2460132c32eeSOng Boon Leong else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 2461132c32eeSOng Boon Leong set_ic = true; 2462132c32eeSOng Boon Leong else 2463132c32eeSOng Boon Leong set_ic = false; 2464132c32eeSOng Boon Leong 2465132c32eeSOng Boon Leong if (set_ic) { 2466132c32eeSOng Boon Leong tx_q->tx_count_frames = 0; 2467132c32eeSOng Boon Leong stmmac_set_tx_ic(priv, tx_desc); 2468132c32eeSOng Boon Leong priv->xstats.tx_set_ic_bit++; 2469132c32eeSOng Boon Leong } 2470132c32eeSOng Boon Leong 2471132c32eeSOng Boon Leong stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, 2472132c32eeSOng Boon Leong true, priv->mode, true, true, 2473132c32eeSOng Boon Leong xdp_desc.len); 2474132c32eeSOng Boon Leong 2475132c32eeSOng Boon Leong stmmac_enable_dma_transmission(priv, priv->ioaddr); 2476132c32eeSOng Boon Leong 24778531c808SChristian Marangi tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 2478132c32eeSOng Boon Leong entry = tx_q->cur_tx; 2479132c32eeSOng Boon Leong } 2480132c32eeSOng Boon Leong 2481132c32eeSOng Boon Leong if (tx_desc) { 2482132c32eeSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 2483132c32eeSOng Boon Leong 
xsk_tx_release(pool); 2484132c32eeSOng Boon Leong } 2485132c32eeSOng Boon Leong 2486132c32eeSOng Boon Leong /* Return true if all of the 3 conditions are met 2487132c32eeSOng Boon Leong * a) TX Budget is still available 2488132c32eeSOng Boon Leong * b) work_done = true when XSK TX desc peek is empty (no more 2489132c32eeSOng Boon Leong * pending XSK TX for transmission) 2490132c32eeSOng Boon Leong */ 2491132c32eeSOng Boon Leong return !!budget && work_done; 2492132c32eeSOng Boon Leong } 2493132c32eeSOng Boon Leong 24943a6c12a0SXiaoliang Yang static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) 24953a6c12a0SXiaoliang Yang { 24963a6c12a0SXiaoliang Yang if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { 24973a6c12a0SXiaoliang Yang tc += 64; 24983a6c12a0SXiaoliang Yang 24993a6c12a0SXiaoliang Yang if (priv->plat->force_thresh_dma_mode) 25003a6c12a0SXiaoliang Yang stmmac_set_dma_operation_mode(priv, tc, tc, chan); 25013a6c12a0SXiaoliang Yang else 25023a6c12a0SXiaoliang Yang stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, 25033a6c12a0SXiaoliang Yang chan); 25043a6c12a0SXiaoliang Yang 25053a6c12a0SXiaoliang Yang priv->xstats.threshold = tc; 25063a6c12a0SXiaoliang Yang } 25073a6c12a0SXiaoliang Yang } 25083a6c12a0SXiaoliang Yang 25097ac6653aSJeff Kirsher /** 2510732fdf0eSGiuseppe CAVALLARO * stmmac_tx_clean - to manage the transmission completion 251132ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 2512d0ea5cbdSJesse Brandeburg * @budget: napi budget limiting this functions packet handling 2513ce736788SJoao Pinto * @queue: TX queue index 2514732fdf0eSGiuseppe CAVALLARO * Description: it reclaims the transmit resources after transmission completes. 
25157ac6653aSJeff Kirsher */ 25168fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 25177ac6653aSJeff Kirsher { 25188531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 251938979574SBeniamino Galvani unsigned int bytes_compl = 0, pkts_compl = 0; 2520132c32eeSOng Boon Leong unsigned int entry, xmits = 0, count = 0; 25217ac6653aSJeff Kirsher 25228fce3331SJose Abreu __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); 2523a9097a96SGiuseppe CAVALLARO 25249125cdd1SGiuseppe CAVALLARO priv->xstats.tx_clean++; 25259125cdd1SGiuseppe CAVALLARO 2526132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 2527132c32eeSOng Boon Leong 25288d5f4b07SBernd Edlinger entry = tx_q->dirty_tx; 2529132c32eeSOng Boon Leong 2530132c32eeSOng Boon Leong /* Try to clean all TX complete frame in 1 shot */ 25318531c808SChristian Marangi while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { 2532be8b38a7SOng Boon Leong struct xdp_frame *xdpf; 2533be8b38a7SOng Boon Leong struct sk_buff *skb; 2534c24602efSGiuseppe CAVALLARO struct dma_desc *p; 2535c363b658SFabrice Gasnier int status; 2536c24602efSGiuseppe CAVALLARO 25378b278a5bSOng Boon Leong if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || 25388b278a5bSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2539be8b38a7SOng Boon Leong xdpf = tx_q->xdpf[entry]; 2540be8b38a7SOng Boon Leong skb = NULL; 2541be8b38a7SOng Boon Leong } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2542be8b38a7SOng Boon Leong xdpf = NULL; 2543be8b38a7SOng Boon Leong skb = tx_q->tx_skbuff[entry]; 2544be8b38a7SOng Boon Leong } else { 2545be8b38a7SOng Boon Leong xdpf = NULL; 2546be8b38a7SOng Boon Leong skb = NULL; 2547be8b38a7SOng Boon Leong } 2548be8b38a7SOng Boon Leong 2549c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 2550ce736788SJoao Pinto p = (struct dma_desc *)(tx_q->dma_etx + entry); 
2551579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2552579a25a8SJose Abreu p = &tx_q->dma_entx[entry].basic; 2553c24602efSGiuseppe CAVALLARO else 2554ce736788SJoao Pinto p = tx_q->dma_tx + entry; 25557ac6653aSJeff Kirsher 255642de047dSJose Abreu status = stmmac_tx_status(priv, &priv->dev->stats, 255742de047dSJose Abreu &priv->xstats, p, priv->ioaddr); 2558c363b658SFabrice Gasnier /* Check if the descriptor is owned by the DMA */ 2559c363b658SFabrice Gasnier if (unlikely(status & tx_dma_own)) 2560c363b658SFabrice Gasnier break; 2561c363b658SFabrice Gasnier 25628fce3331SJose Abreu count++; 25638fce3331SJose Abreu 2564a6b25da5SNiklas Cassel /* Make sure descriptor fields are read after reading 2565a6b25da5SNiklas Cassel * the own bit. 2566a6b25da5SNiklas Cassel */ 2567a6b25da5SNiklas Cassel dma_rmb(); 2568a6b25da5SNiklas Cassel 2569c363b658SFabrice Gasnier /* Just consider the last segment and ...*/ 2570c363b658SFabrice Gasnier if (likely(!(status & tx_not_ls))) { 2571c363b658SFabrice Gasnier /* ... 
verify the status error condition */ 2572c363b658SFabrice Gasnier if (unlikely(status & tx_err)) { 2573c363b658SFabrice Gasnier priv->dev->stats.tx_errors++; 25743a6c12a0SXiaoliang Yang if (unlikely(status & tx_err_bump_tc)) 25753a6c12a0SXiaoliang Yang stmmac_bump_dma_threshold(priv, queue); 2576c363b658SFabrice Gasnier } else { 25777ac6653aSJeff Kirsher priv->dev->stats.tx_packets++; 25787ac6653aSJeff Kirsher priv->xstats.tx_pkt_n++; 257968e9c5deSVijayakannan Ayyathurai priv->xstats.txq_stats[queue].tx_pkt_n++; 2580c363b658SFabrice Gasnier } 2581be8b38a7SOng Boon Leong if (skb) 2582ba1ffd74SGiuseppe CAVALLARO stmmac_get_tx_hwtstamp(priv, p, skb); 25837ac6653aSJeff Kirsher } 25847ac6653aSJeff Kirsher 2585be8b38a7SOng Boon Leong if (likely(tx_q->tx_skbuff_dma[entry].buf && 2586be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { 2587ce736788SJoao Pinto if (tx_q->tx_skbuff_dma[entry].map_as_page) 2588362b37beSGiuseppe CAVALLARO dma_unmap_page(priv->device, 2589ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].buf, 2590ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].len, 25917ac6653aSJeff Kirsher DMA_TO_DEVICE); 2592362b37beSGiuseppe CAVALLARO else 2593362b37beSGiuseppe CAVALLARO dma_unmap_single(priv->device, 2594ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].buf, 2595ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].len, 2596362b37beSGiuseppe CAVALLARO DMA_TO_DEVICE); 2597ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].buf = 0; 2598ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].len = 0; 2599ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].map_as_page = false; 2600cf32deecSRayagond Kokatanur } 2601f748be53SAlexandre TORGUE 26022c520b1cSJose Abreu stmmac_clean_desc3(priv, tx_q, p); 2603f748be53SAlexandre TORGUE 2604ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].last_segment = false; 2605ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].is_jumbo = false; 26067ac6653aSJeff Kirsher 2607be8b38a7SOng Boon Leong if (xdpf && 2608be8b38a7SOng Boon Leong 
tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { 2609be8b38a7SOng Boon Leong xdp_return_frame_rx_napi(xdpf); 2610be8b38a7SOng Boon Leong tx_q->xdpf[entry] = NULL; 2611be8b38a7SOng Boon Leong } 2612be8b38a7SOng Boon Leong 26138b278a5bSOng Boon Leong if (xdpf && 26148b278a5bSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 26158b278a5bSOng Boon Leong xdp_return_frame(xdpf); 26168b278a5bSOng Boon Leong tx_q->xdpf[entry] = NULL; 26178b278a5bSOng Boon Leong } 26188b278a5bSOng Boon Leong 2619132c32eeSOng Boon Leong if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) 2620132c32eeSOng Boon Leong tx_q->xsk_frames_done++; 2621132c32eeSOng Boon Leong 2622be8b38a7SOng Boon Leong if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2623be8b38a7SOng Boon Leong if (likely(skb)) { 262438979574SBeniamino Galvani pkts_compl++; 262538979574SBeniamino Galvani bytes_compl += skb->len; 26267c565c33SEric W. Biederman dev_consume_skb_any(skb); 2627ce736788SJoao Pinto tx_q->tx_skbuff[entry] = NULL; 26287ac6653aSJeff Kirsher } 2629be8b38a7SOng Boon Leong } 26307ac6653aSJeff Kirsher 263142de047dSJose Abreu stmmac_release_tx_desc(priv, p, priv->mode); 26327ac6653aSJeff Kirsher 26338531c808SChristian Marangi entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 26347ac6653aSJeff Kirsher } 2635ce736788SJoao Pinto tx_q->dirty_tx = entry; 263638979574SBeniamino Galvani 2637c22a3f48SJoao Pinto netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 2638c22a3f48SJoao Pinto pkts_compl, bytes_compl); 263938979574SBeniamino Galvani 2640c22a3f48SJoao Pinto if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 2641c22a3f48SJoao Pinto queue))) && 2642aa042f60SSong, Yoong Siang stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { 2643c22a3f48SJoao Pinto 2644b3e51069SLABBE Corentin netif_dbg(priv, tx_done, priv->dev, 2645b3e51069SLABBE Corentin "%s: restart transmit\n", __func__); 2646c22a3f48SJoao 
Pinto netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 26477ac6653aSJeff Kirsher } 2648d765955dSGiuseppe CAVALLARO 2649132c32eeSOng Boon Leong if (tx_q->xsk_pool) { 2650132c32eeSOng Boon Leong bool work_done; 2651132c32eeSOng Boon Leong 2652132c32eeSOng Boon Leong if (tx_q->xsk_frames_done) 2653132c32eeSOng Boon Leong xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 2654132c32eeSOng Boon Leong 2655132c32eeSOng Boon Leong if (xsk_uses_need_wakeup(tx_q->xsk_pool)) 2656132c32eeSOng Boon Leong xsk_set_tx_need_wakeup(tx_q->xsk_pool); 2657132c32eeSOng Boon Leong 2658132c32eeSOng Boon Leong /* For XSK TX, we try to send as many as possible. 2659132c32eeSOng Boon Leong * If XSK work done (XSK TX desc empty and budget still 2660132c32eeSOng Boon Leong * available), return "budget - 1" to reenable TX IRQ. 2661132c32eeSOng Boon Leong * Else, return "budget" to make NAPI continue polling. 2662132c32eeSOng Boon Leong */ 2663132c32eeSOng Boon Leong work_done = stmmac_xdp_xmit_zc(priv, queue, 2664132c32eeSOng Boon Leong STMMAC_XSK_TX_BUDGET_MAX); 2665132c32eeSOng Boon Leong if (work_done) 2666132c32eeSOng Boon Leong xmits = budget - 1; 2667132c32eeSOng Boon Leong else 2668132c32eeSOng Boon Leong xmits = budget; 2669132c32eeSOng Boon Leong } 2670132c32eeSOng Boon Leong 2671be1c7eaeSVineetha G. Jaya Kumaran if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && 2672be1c7eaeSVineetha G. Jaya Kumaran priv->eee_sw_timer_en) { 2673c74ead22SJisheng Zhang if (stmmac_enable_eee_mode(priv)) 2674388e201dSVineetha G. 
Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 2675d765955dSGiuseppe CAVALLARO } 26768fce3331SJose Abreu 26774ccb4585SJose Abreu /* We still have pending packets, let's call for a new scheduling */ 26784ccb4585SJose Abreu if (tx_q->dirty_tx != tx_q->cur_tx) 2679db2f2842SOng Boon Leong hrtimer_start(&tx_q->txtimer, 2680db2f2842SOng Boon Leong STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2681d5a05e69SVincent Whitchurch HRTIMER_MODE_REL); 26824ccb4585SJose Abreu 26838fce3331SJose Abreu __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 26848fce3331SJose Abreu 2685132c32eeSOng Boon Leong /* Combine decisions from TX clean and XSK TX */ 2686132c32eeSOng Boon Leong return max(count, xmits); 26877ac6653aSJeff Kirsher } 26887ac6653aSJeff Kirsher 26897ac6653aSJeff Kirsher /** 2690732fdf0eSGiuseppe CAVALLARO * stmmac_tx_err - to manage the tx error 269132ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 26925bacd778SLABBE Corentin * @chan: channel index 26937ac6653aSJeff Kirsher * Description: it cleans the descriptors and restarts the transmission 2694732fdf0eSGiuseppe CAVALLARO * in case of transmission errors. 
26957ac6653aSJeff Kirsher */ 26965bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 26977ac6653aSJeff Kirsher { 26988531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2699ce736788SJoao Pinto 2700c22a3f48SJoao Pinto netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 27017ac6653aSJeff Kirsher 2702ae4f0d46SJoao Pinto stmmac_stop_tx_dma(priv, chan); 2703ba39b344SChristian Marangi dma_free_tx_skbufs(priv, &priv->dma_conf, chan); 2704ba39b344SChristian Marangi stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); 2705f9ec5723SChristian Marangi stmmac_reset_tx_queue(priv, chan); 2706f421031eSJongsung Kim stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2707f421031eSJongsung Kim tx_q->dma_tx_phy, chan); 2708ae4f0d46SJoao Pinto stmmac_start_tx_dma(priv, chan); 27097ac6653aSJeff Kirsher 27107ac6653aSJeff Kirsher priv->dev->stats.tx_errors++; 2711c22a3f48SJoao Pinto netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 27127ac6653aSJeff Kirsher } 27137ac6653aSJeff Kirsher 271432ceabcaSGiuseppe CAVALLARO /** 27156deee222SJoao Pinto * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 27166deee222SJoao Pinto * @priv: driver private structure 27176deee222SJoao Pinto * @txmode: TX operating mode 27186deee222SJoao Pinto * @rxmode: RX operating mode 27196deee222SJoao Pinto * @chan: channel index 27206deee222SJoao Pinto * Description: it is used for configuring of the DMA operation mode in 27216deee222SJoao Pinto * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 27226deee222SJoao Pinto * mode. 
27236deee222SJoao Pinto */ 27246deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 27256deee222SJoao Pinto u32 rxmode, u32 chan) 27266deee222SJoao Pinto { 2727a0daae13SJose Abreu u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2728a0daae13SJose Abreu u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 272952a76235SJose Abreu u32 rx_channels_count = priv->plat->rx_queues_to_use; 273052a76235SJose Abreu u32 tx_channels_count = priv->plat->tx_queues_to_use; 27316deee222SJoao Pinto int rxfifosz = priv->plat->rx_fifo_size; 273252a76235SJose Abreu int txfifosz = priv->plat->tx_fifo_size; 27336deee222SJoao Pinto 27346deee222SJoao Pinto if (rxfifosz == 0) 27356deee222SJoao Pinto rxfifosz = priv->dma_cap.rx_fifo_size; 273652a76235SJose Abreu if (txfifosz == 0) 273752a76235SJose Abreu txfifosz = priv->dma_cap.tx_fifo_size; 273852a76235SJose Abreu 273952a76235SJose Abreu /* Adjust for real per queue fifo size */ 274052a76235SJose Abreu rxfifosz /= rx_channels_count; 274152a76235SJose Abreu txfifosz /= tx_channels_count; 27426deee222SJoao Pinto 2743ab0204e3SJose Abreu stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2744ab0204e3SJose Abreu stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 27456deee222SJoao Pinto } 27466deee222SJoao Pinto 27478bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 27488bf993a5SJose Abreu { 274963a550fcSJose Abreu int ret; 27508bf993a5SJose Abreu 2751c10d4c82SJose Abreu ret = stmmac_safety_feat_irq_status(priv, priv->dev, 27528bf993a5SJose Abreu priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2753c10d4c82SJose Abreu if (ret && (ret != -EINVAL)) { 27548bf993a5SJose Abreu stmmac_global_err(priv); 2755c10d4c82SJose Abreu return true; 2756c10d4c82SJose Abreu } 2757c10d4c82SJose Abreu 2758c10d4c82SJose Abreu return false; 27598bf993a5SJose Abreu } 27608bf993a5SJose Abreu 27617e1c520cSOng Boon Leong static int 
stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) 27628fce3331SJose Abreu { 27638fce3331SJose Abreu int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 27647e1c520cSOng Boon Leong &priv->xstats, chan, dir); 27658531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 27668531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 27678fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[chan]; 2768132c32eeSOng Boon Leong struct napi_struct *rx_napi; 2769132c32eeSOng Boon Leong struct napi_struct *tx_napi; 2770021bd5e3SJose Abreu unsigned long flags; 27718fce3331SJose Abreu 2772132c32eeSOng Boon Leong rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; 2773132c32eeSOng Boon Leong tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 2774132c32eeSOng Boon Leong 27754ccb4585SJose Abreu if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2776132c32eeSOng Boon Leong if (napi_schedule_prep(rx_napi)) { 2777021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 2778021bd5e3SJose Abreu stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 2779021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 2780132c32eeSOng Boon Leong __napi_schedule(rx_napi); 27813ba07debSJose Abreu } 27824ccb4585SJose Abreu } 27834ccb4585SJose Abreu 2784021bd5e3SJose Abreu if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2785132c32eeSOng Boon Leong if (napi_schedule_prep(tx_napi)) { 2786021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 2787021bd5e3SJose Abreu stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 2788021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 2789132c32eeSOng Boon Leong __napi_schedule(tx_napi); 2790021bd5e3SJose Abreu } 2791021bd5e3SJose Abreu } 27928fce3331SJose Abreu 27938fce3331SJose Abreu return status; 27948fce3331SJose Abreu } 27958fce3331SJose Abreu 27966deee222SJoao Pinto /** 2797732fdf0eSGiuseppe 
CAVALLARO * stmmac_dma_interrupt - DMA ISR 279832ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 279932ceabcaSGiuseppe CAVALLARO * Description: this is the DMA ISR. It is called by the main ISR. 2800732fdf0eSGiuseppe CAVALLARO * It calls the dwmac dma routine and schedule poll method in case of some 2801732fdf0eSGiuseppe CAVALLARO * work can be done. 280232ceabcaSGiuseppe CAVALLARO */ 28037ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv) 28047ac6653aSJeff Kirsher { 2805d62a107aSJoao Pinto u32 tx_channel_count = priv->plat->tx_queues_to_use; 28065a6a0445SNiklas Cassel u32 rx_channel_count = priv->plat->rx_queues_to_use; 28075a6a0445SNiklas Cassel u32 channels_to_check = tx_channel_count > rx_channel_count ? 28085a6a0445SNiklas Cassel tx_channel_count : rx_channel_count; 2809d62a107aSJoao Pinto u32 chan; 28108ac60ffbSKees Cook int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 28118ac60ffbSKees Cook 28128ac60ffbSKees Cook /* Make sure we never check beyond our status buffer. 
*/ 28138ac60ffbSKees Cook if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 28148ac60ffbSKees Cook channels_to_check = ARRAY_SIZE(status); 281568e5cfafSJoao Pinto 28165a6a0445SNiklas Cassel for (chan = 0; chan < channels_to_check; chan++) 28177e1c520cSOng Boon Leong status[chan] = stmmac_napi_check(priv, chan, 28187e1c520cSOng Boon Leong DMA_DIR_RXTX); 2819d62a107aSJoao Pinto 28205a6a0445SNiklas Cassel for (chan = 0; chan < tx_channel_count; chan++) { 28215a6a0445SNiklas Cassel if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 28227ac6653aSJeff Kirsher /* Try to bump up the dma threshold on this failure */ 28233a6c12a0SXiaoliang Yang stmmac_bump_dma_threshold(priv, chan); 28245a6a0445SNiklas Cassel } else if (unlikely(status[chan] == tx_hard_error)) { 28254e593262SJoao Pinto stmmac_tx_err(priv, chan); 28267ac6653aSJeff Kirsher } 2827d62a107aSJoao Pinto } 2828d62a107aSJoao Pinto } 28297ac6653aSJeff Kirsher 283032ceabcaSGiuseppe CAVALLARO /** 283132ceabcaSGiuseppe CAVALLARO * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 283232ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 283332ceabcaSGiuseppe CAVALLARO * Description: this masks the MMC irq, in fact, the counters are managed in SW. 
283432ceabcaSGiuseppe CAVALLARO */ 28351c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv) 28361c901a46SGiuseppe CAVALLARO { 28371c901a46SGiuseppe CAVALLARO unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 28381c901a46SGiuseppe CAVALLARO MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 28391c901a46SGiuseppe CAVALLARO 28403b1dd2c5SJose Abreu stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 28414f795b25SGiuseppe CAVALLARO 28424f795b25SGiuseppe CAVALLARO if (priv->dma_cap.rmon) { 28433b1dd2c5SJose Abreu stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 28441c901a46SGiuseppe CAVALLARO memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 28454f795b25SGiuseppe CAVALLARO } else 284638ddc59dSLABBE Corentin netdev_info(priv->dev, "No MAC Management Counters available\n"); 28471c901a46SGiuseppe CAVALLARO } 28481c901a46SGiuseppe CAVALLARO 2849732fdf0eSGiuseppe CAVALLARO /** 2850732fdf0eSGiuseppe CAVALLARO * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 285132ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 285219e30c14SGiuseppe CAVALLARO * Description: 285319e30c14SGiuseppe CAVALLARO * new GMAC chip generations have a new register to indicate the 2854e7434821SGiuseppe CAVALLARO * presence of the optional feature/functions. 285519e30c14SGiuseppe CAVALLARO * This can be also used to override the value passed through the 285619e30c14SGiuseppe CAVALLARO * platform and necessary for old MAC10/100 and GMAC chips. 
2857e7434821SGiuseppe CAVALLARO */ 2858e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv) 2859e7434821SGiuseppe CAVALLARO { 2860a4e887faSJose Abreu return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2861e7434821SGiuseppe CAVALLARO } 2862e7434821SGiuseppe CAVALLARO 286332ceabcaSGiuseppe CAVALLARO /** 2864732fdf0eSGiuseppe CAVALLARO * stmmac_check_ether_addr - check if the MAC addr is valid 286532ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 286632ceabcaSGiuseppe CAVALLARO * Description: 286732ceabcaSGiuseppe CAVALLARO * it is to verify if the MAC address is valid, in case of failures it 286832ceabcaSGiuseppe CAVALLARO * generates a random MAC address 286932ceabcaSGiuseppe CAVALLARO */ 2870bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2871bfab27a1SGiuseppe CAVALLARO { 28727f9b8fe5SJakub Kicinski u8 addr[ETH_ALEN]; 28737f9b8fe5SJakub Kicinski 2874bfab27a1SGiuseppe CAVALLARO if (!is_valid_ether_addr(priv->dev->dev_addr)) { 28757f9b8fe5SJakub Kicinski stmmac_get_umac_addr(priv, priv->hw, addr, 0); 28767f9b8fe5SJakub Kicinski if (is_valid_ether_addr(addr)) 28777f9b8fe5SJakub Kicinski eth_hw_addr_set(priv->dev, addr); 28787f9b8fe5SJakub Kicinski else 2879f2cedb63SDanny Kukawka eth_hw_addr_random(priv->dev); 2880af649352SJisheng Zhang dev_info(priv->device, "device MAC address %pM\n", 2881bfab27a1SGiuseppe CAVALLARO priv->dev->dev_addr); 2882bfab27a1SGiuseppe CAVALLARO } 2883c88460b7SHans de Goede } 2884bfab27a1SGiuseppe CAVALLARO 288532ceabcaSGiuseppe CAVALLARO /** 2886732fdf0eSGiuseppe CAVALLARO * stmmac_init_dma_engine - DMA init. 288732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 288832ceabcaSGiuseppe CAVALLARO * Description: 288932ceabcaSGiuseppe CAVALLARO * It inits the DMA invoking the specific MAC/GMAC callback. 
289032ceabcaSGiuseppe CAVALLARO * Some DMA parameters can be passed from the platform; 289132ceabcaSGiuseppe CAVALLARO * in case of these are not passed a default is kept for the MAC or GMAC. 289232ceabcaSGiuseppe CAVALLARO */ 28930f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv) 28940f1f88a8SGiuseppe CAVALLARO { 289547f2a9ceSJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 289647f2a9ceSJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 289724aaed0cSJose Abreu u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 289854139cf3SJoao Pinto struct stmmac_rx_queue *rx_q; 2899ce736788SJoao Pinto struct stmmac_tx_queue *tx_q; 290047f2a9ceSJoao Pinto u32 chan = 0; 2901c24602efSGiuseppe CAVALLARO int atds = 0; 2902495db273SGiuseppe Cavallaro int ret = 0; 29030f1f88a8SGiuseppe CAVALLARO 2904a332e2faSNiklas Cassel if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2905a332e2faSNiklas Cassel dev_err(priv->device, "Invalid DMA configuration\n"); 290689ab75bfSNiklas Cassel return -EINVAL; 29070f1f88a8SGiuseppe CAVALLARO } 29080f1f88a8SGiuseppe CAVALLARO 2909c24602efSGiuseppe CAVALLARO if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2910c24602efSGiuseppe CAVALLARO atds = 1; 2911c24602efSGiuseppe CAVALLARO 2912a4e887faSJose Abreu ret = stmmac_reset(priv, priv->ioaddr); 2913495db273SGiuseppe Cavallaro if (ret) { 2914495db273SGiuseppe Cavallaro dev_err(priv->device, "Failed to reset the dma\n"); 2915495db273SGiuseppe Cavallaro return ret; 2916495db273SGiuseppe Cavallaro } 2917495db273SGiuseppe Cavallaro 29187d9e6c5aSJose Abreu /* DMA Configuration */ 29197d9e6c5aSJose Abreu stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 29207d9e6c5aSJose Abreu 29217d9e6c5aSJose Abreu if (priv->plat->axi) 29227d9e6c5aSJose Abreu stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 29237d9e6c5aSJose Abreu 2924af8f3fb7SWeifeng Voon /* DMA CSR Channel configuration */ 2925087a7b94SVincent Whitchurch 
for (chan = 0; chan < dma_csr_ch; chan++) { 2926af8f3fb7SWeifeng Voon stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2927087a7b94SVincent Whitchurch stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2928087a7b94SVincent Whitchurch } 2929af8f3fb7SWeifeng Voon 293047f2a9ceSJoao Pinto /* DMA RX Channel Configuration */ 293147f2a9ceSJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) { 29328531c808SChristian Marangi rx_q = &priv->dma_conf.rx_queue[chan]; 293354139cf3SJoao Pinto 293424aaed0cSJose Abreu stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 293524aaed0cSJose Abreu rx_q->dma_rx_phy, chan); 293647f2a9ceSJoao Pinto 293754139cf3SJoao Pinto rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2938bba2556eSOng Boon Leong (rx_q->buf_alloc_num * 2939aa042f60SSong, Yoong Siang sizeof(struct dma_desc)); 2940a4e887faSJose Abreu stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2941a4e887faSJose Abreu rx_q->rx_tail_addr, chan); 294247f2a9ceSJoao Pinto } 294347f2a9ceSJoao Pinto 294447f2a9ceSJoao Pinto /* DMA TX Channel Configuration */ 294547f2a9ceSJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) { 29468531c808SChristian Marangi tx_q = &priv->dma_conf.tx_queue[chan]; 2947ce736788SJoao Pinto 294824aaed0cSJose Abreu stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 294924aaed0cSJose Abreu tx_q->dma_tx_phy, chan); 2950f748be53SAlexandre TORGUE 29510431100bSJose Abreu tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2952a4e887faSJose Abreu stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2953a4e887faSJose Abreu tx_q->tx_tail_addr, chan); 295447f2a9ceSJoao Pinto } 295524aaed0cSJose Abreu 2956495db273SGiuseppe Cavallaro return ret; 29570f1f88a8SGiuseppe CAVALLARO } 29580f1f88a8SGiuseppe CAVALLARO 29598fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 29608fce3331SJose Abreu { 29618531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 29628fce3331SJose Abreu 2963db2f2842SOng 
Boon Leong hrtimer_start(&tx_q->txtimer, 2964db2f2842SOng Boon Leong STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2965d5a05e69SVincent Whitchurch HRTIMER_MODE_REL); 29668fce3331SJose Abreu } 29678fce3331SJose Abreu 2968bfab27a1SGiuseppe CAVALLARO /** 2969732fdf0eSGiuseppe CAVALLARO * stmmac_tx_timer - mitigation sw timer for tx. 2970d0ea5cbdSJesse Brandeburg * @t: data pointer 29719125cdd1SGiuseppe CAVALLARO * Description: 29729125cdd1SGiuseppe CAVALLARO * This is the timer handler to directly invoke the stmmac_tx_clean. 29739125cdd1SGiuseppe CAVALLARO */ 2974d5a05e69SVincent Whitchurch static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 29759125cdd1SGiuseppe CAVALLARO { 2976d5a05e69SVincent Whitchurch struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 29778fce3331SJose Abreu struct stmmac_priv *priv = tx_q->priv_data; 29788fce3331SJose Abreu struct stmmac_channel *ch; 2979132c32eeSOng Boon Leong struct napi_struct *napi; 29809125cdd1SGiuseppe CAVALLARO 29818fce3331SJose Abreu ch = &priv->channel[tx_q->queue_index]; 2982132c32eeSOng Boon Leong napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 29838fce3331SJose Abreu 2984132c32eeSOng Boon Leong if (likely(napi_schedule_prep(napi))) { 2985021bd5e3SJose Abreu unsigned long flags; 2986021bd5e3SJose Abreu 2987021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 2988021bd5e3SJose Abreu stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 2989021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 2990132c32eeSOng Boon Leong __napi_schedule(napi); 2991021bd5e3SJose Abreu } 2992d5a05e69SVincent Whitchurch 2993d5a05e69SVincent Whitchurch return HRTIMER_NORESTART; 29949125cdd1SGiuseppe CAVALLARO } 29959125cdd1SGiuseppe CAVALLARO 29969125cdd1SGiuseppe CAVALLARO /** 2997d429b66eSJose Abreu * stmmac_init_coalesce - init mitigation options. 
299832ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 29999125cdd1SGiuseppe CAVALLARO * Description: 3000d429b66eSJose Abreu * This inits the coalesce parameters: i.e. timer rate, 30019125cdd1SGiuseppe CAVALLARO * timer handler and default threshold used for enabling the 30029125cdd1SGiuseppe CAVALLARO * interrupt on completion bit. 30039125cdd1SGiuseppe CAVALLARO */ 3004d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv) 30059125cdd1SGiuseppe CAVALLARO { 30068fce3331SJose Abreu u32 tx_channel_count = priv->plat->tx_queues_to_use; 3007db2f2842SOng Boon Leong u32 rx_channel_count = priv->plat->rx_queues_to_use; 30088fce3331SJose Abreu u32 chan; 30098fce3331SJose Abreu 30108fce3331SJose Abreu for (chan = 0; chan < tx_channel_count; chan++) { 30118531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 30128fce3331SJose Abreu 3013db2f2842SOng Boon Leong priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; 3014db2f2842SOng Boon Leong priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; 3015db2f2842SOng Boon Leong 3016d5a05e69SVincent Whitchurch hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3017d5a05e69SVincent Whitchurch tx_q->txtimer.function = stmmac_tx_timer; 30188fce3331SJose Abreu } 3019db2f2842SOng Boon Leong 3020db2f2842SOng Boon Leong for (chan = 0; chan < rx_channel_count; chan++) 3021db2f2842SOng Boon Leong priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; 30229125cdd1SGiuseppe CAVALLARO } 30239125cdd1SGiuseppe CAVALLARO 30244854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv) 30254854ab99SJoao Pinto { 30264854ab99SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 30274854ab99SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 30284854ab99SJoao Pinto u32 chan; 30294854ab99SJoao Pinto 30304854ab99SJoao Pinto /* set TX ring length */ 30314854ab99SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 3032a4e887faSJose 
Abreu stmmac_set_tx_ring_len(priv, priv->ioaddr, 30338531c808SChristian Marangi (priv->dma_conf.dma_tx_size - 1), chan); 30344854ab99SJoao Pinto 30354854ab99SJoao Pinto /* set RX ring length */ 30364854ab99SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 3037a4e887faSJose Abreu stmmac_set_rx_ring_len(priv, priv->ioaddr, 30388531c808SChristian Marangi (priv->dma_conf.dma_rx_size - 1), chan); 30394854ab99SJoao Pinto } 30404854ab99SJoao Pinto 30419125cdd1SGiuseppe CAVALLARO /** 30426a3a7193SJoao Pinto * stmmac_set_tx_queue_weight - Set TX queue weight 30436a3a7193SJoao Pinto * @priv: driver private structure 30446a3a7193SJoao Pinto * Description: It is used for setting TX queues weight 30456a3a7193SJoao Pinto */ 30466a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 30476a3a7193SJoao Pinto { 30486a3a7193SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 30496a3a7193SJoao Pinto u32 weight; 30506a3a7193SJoao Pinto u32 queue; 30516a3a7193SJoao Pinto 30526a3a7193SJoao Pinto for (queue = 0; queue < tx_queues_count; queue++) { 30536a3a7193SJoao Pinto weight = priv->plat->tx_queues_cfg[queue].weight; 3054c10d4c82SJose Abreu stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 30556a3a7193SJoao Pinto } 30566a3a7193SJoao Pinto } 30576a3a7193SJoao Pinto 30586a3a7193SJoao Pinto /** 305919d91873SJoao Pinto * stmmac_configure_cbs - Configure CBS in TX queue 306019d91873SJoao Pinto * @priv: driver private structure 306119d91873SJoao Pinto * Description: It is used for configuring CBS in AVB TX queues 306219d91873SJoao Pinto */ 306319d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv) 306419d91873SJoao Pinto { 306519d91873SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 306619d91873SJoao Pinto u32 mode_to_use; 306719d91873SJoao Pinto u32 queue; 306819d91873SJoao Pinto 306944781fefSJoao Pinto /* queue 0 is reserved for legacy traffic */ 307044781fefSJoao Pinto for (queue = 1; 
queue < tx_queues_count; queue++) { 307119d91873SJoao Pinto mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 307219d91873SJoao Pinto if (mode_to_use == MTL_QUEUE_DCB) 307319d91873SJoao Pinto continue; 307419d91873SJoao Pinto 3075c10d4c82SJose Abreu stmmac_config_cbs(priv, priv->hw, 307619d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].send_slope, 307719d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].idle_slope, 307819d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].high_credit, 307919d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].low_credit, 308019d91873SJoao Pinto queue); 308119d91873SJoao Pinto } 308219d91873SJoao Pinto } 308319d91873SJoao Pinto 308419d91873SJoao Pinto /** 3085d43042f4SJoao Pinto * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 3086d43042f4SJoao Pinto * @priv: driver private structure 3087d43042f4SJoao Pinto * Description: It is used for mapping RX queues to RX dma channels 3088d43042f4SJoao Pinto */ 3089d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 3090d43042f4SJoao Pinto { 3091d43042f4SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3092d43042f4SJoao Pinto u32 queue; 3093d43042f4SJoao Pinto u32 chan; 3094d43042f4SJoao Pinto 3095d43042f4SJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 3096d43042f4SJoao Pinto chan = priv->plat->rx_queues_cfg[queue].chan; 3097c10d4c82SJose Abreu stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 3098d43042f4SJoao Pinto } 3099d43042f4SJoao Pinto } 3100d43042f4SJoao Pinto 3101d43042f4SJoao Pinto /** 3102a8f5102aSJoao Pinto * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 3103a8f5102aSJoao Pinto * @priv: driver private structure 3104a8f5102aSJoao Pinto * Description: It is used for configuring the RX Queue Priority 3105a8f5102aSJoao Pinto */ 3106a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 3107a8f5102aSJoao Pinto { 3108a8f5102aSJoao Pinto u32 
rx_queues_count = priv->plat->rx_queues_to_use; 3109a8f5102aSJoao Pinto u32 queue; 3110a8f5102aSJoao Pinto u32 prio; 3111a8f5102aSJoao Pinto 3112a8f5102aSJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 3113a8f5102aSJoao Pinto if (!priv->plat->rx_queues_cfg[queue].use_prio) 3114a8f5102aSJoao Pinto continue; 3115a8f5102aSJoao Pinto 3116a8f5102aSJoao Pinto prio = priv->plat->rx_queues_cfg[queue].prio; 3117c10d4c82SJose Abreu stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 3118a8f5102aSJoao Pinto } 3119a8f5102aSJoao Pinto } 3120a8f5102aSJoao Pinto 3121a8f5102aSJoao Pinto /** 3122a8f5102aSJoao Pinto * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 3123a8f5102aSJoao Pinto * @priv: driver private structure 3124a8f5102aSJoao Pinto * Description: It is used for configuring the TX Queue Priority 3125a8f5102aSJoao Pinto */ 3126a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 3127a8f5102aSJoao Pinto { 3128a8f5102aSJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 3129a8f5102aSJoao Pinto u32 queue; 3130a8f5102aSJoao Pinto u32 prio; 3131a8f5102aSJoao Pinto 3132a8f5102aSJoao Pinto for (queue = 0; queue < tx_queues_count; queue++) { 3133a8f5102aSJoao Pinto if (!priv->plat->tx_queues_cfg[queue].use_prio) 3134a8f5102aSJoao Pinto continue; 3135a8f5102aSJoao Pinto 3136a8f5102aSJoao Pinto prio = priv->plat->tx_queues_cfg[queue].prio; 3137c10d4c82SJose Abreu stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 3138a8f5102aSJoao Pinto } 3139a8f5102aSJoao Pinto } 3140a8f5102aSJoao Pinto 3141a8f5102aSJoao Pinto /** 3142abe80fdcSJoao Pinto * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 3143abe80fdcSJoao Pinto * @priv: driver private structure 3144abe80fdcSJoao Pinto * Description: It is used for configuring the RX queue routing 3145abe80fdcSJoao Pinto */ 3146abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 3147abe80fdcSJoao Pinto { 
3148abe80fdcSJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3149abe80fdcSJoao Pinto u32 queue; 3150abe80fdcSJoao Pinto u8 packet; 3151abe80fdcSJoao Pinto 3152abe80fdcSJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 3153abe80fdcSJoao Pinto /* no specific packet type routing specified for the queue */ 3154abe80fdcSJoao Pinto if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 3155abe80fdcSJoao Pinto continue; 3156abe80fdcSJoao Pinto 3157abe80fdcSJoao Pinto packet = priv->plat->rx_queues_cfg[queue].pkt_route; 3158c10d4c82SJose Abreu stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 3159abe80fdcSJoao Pinto } 3160abe80fdcSJoao Pinto } 3161abe80fdcSJoao Pinto 316276067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv) 316376067459SJose Abreu { 316476067459SJose Abreu if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 316576067459SJose Abreu priv->rss.enable = false; 316676067459SJose Abreu return; 316776067459SJose Abreu } 316876067459SJose Abreu 316976067459SJose Abreu if (priv->dev->features & NETIF_F_RXHASH) 317076067459SJose Abreu priv->rss.enable = true; 317176067459SJose Abreu else 317276067459SJose Abreu priv->rss.enable = false; 317376067459SJose Abreu 317476067459SJose Abreu stmmac_rss_configure(priv, priv->hw, &priv->rss, 317576067459SJose Abreu priv->plat->rx_queues_to_use); 317676067459SJose Abreu } 317776067459SJose Abreu 3178abe80fdcSJoao Pinto /** 3179d0a9c9f9SJoao Pinto * stmmac_mtl_configuration - Configure MTL 3180d0a9c9f9SJoao Pinto * @priv: driver private structure 3181d0a9c9f9SJoao Pinto * Description: It is used for configurring MTL 3182d0a9c9f9SJoao Pinto */ 3183d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv) 3184d0a9c9f9SJoao Pinto { 3185d0a9c9f9SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3186d0a9c9f9SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 3187d0a9c9f9SJoao Pinto 3188c10d4c82SJose Abreu if (tx_queues_count 
> 1) 31896a3a7193SJoao Pinto stmmac_set_tx_queue_weight(priv); 31906a3a7193SJoao Pinto 3191d0a9c9f9SJoao Pinto /* Configure MTL RX algorithms */ 3192c10d4c82SJose Abreu if (rx_queues_count > 1) 3193c10d4c82SJose Abreu stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 3194d0a9c9f9SJoao Pinto priv->plat->rx_sched_algorithm); 3195d0a9c9f9SJoao Pinto 3196d0a9c9f9SJoao Pinto /* Configure MTL TX algorithms */ 3197c10d4c82SJose Abreu if (tx_queues_count > 1) 3198c10d4c82SJose Abreu stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 3199d0a9c9f9SJoao Pinto priv->plat->tx_sched_algorithm); 3200d0a9c9f9SJoao Pinto 320119d91873SJoao Pinto /* Configure CBS in AVB TX queues */ 3202c10d4c82SJose Abreu if (tx_queues_count > 1) 320319d91873SJoao Pinto stmmac_configure_cbs(priv); 320419d91873SJoao Pinto 3205d43042f4SJoao Pinto /* Map RX MTL to DMA channels */ 3206d43042f4SJoao Pinto stmmac_rx_queue_dma_chan_map(priv); 3207d43042f4SJoao Pinto 3208d0a9c9f9SJoao Pinto /* Enable MAC RX Queues */ 3209d0a9c9f9SJoao Pinto stmmac_mac_enable_rx_queues(priv); 32106deee222SJoao Pinto 3211a8f5102aSJoao Pinto /* Set RX priorities */ 3212c10d4c82SJose Abreu if (rx_queues_count > 1) 3213a8f5102aSJoao Pinto stmmac_mac_config_rx_queues_prio(priv); 3214a8f5102aSJoao Pinto 3215a8f5102aSJoao Pinto /* Set TX priorities */ 3216c10d4c82SJose Abreu if (tx_queues_count > 1) 3217a8f5102aSJoao Pinto stmmac_mac_config_tx_queues_prio(priv); 3218abe80fdcSJoao Pinto 3219abe80fdcSJoao Pinto /* Set RX routing */ 3220c10d4c82SJose Abreu if (rx_queues_count > 1) 3221abe80fdcSJoao Pinto stmmac_mac_config_rx_queues_routing(priv); 322276067459SJose Abreu 322376067459SJose Abreu /* Receive Side Scaling */ 322476067459SJose Abreu if (rx_queues_count > 1) 322576067459SJose Abreu stmmac_mac_config_rss(priv); 3226d0a9c9f9SJoao Pinto } 3227d0a9c9f9SJoao Pinto 32288bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 32298bf993a5SJose Abreu { 3230c10d4c82SJose Abreu if (priv->dma_cap.asp) { 
32318bf993a5SJose Abreu netdev_info(priv->dev, "Enabling Safety Features\n"); 32325ac712dcSWong Vee Khee stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, 32335ac712dcSWong Vee Khee priv->plat->safety_feat_cfg); 32348bf993a5SJose Abreu } else { 32358bf993a5SJose Abreu netdev_info(priv->dev, "No Safety Features support found\n"); 32368bf993a5SJose Abreu } 32378bf993a5SJose Abreu } 32388bf993a5SJose Abreu 32395a558611SOng Boon Leong static int stmmac_fpe_start_wq(struct stmmac_priv *priv) 32405a558611SOng Boon Leong { 32415a558611SOng Boon Leong char *name; 32425a558611SOng Boon Leong 32435a558611SOng Boon Leong clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 3244db7c691dSMohammad Athari Bin Ismail clear_bit(__FPE_REMOVING, &priv->fpe_task_state); 32455a558611SOng Boon Leong 32465a558611SOng Boon Leong name = priv->wq_name; 32475a558611SOng Boon Leong sprintf(name, "%s-fpe", priv->dev->name); 32485a558611SOng Boon Leong 32495a558611SOng Boon Leong priv->fpe_wq = create_singlethread_workqueue(name); 32505a558611SOng Boon Leong if (!priv->fpe_wq) { 32515a558611SOng Boon Leong netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); 32525a558611SOng Boon Leong 32535a558611SOng Boon Leong return -ENOMEM; 32545a558611SOng Boon Leong } 32555a558611SOng Boon Leong netdev_info(priv->dev, "FPE workqueue start"); 32565a558611SOng Boon Leong 32575a558611SOng Boon Leong return 0; 32585a558611SOng Boon Leong } 32595a558611SOng Boon Leong 3260d0a9c9f9SJoao Pinto /** 3261732fdf0eSGiuseppe CAVALLARO * stmmac_hw_setup - setup mac in a usable state. 3262523f11b5SSrinivas Kandagatla * @dev : pointer to the device structure. 32630735e639SMohammad Athari Bin Ismail * @ptp_register: register PTP if set 3264523f11b5SSrinivas Kandagatla * Description: 3265732fdf0eSGiuseppe CAVALLARO * this is the main function to setup the HW in a usable state because the 3266732fdf0eSGiuseppe CAVALLARO * dma engine is reset, the core registers are configured (e.g. 
AXI, 3267732fdf0eSGiuseppe CAVALLARO * Checksum features, timers). The DMA is ready to start receiving and 3268732fdf0eSGiuseppe CAVALLARO * transmitting. 3269523f11b5SSrinivas Kandagatla * Return value: 3270523f11b5SSrinivas Kandagatla * 0 on success and an appropriate (-)ve integer as defined in errno.h 3271523f11b5SSrinivas Kandagatla * file on failure. 3272523f11b5SSrinivas Kandagatla */ 32730735e639SMohammad Athari Bin Ismail static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) 3274523f11b5SSrinivas Kandagatla { 3275523f11b5SSrinivas Kandagatla struct stmmac_priv *priv = netdev_priv(dev); 32763c55d4d0SJoao Pinto u32 rx_cnt = priv->plat->rx_queues_to_use; 3277146617b8SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 3278d08d32d1SOng Boon Leong bool sph_en; 3279146617b8SJoao Pinto u32 chan; 3280523f11b5SSrinivas Kandagatla int ret; 3281523f11b5SSrinivas Kandagatla 3282523f11b5SSrinivas Kandagatla /* DMA initialization and SW reset */ 3283523f11b5SSrinivas Kandagatla ret = stmmac_init_dma_engine(priv); 3284523f11b5SSrinivas Kandagatla if (ret < 0) { 328538ddc59dSLABBE Corentin netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 328638ddc59dSLABBE Corentin __func__); 3287523f11b5SSrinivas Kandagatla return ret; 3288523f11b5SSrinivas Kandagatla } 3289523f11b5SSrinivas Kandagatla 3290523f11b5SSrinivas Kandagatla /* Copy the MAC addr into the HW */ 3291c10d4c82SJose Abreu stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 3292523f11b5SSrinivas Kandagatla 329302e57b9dSGiuseppe CAVALLARO /* PS and related bits will be programmed according to the speed */ 329402e57b9dSGiuseppe CAVALLARO if (priv->hw->pcs) { 329502e57b9dSGiuseppe CAVALLARO int speed = priv->plat->mac_port_sel_speed; 329602e57b9dSGiuseppe CAVALLARO 329702e57b9dSGiuseppe CAVALLARO if ((speed == SPEED_10) || (speed == SPEED_100) || 329802e57b9dSGiuseppe CAVALLARO (speed == SPEED_1000)) { 329902e57b9dSGiuseppe CAVALLARO priv->hw->ps = speed; 330002e57b9dSGiuseppe 
CAVALLARO } else { 330102e57b9dSGiuseppe CAVALLARO dev_warn(priv->device, "invalid port speed\n"); 330202e57b9dSGiuseppe CAVALLARO priv->hw->ps = 0; 330302e57b9dSGiuseppe CAVALLARO } 330402e57b9dSGiuseppe CAVALLARO } 330502e57b9dSGiuseppe CAVALLARO 3306523f11b5SSrinivas Kandagatla /* Initialize the MAC Core */ 3307c10d4c82SJose Abreu stmmac_core_init(priv, priv->hw, dev); 3308523f11b5SSrinivas Kandagatla 3309d0a9c9f9SJoao Pinto /* Initialize MTL*/ 3310d0a9c9f9SJoao Pinto stmmac_mtl_configuration(priv); 33119eb12474Sjpinto 33128bf993a5SJose Abreu /* Initialize Safety Features */ 33138bf993a5SJose Abreu stmmac_safety_feat_configuration(priv); 33148bf993a5SJose Abreu 3315c10d4c82SJose Abreu ret = stmmac_rx_ipc(priv, priv->hw); 3316978aded4SGiuseppe CAVALLARO if (!ret) { 331738ddc59dSLABBE Corentin netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 3318978aded4SGiuseppe CAVALLARO priv->plat->rx_coe = STMMAC_RX_COE_NONE; 3319d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = 0; 3320978aded4SGiuseppe CAVALLARO } 3321978aded4SGiuseppe CAVALLARO 3322523f11b5SSrinivas Kandagatla /* Enable the MAC Rx/Tx */ 3323c10d4c82SJose Abreu stmmac_mac_set(priv, priv->ioaddr, true); 3324523f11b5SSrinivas Kandagatla 3325b4f0a661SJoao Pinto /* Set the HW DMA mode and the COE */ 3326b4f0a661SJoao Pinto stmmac_dma_operation_mode(priv); 3327b4f0a661SJoao Pinto 3328523f11b5SSrinivas Kandagatla stmmac_mmc_setup(priv); 3329523f11b5SSrinivas Kandagatla 3330f4c7d894SBiao Huang if (ptp_register) { 3331f4c7d894SBiao Huang ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 3332f4c7d894SBiao Huang if (ret < 0) 3333f4c7d894SBiao Huang netdev_warn(priv->dev, 3334f4c7d894SBiao Huang "failed to enable PTP reference clock: %pe\n", 3335f4c7d894SBiao Huang ERR_PTR(ret)); 3336f4c7d894SBiao Huang } 3337f4c7d894SBiao Huang 3338523f11b5SSrinivas Kandagatla ret = stmmac_init_ptp(priv); 3339722eef28SHeiner Kallweit if (ret == -EOPNOTSUPP) 33401a212771SHeiner Kallweit netdev_info(priv->dev, "PTP not 
supported by HW\n"); 3341722eef28SHeiner Kallweit else if (ret) 3342722eef28SHeiner Kallweit netdev_warn(priv->dev, "PTP init failed\n"); 33430735e639SMohammad Athari Bin Ismail else if (ptp_register) 33440735e639SMohammad Athari Bin Ismail stmmac_ptp_register(priv); 3345523f11b5SSrinivas Kandagatla 3346388e201dSVineetha G. Jaya Kumaran priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 3347388e201dSVineetha G. Jaya Kumaran 3348388e201dSVineetha G. Jaya Kumaran /* Convert the timer from msec to usec */ 3349388e201dSVineetha G. Jaya Kumaran if (!priv->tx_lpi_timer) 3350388e201dSVineetha G. Jaya Kumaran priv->tx_lpi_timer = eee_timer * 1000; 3351523f11b5SSrinivas Kandagatla 3352a4e887faSJose Abreu if (priv->use_riwt) { 3353db2f2842SOng Boon Leong u32 queue; 33544e4337ccSJose Abreu 3355db2f2842SOng Boon Leong for (queue = 0; queue < rx_cnt; queue++) { 3356db2f2842SOng Boon Leong if (!priv->rx_riwt[queue]) 3357db2f2842SOng Boon Leong priv->rx_riwt[queue] = DEF_DMA_RIWT; 3358db2f2842SOng Boon Leong 3359db2f2842SOng Boon Leong stmmac_rx_watchdog(priv, priv->ioaddr, 3360db2f2842SOng Boon Leong priv->rx_riwt[queue], queue); 3361db2f2842SOng Boon Leong } 3362523f11b5SSrinivas Kandagatla } 3363523f11b5SSrinivas Kandagatla 3364c10d4c82SJose Abreu if (priv->hw->pcs) 3365c9ad4c10SBen Dooks (Codethink) stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 3366523f11b5SSrinivas Kandagatla 33674854ab99SJoao Pinto /* set TX and RX rings length */ 33684854ab99SJoao Pinto stmmac_set_rings_length(priv); 33694854ab99SJoao Pinto 3370f748be53SAlexandre TORGUE /* Enable TSO */ 3371146617b8SJoao Pinto if (priv->tso) { 33725e6038b8SOng Boon Leong for (chan = 0; chan < tx_cnt; chan++) { 33738531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 33745e6038b8SOng Boon Leong 33755e6038b8SOng Boon Leong /* TSO and TBS cannot co-exist */ 33765e6038b8SOng Boon Leong if (tx_q->tbs & STMMAC_TBS_AVAIL) 33775e6038b8SOng Boon Leong continue; 33785e6038b8SOng Boon 
Leong 3379a4e887faSJose Abreu stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 3380146617b8SJoao Pinto } 33815e6038b8SOng Boon Leong } 3382f748be53SAlexandre TORGUE 338367afd6d1SJose Abreu /* Enable Split Header */ 3384d08d32d1SOng Boon Leong sph_en = (priv->hw->rx_csum > 0) && priv->sph; 338567afd6d1SJose Abreu for (chan = 0; chan < rx_cnt; chan++) 3386d08d32d1SOng Boon Leong stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 3387d08d32d1SOng Boon Leong 338867afd6d1SJose Abreu 338930d93227SJose Abreu /* VLAN Tag Insertion */ 339030d93227SJose Abreu if (priv->dma_cap.vlins) 339130d93227SJose Abreu stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 339230d93227SJose Abreu 3393579a25a8SJose Abreu /* TBS */ 3394579a25a8SJose Abreu for (chan = 0; chan < tx_cnt; chan++) { 33958531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3396579a25a8SJose Abreu int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 3397579a25a8SJose Abreu 3398579a25a8SJose Abreu stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 3399579a25a8SJose Abreu } 3400579a25a8SJose Abreu 3401686cff3dSAashish Verma /* Configure real RX and TX queues */ 3402686cff3dSAashish Verma netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 3403686cff3dSAashish Verma netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 3404686cff3dSAashish Verma 34057d9e6c5aSJose Abreu /* Start the ball rolling... 
*/ 34067d9e6c5aSJose Abreu stmmac_start_all_dma(priv); 34077d9e6c5aSJose Abreu 34085a558611SOng Boon Leong if (priv->dma_cap.fpesel) { 34095a558611SOng Boon Leong stmmac_fpe_start_wq(priv); 34105a558611SOng Boon Leong 34115a558611SOng Boon Leong if (priv->plat->fpe_cfg->enable) 34125a558611SOng Boon Leong stmmac_fpe_handshake(priv, true); 34135a558611SOng Boon Leong } 34145a558611SOng Boon Leong 3415523f11b5SSrinivas Kandagatla return 0; 3416523f11b5SSrinivas Kandagatla } 3417523f11b5SSrinivas Kandagatla 3418c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev) 3419c66f6c37SThierry Reding { 3420c66f6c37SThierry Reding struct stmmac_priv *priv = netdev_priv(dev); 3421c66f6c37SThierry Reding 3422c66f6c37SThierry Reding clk_disable_unprepare(priv->plat->clk_ptp_ref); 3423c66f6c37SThierry Reding } 3424c66f6c37SThierry Reding 34258532f613SOng Boon Leong static void stmmac_free_irq(struct net_device *dev, 34268532f613SOng Boon Leong enum request_irq_err irq_err, int irq_idx) 34278532f613SOng Boon Leong { 34288532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 34298532f613SOng Boon Leong int j; 34308532f613SOng Boon Leong 34318532f613SOng Boon Leong switch (irq_err) { 34328532f613SOng Boon Leong case REQ_IRQ_ERR_ALL: 34338532f613SOng Boon Leong irq_idx = priv->plat->tx_queues_to_use; 34348532f613SOng Boon Leong fallthrough; 34358532f613SOng Boon Leong case REQ_IRQ_ERR_TX: 34368532f613SOng Boon Leong for (j = irq_idx - 1; j >= 0; j--) { 34378deec94cSOng Boon Leong if (priv->tx_irq[j] > 0) { 34388deec94cSOng Boon Leong irq_set_affinity_hint(priv->tx_irq[j], NULL); 34398531c808SChristian Marangi free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); 34408532f613SOng Boon Leong } 34418deec94cSOng Boon Leong } 34428532f613SOng Boon Leong irq_idx = priv->plat->rx_queues_to_use; 34438532f613SOng Boon Leong fallthrough; 34448532f613SOng Boon Leong case REQ_IRQ_ERR_RX: 34458532f613SOng Boon Leong for (j = irq_idx - 1; j >= 0; j--) { 
34468deec94cSOng Boon Leong if (priv->rx_irq[j] > 0) { 34478deec94cSOng Boon Leong irq_set_affinity_hint(priv->rx_irq[j], NULL); 34488531c808SChristian Marangi free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); 34498532f613SOng Boon Leong } 34508deec94cSOng Boon Leong } 34518532f613SOng Boon Leong 34528532f613SOng Boon Leong if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) 34538532f613SOng Boon Leong free_irq(priv->sfty_ue_irq, dev); 34548532f613SOng Boon Leong fallthrough; 34558532f613SOng Boon Leong case REQ_IRQ_ERR_SFTY_UE: 34568532f613SOng Boon Leong if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) 34578532f613SOng Boon Leong free_irq(priv->sfty_ce_irq, dev); 34588532f613SOng Boon Leong fallthrough; 34598532f613SOng Boon Leong case REQ_IRQ_ERR_SFTY_CE: 34608532f613SOng Boon Leong if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) 34618532f613SOng Boon Leong free_irq(priv->lpi_irq, dev); 34628532f613SOng Boon Leong fallthrough; 34638532f613SOng Boon Leong case REQ_IRQ_ERR_LPI: 34648532f613SOng Boon Leong if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) 34658532f613SOng Boon Leong free_irq(priv->wol_irq, dev); 34668532f613SOng Boon Leong fallthrough; 34678532f613SOng Boon Leong case REQ_IRQ_ERR_WOL: 34688532f613SOng Boon Leong free_irq(dev->irq, dev); 34698532f613SOng Boon Leong fallthrough; 34708532f613SOng Boon Leong case REQ_IRQ_ERR_MAC: 34718532f613SOng Boon Leong case REQ_IRQ_ERR_NO: 34728532f613SOng Boon Leong /* If MAC IRQ request error, no more IRQ to free */ 34738532f613SOng Boon Leong break; 34748532f613SOng Boon Leong } 34758532f613SOng Boon Leong } 34768532f613SOng Boon Leong 34778532f613SOng Boon Leong static int stmmac_request_irq_multi_msi(struct net_device *dev) 34788532f613SOng Boon Leong { 34798532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 34803e6dc7b6SWong Vee Khee enum request_irq_err irq_err; 34818deec94cSOng Boon Leong cpumask_t cpu_mask; 34828532f613SOng Boon Leong int irq_idx = 0; 
34838532f613SOng Boon Leong char *int_name; 34848532f613SOng Boon Leong int ret; 34858532f613SOng Boon Leong int i; 34868532f613SOng Boon Leong 34878532f613SOng Boon Leong /* For common interrupt */ 34888532f613SOng Boon Leong int_name = priv->int_name_mac; 34898532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "mac"); 34908532f613SOng Boon Leong ret = request_irq(dev->irq, stmmac_mac_interrupt, 34918532f613SOng Boon Leong 0, int_name, dev); 34928532f613SOng Boon Leong if (unlikely(ret < 0)) { 34938532f613SOng Boon Leong netdev_err(priv->dev, 34948532f613SOng Boon Leong "%s: alloc mac MSI %d (error: %d)\n", 34958532f613SOng Boon Leong __func__, dev->irq, ret); 34968532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_MAC; 34978532f613SOng Boon Leong goto irq_error; 34988532f613SOng Boon Leong } 34998532f613SOng Boon Leong 35008532f613SOng Boon Leong /* Request the Wake IRQ in case of another line 35018532f613SOng Boon Leong * is used for WoL 35028532f613SOng Boon Leong */ 35038532f613SOng Boon Leong if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 35048532f613SOng Boon Leong int_name = priv->int_name_wol; 35058532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "wol"); 35068532f613SOng Boon Leong ret = request_irq(priv->wol_irq, 35078532f613SOng Boon Leong stmmac_mac_interrupt, 35088532f613SOng Boon Leong 0, int_name, dev); 35098532f613SOng Boon Leong if (unlikely(ret < 0)) { 35108532f613SOng Boon Leong netdev_err(priv->dev, 35118532f613SOng Boon Leong "%s: alloc wol MSI %d (error: %d)\n", 35128532f613SOng Boon Leong __func__, priv->wol_irq, ret); 35138532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_WOL; 35148532f613SOng Boon Leong goto irq_error; 35158532f613SOng Boon Leong } 35168532f613SOng Boon Leong } 35178532f613SOng Boon Leong 35188532f613SOng Boon Leong /* Request the LPI IRQ in case of another line 35198532f613SOng Boon Leong * is used for LPI 35208532f613SOng Boon Leong */ 35218532f613SOng Boon Leong if (priv->lpi_irq > 0 && priv->lpi_irq != 
dev->irq) { 35228532f613SOng Boon Leong int_name = priv->int_name_lpi; 35238532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "lpi"); 35248532f613SOng Boon Leong ret = request_irq(priv->lpi_irq, 35258532f613SOng Boon Leong stmmac_mac_interrupt, 35268532f613SOng Boon Leong 0, int_name, dev); 35278532f613SOng Boon Leong if (unlikely(ret < 0)) { 35288532f613SOng Boon Leong netdev_err(priv->dev, 35298532f613SOng Boon Leong "%s: alloc lpi MSI %d (error: %d)\n", 35308532f613SOng Boon Leong __func__, priv->lpi_irq, ret); 35318532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_LPI; 35328532f613SOng Boon Leong goto irq_error; 35338532f613SOng Boon Leong } 35348532f613SOng Boon Leong } 35358532f613SOng Boon Leong 35368532f613SOng Boon Leong /* Request the Safety Feature Correctible Error line in 35378532f613SOng Boon Leong * case of another line is used 35388532f613SOng Boon Leong */ 35398532f613SOng Boon Leong if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { 35408532f613SOng Boon Leong int_name = priv->int_name_sfty_ce; 35418532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "safety-ce"); 35428532f613SOng Boon Leong ret = request_irq(priv->sfty_ce_irq, 35438532f613SOng Boon Leong stmmac_safety_interrupt, 35448532f613SOng Boon Leong 0, int_name, dev); 35458532f613SOng Boon Leong if (unlikely(ret < 0)) { 35468532f613SOng Boon Leong netdev_err(priv->dev, 35478532f613SOng Boon Leong "%s: alloc sfty ce MSI %d (error: %d)\n", 35488532f613SOng Boon Leong __func__, priv->sfty_ce_irq, ret); 35498532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_SFTY_CE; 35508532f613SOng Boon Leong goto irq_error; 35518532f613SOng Boon Leong } 35528532f613SOng Boon Leong } 35538532f613SOng Boon Leong 35548532f613SOng Boon Leong /* Request the Safety Feature Uncorrectible Error line in 35558532f613SOng Boon Leong * case of another line is used 35568532f613SOng Boon Leong */ 35578532f613SOng Boon Leong if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { 35588532f613SOng 
Boon Leong int_name = priv->int_name_sfty_ue; 35598532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "safety-ue"); 35608532f613SOng Boon Leong ret = request_irq(priv->sfty_ue_irq, 35618532f613SOng Boon Leong stmmac_safety_interrupt, 35628532f613SOng Boon Leong 0, int_name, dev); 35638532f613SOng Boon Leong if (unlikely(ret < 0)) { 35648532f613SOng Boon Leong netdev_err(priv->dev, 35658532f613SOng Boon Leong "%s: alloc sfty ue MSI %d (error: %d)\n", 35668532f613SOng Boon Leong __func__, priv->sfty_ue_irq, ret); 35678532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_SFTY_UE; 35688532f613SOng Boon Leong goto irq_error; 35698532f613SOng Boon Leong } 35708532f613SOng Boon Leong } 35718532f613SOng Boon Leong 35728532f613SOng Boon Leong /* Request Rx MSI irq */ 35738532f613SOng Boon Leong for (i = 0; i < priv->plat->rx_queues_to_use; i++) { 3574d68c2e1dSArnd Bergmann if (i >= MTL_MAX_RX_QUEUES) 35753e0d5699SArnd Bergmann break; 35768532f613SOng Boon Leong if (priv->rx_irq[i] == 0) 35778532f613SOng Boon Leong continue; 35788532f613SOng Boon Leong 35798532f613SOng Boon Leong int_name = priv->int_name_rx_irq[i]; 35808532f613SOng Boon Leong sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); 35818532f613SOng Boon Leong ret = request_irq(priv->rx_irq[i], 35828532f613SOng Boon Leong stmmac_msi_intr_rx, 35838531c808SChristian Marangi 0, int_name, &priv->dma_conf.rx_queue[i]); 35848532f613SOng Boon Leong if (unlikely(ret < 0)) { 35858532f613SOng Boon Leong netdev_err(priv->dev, 35868532f613SOng Boon Leong "%s: alloc rx-%d MSI %d (error: %d)\n", 35878532f613SOng Boon Leong __func__, i, priv->rx_irq[i], ret); 35888532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_RX; 35898532f613SOng Boon Leong irq_idx = i; 35908532f613SOng Boon Leong goto irq_error; 35918532f613SOng Boon Leong } 35928deec94cSOng Boon Leong cpumask_clear(&cpu_mask); 35938deec94cSOng Boon Leong cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 35948deec94cSOng Boon Leong irq_set_affinity_hint(priv->rx_irq[i], 
&cpu_mask); 35958532f613SOng Boon Leong } 35968532f613SOng Boon Leong 35978532f613SOng Boon Leong /* Request Tx MSI irq */ 35988532f613SOng Boon Leong for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 3599d68c2e1dSArnd Bergmann if (i >= MTL_MAX_TX_QUEUES) 36003e0d5699SArnd Bergmann break; 36018532f613SOng Boon Leong if (priv->tx_irq[i] == 0) 36028532f613SOng Boon Leong continue; 36038532f613SOng Boon Leong 36048532f613SOng Boon Leong int_name = priv->int_name_tx_irq[i]; 36058532f613SOng Boon Leong sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); 36068532f613SOng Boon Leong ret = request_irq(priv->tx_irq[i], 36078532f613SOng Boon Leong stmmac_msi_intr_tx, 36088531c808SChristian Marangi 0, int_name, &priv->dma_conf.tx_queue[i]); 36098532f613SOng Boon Leong if (unlikely(ret < 0)) { 36108532f613SOng Boon Leong netdev_err(priv->dev, 36118532f613SOng Boon Leong "%s: alloc tx-%d MSI %d (error: %d)\n", 36128532f613SOng Boon Leong __func__, i, priv->tx_irq[i], ret); 36138532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_TX; 36148532f613SOng Boon Leong irq_idx = i; 36158532f613SOng Boon Leong goto irq_error; 36168532f613SOng Boon Leong } 36178deec94cSOng Boon Leong cpumask_clear(&cpu_mask); 36188deec94cSOng Boon Leong cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 36198deec94cSOng Boon Leong irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); 36208532f613SOng Boon Leong } 36218532f613SOng Boon Leong 36228532f613SOng Boon Leong return 0; 36238532f613SOng Boon Leong 36248532f613SOng Boon Leong irq_error: 36258532f613SOng Boon Leong stmmac_free_irq(dev, irq_err, irq_idx); 36268532f613SOng Boon Leong return ret; 36278532f613SOng Boon Leong } 36288532f613SOng Boon Leong 36298532f613SOng Boon Leong static int stmmac_request_irq_single(struct net_device *dev) 36308532f613SOng Boon Leong { 36318532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 36323e6dc7b6SWong Vee Khee enum request_irq_err irq_err; 36338532f613SOng Boon Leong int ret; 36348532f613SOng Boon 
Leong 36358532f613SOng Boon Leong ret = request_irq(dev->irq, stmmac_interrupt, 36368532f613SOng Boon Leong IRQF_SHARED, dev->name, dev); 36378532f613SOng Boon Leong if (unlikely(ret < 0)) { 36388532f613SOng Boon Leong netdev_err(priv->dev, 36398532f613SOng Boon Leong "%s: ERROR: allocating the IRQ %d (error: %d)\n", 36408532f613SOng Boon Leong __func__, dev->irq, ret); 36418532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_MAC; 36423e6dc7b6SWong Vee Khee goto irq_error; 36438532f613SOng Boon Leong } 36448532f613SOng Boon Leong 36458532f613SOng Boon Leong /* Request the Wake IRQ in case of another line 36468532f613SOng Boon Leong * is used for WoL 36478532f613SOng Boon Leong */ 36488532f613SOng Boon Leong if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 36498532f613SOng Boon Leong ret = request_irq(priv->wol_irq, stmmac_interrupt, 36508532f613SOng Boon Leong IRQF_SHARED, dev->name, dev); 36518532f613SOng Boon Leong if (unlikely(ret < 0)) { 36528532f613SOng Boon Leong netdev_err(priv->dev, 36538532f613SOng Boon Leong "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 36548532f613SOng Boon Leong __func__, priv->wol_irq, ret); 36558532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_WOL; 36563e6dc7b6SWong Vee Khee goto irq_error; 36578532f613SOng Boon Leong } 36588532f613SOng Boon Leong } 36598532f613SOng Boon Leong 36608532f613SOng Boon Leong /* Request the IRQ lines */ 36618532f613SOng Boon Leong if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 36628532f613SOng Boon Leong ret = request_irq(priv->lpi_irq, stmmac_interrupt, 36638532f613SOng Boon Leong IRQF_SHARED, dev->name, dev); 36648532f613SOng Boon Leong if (unlikely(ret < 0)) { 36658532f613SOng Boon Leong netdev_err(priv->dev, 36668532f613SOng Boon Leong "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 36678532f613SOng Boon Leong __func__, priv->lpi_irq, ret); 36688532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_LPI; 36698532f613SOng Boon Leong goto irq_error; 36708532f613SOng Boon Leong } 36718532f613SOng Boon Leong } 
36728532f613SOng Boon Leong 36738532f613SOng Boon Leong return 0; 36748532f613SOng Boon Leong 36758532f613SOng Boon Leong irq_error: 36768532f613SOng Boon Leong stmmac_free_irq(dev, irq_err, 0); 36778532f613SOng Boon Leong return ret; 36788532f613SOng Boon Leong } 36798532f613SOng Boon Leong 36808532f613SOng Boon Leong static int stmmac_request_irq(struct net_device *dev) 36818532f613SOng Boon Leong { 36828532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 36838532f613SOng Boon Leong int ret; 36848532f613SOng Boon Leong 36858532f613SOng Boon Leong /* Request the IRQ lines */ 36868532f613SOng Boon Leong if (priv->plat->multi_msi_en) 36878532f613SOng Boon Leong ret = stmmac_request_irq_multi_msi(dev); 36888532f613SOng Boon Leong else 36898532f613SOng Boon Leong ret = stmmac_request_irq_single(dev); 36908532f613SOng Boon Leong 36918532f613SOng Boon Leong return ret; 36928532f613SOng Boon Leong } 36938532f613SOng Boon Leong 3694523f11b5SSrinivas Kandagatla /** 3695ba39b344SChristian Marangi * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue 3696ba39b344SChristian Marangi * @priv: driver private structure 3697ba39b344SChristian Marangi * @mtu: MTU to setup the dma queue and buf with 3698ba39b344SChristian Marangi * Description: Allocate and generate a dma_conf based on the provided MTU. 3699ba39b344SChristian Marangi * Allocate the Tx/Rx DMA queue and init them. 3700ba39b344SChristian Marangi * Return value: 3701ba39b344SChristian Marangi * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. 
3702ba39b344SChristian Marangi */ 3703ba39b344SChristian Marangi static struct stmmac_dma_conf * 3704ba39b344SChristian Marangi stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) 3705ba39b344SChristian Marangi { 3706ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf; 3707ba39b344SChristian Marangi int chan, bfsize, ret; 3708ba39b344SChristian Marangi 3709ba39b344SChristian Marangi dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); 3710ba39b344SChristian Marangi if (!dma_conf) { 3711ba39b344SChristian Marangi netdev_err(priv->dev, "%s: DMA conf allocation failed\n", 3712ba39b344SChristian Marangi __func__); 3713ba39b344SChristian Marangi return ERR_PTR(-ENOMEM); 3714ba39b344SChristian Marangi } 3715ba39b344SChristian Marangi 3716ba39b344SChristian Marangi bfsize = stmmac_set_16kib_bfsize(priv, mtu); 3717ba39b344SChristian Marangi if (bfsize < 0) 3718ba39b344SChristian Marangi bfsize = 0; 3719ba39b344SChristian Marangi 3720ba39b344SChristian Marangi if (bfsize < BUF_SIZE_16KiB) 3721ba39b344SChristian Marangi bfsize = stmmac_set_bfsize(mtu, 0); 3722ba39b344SChristian Marangi 3723ba39b344SChristian Marangi dma_conf->dma_buf_sz = bfsize; 3724ba39b344SChristian Marangi /* Chose the tx/rx size from the already defined one in the 3725ba39b344SChristian Marangi * priv struct. 
(if defined) 3726ba39b344SChristian Marangi */ 3727ba39b344SChristian Marangi dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; 3728ba39b344SChristian Marangi dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; 3729ba39b344SChristian Marangi 3730ba39b344SChristian Marangi if (!dma_conf->dma_tx_size) 3731ba39b344SChristian Marangi dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; 3732ba39b344SChristian Marangi if (!dma_conf->dma_rx_size) 3733ba39b344SChristian Marangi dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; 3734ba39b344SChristian Marangi 3735ba39b344SChristian Marangi /* Earlier check for TBS */ 3736ba39b344SChristian Marangi for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { 3737ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; 3738ba39b344SChristian Marangi int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; 3739ba39b344SChristian Marangi 3740ba39b344SChristian Marangi /* Setup per-TXQ tbs flag before TX descriptor alloc */ 3741ba39b344SChristian Marangi tx_q->tbs |= tbs_en ? 
STMMAC_TBS_AVAIL : 0; 3742ba39b344SChristian Marangi } 3743ba39b344SChristian Marangi 3744ba39b344SChristian Marangi ret = alloc_dma_desc_resources(priv, dma_conf); 3745ba39b344SChristian Marangi if (ret < 0) { 3746ba39b344SChristian Marangi netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 3747ba39b344SChristian Marangi __func__); 3748ba39b344SChristian Marangi goto alloc_error; 3749ba39b344SChristian Marangi } 3750ba39b344SChristian Marangi 3751ba39b344SChristian Marangi ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); 3752ba39b344SChristian Marangi if (ret < 0) { 3753ba39b344SChristian Marangi netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 3754ba39b344SChristian Marangi __func__); 3755ba39b344SChristian Marangi goto init_error; 3756ba39b344SChristian Marangi } 3757ba39b344SChristian Marangi 3758ba39b344SChristian Marangi return dma_conf; 3759ba39b344SChristian Marangi 3760ba39b344SChristian Marangi init_error: 3761ba39b344SChristian Marangi free_dma_desc_resources(priv, dma_conf); 3762ba39b344SChristian Marangi alloc_error: 3763ba39b344SChristian Marangi kfree(dma_conf); 3764ba39b344SChristian Marangi return ERR_PTR(ret); 3765ba39b344SChristian Marangi } 3766ba39b344SChristian Marangi 3767ba39b344SChristian Marangi /** 3768ba39b344SChristian Marangi * __stmmac_open - open entry point of the driver 37697ac6653aSJeff Kirsher * @dev : pointer to the device structure. 3770ba39b344SChristian Marangi * @dma_conf : structure to take the dma data 37717ac6653aSJeff Kirsher * Description: 37727ac6653aSJeff Kirsher * This function is the open entry point of the driver. 37737ac6653aSJeff Kirsher * Return value: 37747ac6653aSJeff Kirsher * 0 on success and an appropriate (-)ve integer as defined in errno.h 37757ac6653aSJeff Kirsher * file on failure. 
 */
static int __stmmac_open(struct net_device *dev,
			 struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int mode = priv->plat->phy_interface;
	u32 chan;
	int ret;

	/* Keep the device powered for the whole open sequence; dropped on
	 * every error path below and at stmmac_release() time.
	 */
	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* Attach the PHY unless the PCS is TBI/RTBI or the XPCS runs
	 * clause-73 autoneg, in which cases no phylib PHY is used.
	 */
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI &&
	    (!priv->hw->xpcs ||
	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			goto init_phy_error;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	/* Adopt the freshly built DMA configuration as the live one */
	buf_sz = dma_conf->dma_buf_sz;
	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));

	stmmac_reset_queues_param(priv);

	/* Power up the SerDes lane first if the platform provides one */
	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
		if (ret < 0) {
			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
				   __func__);
			goto init_error;
		}
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);
	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

	/* Unwind in strict reverse order of the setup above */
irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
	phylink_disconnect_phy(priv->phylink);
init_phy_error:
	pm_runtime_put(priv->device);
	return ret;
}

/* ndo_open callback: build a DMA configuration sized for the current MTU
 * and hand it to __stmmac_open(). The temporary dma_conf is always freed
 * here because __stmmac_open() copies it into priv->dma_conf.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;

	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	ret = __stmmac_open(dev, dma_conf);
	kfree(dma_conf);
	return ret;
}

/* Tear down the Frame Preemption (FPE) workqueue: flag removal first so
 * the worker stops rescheduling itself, then destroy the queue.
 */
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq)
		destroy_workqueue(priv->fpe_wq);

	netdev_info(priv->dev, "FPE workqueue stop");
}

/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-queue TX coalescing timers before the IRQs go away */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	netif_tx_disable(dev);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Leave LPI state and stop the EEE software timer */
	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* Powerdown Serdes if there is */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	/* Balances the pm_runtime_resume_and_get() done at open time */
	pm_runtime_put(priv->device);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_stop_wq(priv);

	return 0;
}

/* Ask the hardware to insert the skb's VLAN tag(s) via the TX descriptor.
 * Returns true when the tag was programmed into the current descriptor
 * (which is then handed to the DMA and cur_tx advanced), false when HW
 * insertion is unavailable or not applicable to this skb.
 */
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	/* For 802.1AD, the present tag becomes the inner one */
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
	return true;
}
/**
 * stmmac_tso_allocator - fill TSO payload into one or more descriptors
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * @last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description:
 * This function fills descriptor and request new descriptors according to
 * buffer length to fill
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	/* Consume descriptors in TSO_MAX_BUFF_SIZE chunks until the whole
	 * payload span is covered.
	 */
	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		/* Program the buffer address; 32-bit capable DMA writes
		 * des0 directly, otherwise go through the addr helper.
		 */
		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		/* Last-segment flag only on the chunk that ends the span */
		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

/* Publish the prepared TX descriptors to the DMA engine by advancing the
 * ring tail pointer for @queue.
 */
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	int desc_size;

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
}

/**
 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 * @skb : the socket buffer
 * @dev : device pointer
 * Description: this is the transmit function that is called on TSO frames
 * (support available on GMAC4 and newer chips).
 * Diagram below show the ring programming in case of TSO frames:
 *
 * First Descriptor
 *  --------
 * | DES0 |---> buffer1 = L2/L3/L4 header
 * | DES1 |---> TCP Payload (can continue on next descr...)
 * | DES2 |---> buffer 1 and 2 len
 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *  --------
 *	|
 *     ...
 *	|
 *  --------
 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
 * | DES1 | --|
 * | DES2 | --> buffer 1 and 2 len
 * | DES3 |
 *  --------
 *
 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, tx_packets;
	int tmp_pay_len = 0, first_tx;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	u8 proto_hdr_len, hdr;
	u32 pay_len, mss;
	dma_addr_t des;
	int i;

	tx_q = &priv->dma_conf.tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths (UDP GSO vs plain TCP TSO) */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
		hdr = sizeof(struct udphdr);
	} else {
		proto_hdr_len = skb_tcp_all_headers(skb);
		hdr = tcp_hdrlen(skb);
	}

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed: program a context descriptor; its
	 * own bit is flipped last, after the first descriptor is complete.
	 */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;

	if (priv->dma_cap.addr64 <= 32) {
		first->des0 = cpu_to_le32(des);

		/* Fill start of payload in buff2 of first descriptor */
		if (pay_len)
			first->des1 = cpu_to_le32(des + proto_hdr_len);

		/* If needed take extra descriptors to fill the remaining payload */
		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
	} else {
		stmmac_set_desc_addr(priv, first, des);
		tmp_pay_len = pay_len;
		des += proto_hdr_len;
		pay_len = 0;
	}

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;

	/* Manage tx mitigation: decide whether this batch should raise a
	 * completion interrupt (timestamping forces it, coalescing
	 * thresholds otherwise).
	 */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
				   proto_hdr_len,
				   pay_len,
				   1, tx_q->tx_skbuff_dma[first_entry].last_segment,
				   hdr / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 * stmmac_xmit - Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int first_entry, tx_packets, enh_desc;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int gso = skb_shinfo(skb)->gso_type;
	struct dma_edesc *tbs_desc = NULL;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	int entry, first_tx;
	dma_addr_t des;

	tx_q = &priv->dma_conf.tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Leave software-timed LPI state before touching the TX path */
	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
		/* UDP GSO is only offloaded on GMAC4 class hardware */
		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
			return stmmac_tso_xmit(skb, dev);
	}

	/* Not enough free descriptors for this skb: stop the queue.
	 * stmmac_tx_clean() restarts it once entries are reclaimed.
	 */
	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	/* Pick the first descriptor according to the descriptor layout in
	 * use: extended, enhanced-TBS basic part, or normal.
	 */
	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[entry].basic;
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	/* Map and program one descriptor per fragment; the linear (head)
	 * part is mapped last, below, so its OWN bit can be set after all
	 * fragment descriptors are ready.
	 */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				       priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;
	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_packets = (entry + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	/* Decide whether to request a TX-complete interrupt (IC bit):
	 * always for HW-timestamped frames, otherwise per the per-queue
	 * frame-coalescing threshold.
	 */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (likely(priv->extend_desc))
			desc = &tx_q->dma_etx[entry].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = &tx_q->dma_tx[entry];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	/* Stop the queue if the next frame (worst case: MAX_SKB_FRAGS
	 * fragments plus head) might not fit.
	 */
	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;
		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				       csum_insertion, priv->mode, 0,
				       last_segment, skb->len);
	}

	/* Time-Based Scheduling: program the launch time into the
	 * enhanced descriptor of the first entry.
	 */
	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		tbs_desc = &tx_q->dma_entx[first_entry];
		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
	}

	stmmac_set_tx_owner(priv, first);

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* Strip an outer 802.1Q/802.1AD tag from the raw frame and hand it to the
 * stack via the HW-accel VLAN path, when the corresponding RX VLAN feature
 * is enabled on the netdev.
 */
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth;
	__be16 vlan_proto;
	u16 vlanid;

	veth = (struct vlan_ethhdr *)skb->data;
	vlan_proto = veth->h_vlan_proto;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* DMA engine limited to <= 32-bit addressing: keep pages in DMA32 */
	if (priv->dma_cap.addr64 <= 32)
		gfp |= GFP_DMA32;

	/* Re-arm every descriptor the hardware has consumed */
	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (!buf->page) {
			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->page)
				break;
		}

		/* Split Header mode needs a second buffer for the payload */
		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* RX watchdog only when interrupt coalescing (riwt) is in
		 * use and the frame counter has not just wrapped.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* All descriptor fields must be written before handing
		 * ownership back to the DMA engine.
		 */
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	/* Kick the DMA by moving the ring tail pointer */
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}

/* Compute how many bytes of the frame live in the first RX buffer,
 * accounting for Split Header mode and multi-descriptor frames.
 */
static unsigned int stmmac_rx_buf1_len(struct
stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_conf.dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}

/* Compute how many bytes live in the second RX buffer (Split Header mode
 * only); @len is the number of bytes already accounted in buffer 1.
 */
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_conf.dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}

/* Queue one xdp_frame on a TX ring. @dma_map selects ndo_xdp_xmit
 * (map the frame data) vs XDP_TX (frame already backed by page_pool DMA).
 * Returns STMMAC_XDP_TX on success, STMMAC_XDP_CONSUMED on failure.
 */
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	/* Drop if the ring is running low on free descriptors */
	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		/* ndo_xdp_xmit path: map the redirected frame */
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		/* XDP_TX path: reuse the page_pool DMA mapping */
		struct page *page = virt_to_page(xdpf->data);

		dma_addr =
page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	/* Remember the frame so the TX-clean path can release it */
	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       true, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	/* Request a TX-complete interrupt every tx_coal_frames frames */
	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		priv->xstats.tx_set_ic_bit++;
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}

/* Map a CPU id onto a valid TX queue index by folding it into the
 * [0, tx_queues_to_use) range.
 */
static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
				   int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= priv->plat->tx_queues_to_use)
		index -= priv->plat->tx_queues_to_use;

	return index;
}

/* XDP_TX verdict handler: convert the buffer to an xdp_frame and
 * transmit it on this CPU's TX queue under the queue's xmit lock.
 */
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv,
queue);

	__netif_tx_unlock(nq);

	return res;
}

/* Run the XDP program on one buffer and translate its verdict into a
 * STMMAC_XDP_* result code (PASS/TX/REDIRECT/CONSUMED).
 */
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
				 struct bpf_prog *prog,
				 struct xdp_buff *xdp)
{
	u32 act;
	int res;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		res = STMMAC_XDP_PASS;
		break;
	case XDP_TX:
		res = stmmac_xdp_xmit_back(priv, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
			res = STMMAC_XDP_CONSUMED;
		else
			res = STMMAC_XDP_REDIRECT;
		break;
	default:
		/* Unknown verdicts are logged, then dropped */
		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		res = STMMAC_XDP_CONSUMED;
		break;
	}

	return res;
}

/* Wrapper used by the RX path: fetch the attached program (if any) and
 * return the negated result code encoded as an ERR_PTR.
 */
static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
					   struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog) {
		res = STMMAC_XDP_PASS;
		goto out;
	}

	res = __stmmac_xdp_run_prog(priv, prog, xdp);
out:
	return ERR_PTR(-res);
}

/* End-of-NAPI-poll housekeeping for XDP: arm the TX timer if frames
 * were queued via XDP_TX, and flush any pending redirect maps.
 */
static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
				   int xdp_status)
{
	int cpu = smp_processor_id();
	int queue;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);

	if (xdp_status & STMMAC_XDP_TX)
		stmmac_tx_timer_arm(priv, queue);

	if (xdp_status & STMMAC_XDP_REDIRECT)
		xdp_do_flush();
}

/* Zero-copy (XSK) RX: build an skb by copying the packet (and metadata)
 * out of the xdp_buff so the umem buffer can be recycled.
 */
static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
					       struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&ch->rxtx_napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}

/* Zero-copy RX: turn a passed-up xdp_buff into an skb, fill in offload
 * results (timestamp, VLAN, checksum, RSS hash) and hand it to GRO.
 */
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
				   struct dma_desc *p, struct dma_desc *np,
				   struct xdp_buff *xdp)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int len = xdp->data_end - xdp->data;
	enum pkt_hash_types hash_type;
	int coe = priv->hw->rx_csum;
	struct sk_buff *skb;
	u32 hash;

	skb = stmmac_construct_skb_zc(ch, xdp);
	if (!skb) {
		priv->dev->stats.rx_dropped++;
		return;
	}

	stmmac_get_rx_hwtstamp(priv, p, np, skb);
	stmmac_rx_vlan(priv->dev, skb);
	skb->protocol = eth_type_trans(skb, priv->dev);

	if (unlikely(!coe))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
		skb_set_hash(skb, hash, hash_type);

skb_record_rx_queue(skb, queue); 4919132c32eeSOng Boon Leong napi_gro_receive(&ch->rxtx_napi, skb); 4920bba2556eSOng Boon Leong 4921bba2556eSOng Boon Leong priv->dev->stats.rx_packets++; 4922bba2556eSOng Boon Leong priv->dev->stats.rx_bytes += len; 4923bba2556eSOng Boon Leong } 4924bba2556eSOng Boon Leong 4925bba2556eSOng Boon Leong static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 4926bba2556eSOng Boon Leong { 49278531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4928bba2556eSOng Boon Leong unsigned int entry = rx_q->dirty_rx; 4929bba2556eSOng Boon Leong struct dma_desc *rx_desc = NULL; 4930bba2556eSOng Boon Leong bool ret = true; 4931bba2556eSOng Boon Leong 4932bba2556eSOng Boon Leong budget = min(budget, stmmac_rx_dirty(priv, queue)); 4933bba2556eSOng Boon Leong 4934bba2556eSOng Boon Leong while (budget-- > 0 && entry != rx_q->cur_rx) { 4935bba2556eSOng Boon Leong struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4936bba2556eSOng Boon Leong dma_addr_t dma_addr; 4937bba2556eSOng Boon Leong bool use_rx_wd; 4938bba2556eSOng Boon Leong 4939bba2556eSOng Boon Leong if (!buf->xdp) { 4940bba2556eSOng Boon Leong buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 4941bba2556eSOng Boon Leong if (!buf->xdp) { 4942bba2556eSOng Boon Leong ret = false; 4943bba2556eSOng Boon Leong break; 4944bba2556eSOng Boon Leong } 4945bba2556eSOng Boon Leong } 4946bba2556eSOng Boon Leong 4947bba2556eSOng Boon Leong if (priv->extend_desc) 4948bba2556eSOng Boon Leong rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); 4949bba2556eSOng Boon Leong else 4950bba2556eSOng Boon Leong rx_desc = rx_q->dma_rx + entry; 4951bba2556eSOng Boon Leong 4952bba2556eSOng Boon Leong dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 4953bba2556eSOng Boon Leong stmmac_set_desc_addr(priv, rx_desc, dma_addr); 4954bba2556eSOng Boon Leong stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); 4955bba2556eSOng Boon Leong stmmac_refill_desc3(priv, rx_q, 
rx_desc); 4956bba2556eSOng Boon Leong 4957bba2556eSOng Boon Leong rx_q->rx_count_frames++; 4958bba2556eSOng Boon Leong rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 4959bba2556eSOng Boon Leong if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 4960bba2556eSOng Boon Leong rx_q->rx_count_frames = 0; 4961bba2556eSOng Boon Leong 4962bba2556eSOng Boon Leong use_rx_wd = !priv->rx_coal_frames[queue]; 4963bba2556eSOng Boon Leong use_rx_wd |= rx_q->rx_count_frames > 0; 4964bba2556eSOng Boon Leong if (!priv->use_riwt) 4965bba2556eSOng Boon Leong use_rx_wd = false; 4966bba2556eSOng Boon Leong 4967bba2556eSOng Boon Leong dma_wmb(); 4968bba2556eSOng Boon Leong stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); 4969bba2556eSOng Boon Leong 49708531c808SChristian Marangi entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 4971bba2556eSOng Boon Leong } 4972bba2556eSOng Boon Leong 4973bba2556eSOng Boon Leong if (rx_desc) { 4974bba2556eSOng Boon Leong rx_q->dirty_rx = entry; 4975bba2556eSOng Boon Leong rx_q->rx_tail_addr = rx_q->dma_rx_phy + 4976bba2556eSOng Boon Leong (rx_q->dirty_rx * sizeof(struct dma_desc)); 4977bba2556eSOng Boon Leong stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 4978bba2556eSOng Boon Leong } 4979bba2556eSOng Boon Leong 4980bba2556eSOng Boon Leong return ret; 4981bba2556eSOng Boon Leong } 4982bba2556eSOng Boon Leong 4983bba2556eSOng Boon Leong static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) 4984bba2556eSOng Boon Leong { 49858531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4986bba2556eSOng Boon Leong unsigned int count = 0, error = 0, len = 0; 4987bba2556eSOng Boon Leong int dirty = stmmac_rx_dirty(priv, queue); 4988bba2556eSOng Boon Leong unsigned int next_entry = rx_q->cur_rx; 4989bba2556eSOng Boon Leong unsigned int desc_size; 4990bba2556eSOng Boon Leong struct bpf_prog *prog; 4991bba2556eSOng Boon Leong bool failure = false; 4992bba2556eSOng Boon 
Leong int xdp_status = 0; 4993bba2556eSOng Boon Leong int status = 0; 4994bba2556eSOng Boon Leong 4995bba2556eSOng Boon Leong if (netif_msg_rx_status(priv)) { 4996bba2556eSOng Boon Leong void *rx_head; 4997bba2556eSOng Boon Leong 4998bba2556eSOng Boon Leong netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 4999bba2556eSOng Boon Leong if (priv->extend_desc) { 5000bba2556eSOng Boon Leong rx_head = (void *)rx_q->dma_erx; 5001bba2556eSOng Boon Leong desc_size = sizeof(struct dma_extended_desc); 5002bba2556eSOng Boon Leong } else { 5003bba2556eSOng Boon Leong rx_head = (void *)rx_q->dma_rx; 5004bba2556eSOng Boon Leong desc_size = sizeof(struct dma_desc); 5005bba2556eSOng Boon Leong } 5006bba2556eSOng Boon Leong 50078531c808SChristian Marangi stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5008bba2556eSOng Boon Leong rx_q->dma_rx_phy, desc_size); 5009bba2556eSOng Boon Leong } 5010bba2556eSOng Boon Leong while (count < limit) { 5011bba2556eSOng Boon Leong struct stmmac_rx_buffer *buf; 5012bba2556eSOng Boon Leong unsigned int buf1_len = 0; 5013bba2556eSOng Boon Leong struct dma_desc *np, *p; 5014bba2556eSOng Boon Leong int entry; 5015bba2556eSOng Boon Leong int res; 5016bba2556eSOng Boon Leong 5017bba2556eSOng Boon Leong if (!count && rx_q->state_saved) { 5018bba2556eSOng Boon Leong error = rx_q->state.error; 5019bba2556eSOng Boon Leong len = rx_q->state.len; 5020bba2556eSOng Boon Leong } else { 5021bba2556eSOng Boon Leong rx_q->state_saved = false; 5022bba2556eSOng Boon Leong error = 0; 5023bba2556eSOng Boon Leong len = 0; 5024bba2556eSOng Boon Leong } 5025bba2556eSOng Boon Leong 5026bba2556eSOng Boon Leong if (count >= limit) 5027bba2556eSOng Boon Leong break; 5028bba2556eSOng Boon Leong 5029bba2556eSOng Boon Leong read_again: 5030bba2556eSOng Boon Leong buf1_len = 0; 5031bba2556eSOng Boon Leong entry = next_entry; 5032bba2556eSOng Boon Leong buf = &rx_q->buf_pool[entry]; 5033bba2556eSOng Boon Leong 5034bba2556eSOng Boon Leong if (dirty >= 
STMMAC_RX_FILL_BATCH) { 5035bba2556eSOng Boon Leong failure = failure || 5036bba2556eSOng Boon Leong !stmmac_rx_refill_zc(priv, queue, dirty); 5037bba2556eSOng Boon Leong dirty = 0; 5038bba2556eSOng Boon Leong } 5039bba2556eSOng Boon Leong 5040bba2556eSOng Boon Leong if (priv->extend_desc) 5041bba2556eSOng Boon Leong p = (struct dma_desc *)(rx_q->dma_erx + entry); 5042bba2556eSOng Boon Leong else 5043bba2556eSOng Boon Leong p = rx_q->dma_rx + entry; 5044bba2556eSOng Boon Leong 5045bba2556eSOng Boon Leong /* read the status of the incoming frame */ 5046bba2556eSOng Boon Leong status = stmmac_rx_status(priv, &priv->dev->stats, 5047bba2556eSOng Boon Leong &priv->xstats, p); 5048bba2556eSOng Boon Leong /* check if managed by the DMA otherwise go ahead */ 5049bba2556eSOng Boon Leong if (unlikely(status & dma_own)) 5050bba2556eSOng Boon Leong break; 5051bba2556eSOng Boon Leong 5052bba2556eSOng Boon Leong /* Prefetch the next RX descriptor */ 5053bba2556eSOng Boon Leong rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 50548531c808SChristian Marangi priv->dma_conf.dma_rx_size); 5055bba2556eSOng Boon Leong next_entry = rx_q->cur_rx; 5056bba2556eSOng Boon Leong 5057bba2556eSOng Boon Leong if (priv->extend_desc) 5058bba2556eSOng Boon Leong np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5059bba2556eSOng Boon Leong else 5060bba2556eSOng Boon Leong np = rx_q->dma_rx + next_entry; 5061bba2556eSOng Boon Leong 5062bba2556eSOng Boon Leong prefetch(np); 5063bba2556eSOng Boon Leong 50642b9fff64SSong Yoong Siang /* Ensure a valid XSK buffer before proceed */ 50652b9fff64SSong Yoong Siang if (!buf->xdp) 50662b9fff64SSong Yoong Siang break; 50672b9fff64SSong Yoong Siang 5068bba2556eSOng Boon Leong if (priv->extend_desc) 5069bba2556eSOng Boon Leong stmmac_rx_extended_status(priv, &priv->dev->stats, 5070bba2556eSOng Boon Leong &priv->xstats, 5071bba2556eSOng Boon Leong rx_q->dma_erx + entry); 5072bba2556eSOng Boon Leong if (unlikely(status == discard_frame)) { 5073bba2556eSOng Boon 
Leong xsk_buff_free(buf->xdp); 5074bba2556eSOng Boon Leong buf->xdp = NULL; 5075bba2556eSOng Boon Leong dirty++; 5076bba2556eSOng Boon Leong error = 1; 5077bba2556eSOng Boon Leong if (!priv->hwts_rx_en) 5078bba2556eSOng Boon Leong priv->dev->stats.rx_errors++; 5079bba2556eSOng Boon Leong } 5080bba2556eSOng Boon Leong 5081bba2556eSOng Boon Leong if (unlikely(error && (status & rx_not_ls))) 5082bba2556eSOng Boon Leong goto read_again; 5083bba2556eSOng Boon Leong if (unlikely(error)) { 5084bba2556eSOng Boon Leong count++; 5085bba2556eSOng Boon Leong continue; 5086bba2556eSOng Boon Leong } 5087bba2556eSOng Boon Leong 5088bba2556eSOng Boon Leong /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ 5089bba2556eSOng Boon Leong if (likely(status & rx_not_ls)) { 5090bba2556eSOng Boon Leong xsk_buff_free(buf->xdp); 5091bba2556eSOng Boon Leong buf->xdp = NULL; 5092bba2556eSOng Boon Leong dirty++; 5093bba2556eSOng Boon Leong count++; 5094bba2556eSOng Boon Leong goto read_again; 5095bba2556eSOng Boon Leong } 5096bba2556eSOng Boon Leong 5097bba2556eSOng Boon Leong /* XDP ZC Frame only support primary buffers for now */ 5098bba2556eSOng Boon Leong buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5099bba2556eSOng Boon Leong len += buf1_len; 5100bba2556eSOng Boon Leong 5101929d4342SKurt Kanzenbach /* ACS is disabled; strip manually. 
*/ 5102929d4342SKurt Kanzenbach if (likely(!(status & rx_not_ls))) { 5103bba2556eSOng Boon Leong buf1_len -= ETH_FCS_LEN; 5104bba2556eSOng Boon Leong len -= ETH_FCS_LEN; 5105bba2556eSOng Boon Leong } 5106bba2556eSOng Boon Leong 5107bba2556eSOng Boon Leong /* RX buffer is good and fit into a XSK pool buffer */ 5108bba2556eSOng Boon Leong buf->xdp->data_end = buf->xdp->data + buf1_len; 5109bba2556eSOng Boon Leong xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); 5110bba2556eSOng Boon Leong 5111bba2556eSOng Boon Leong prog = READ_ONCE(priv->xdp_prog); 5112bba2556eSOng Boon Leong res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); 5113bba2556eSOng Boon Leong 5114bba2556eSOng Boon Leong switch (res) { 5115bba2556eSOng Boon Leong case STMMAC_XDP_PASS: 5116bba2556eSOng Boon Leong stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); 5117bba2556eSOng Boon Leong xsk_buff_free(buf->xdp); 5118bba2556eSOng Boon Leong break; 5119bba2556eSOng Boon Leong case STMMAC_XDP_CONSUMED: 5120bba2556eSOng Boon Leong xsk_buff_free(buf->xdp); 5121bba2556eSOng Boon Leong priv->dev->stats.rx_dropped++; 5122bba2556eSOng Boon Leong break; 5123bba2556eSOng Boon Leong case STMMAC_XDP_TX: 5124bba2556eSOng Boon Leong case STMMAC_XDP_REDIRECT: 5125bba2556eSOng Boon Leong xdp_status |= res; 5126bba2556eSOng Boon Leong break; 5127bba2556eSOng Boon Leong } 5128bba2556eSOng Boon Leong 5129bba2556eSOng Boon Leong buf->xdp = NULL; 5130bba2556eSOng Boon Leong dirty++; 5131bba2556eSOng Boon Leong count++; 5132bba2556eSOng Boon Leong } 5133bba2556eSOng Boon Leong 5134bba2556eSOng Boon Leong if (status & rx_not_ls) { 5135bba2556eSOng Boon Leong rx_q->state_saved = true; 5136bba2556eSOng Boon Leong rx_q->state.error = error; 5137bba2556eSOng Boon Leong rx_q->state.len = len; 5138bba2556eSOng Boon Leong } 5139bba2556eSOng Boon Leong 5140bba2556eSOng Boon Leong stmmac_finalize_xdp_rx(priv, xdp_status); 5141bba2556eSOng Boon Leong 514268e9c5deSVijayakannan Ayyathurai priv->xstats.rx_pkt_n += count; 
514368e9c5deSVijayakannan Ayyathurai priv->xstats.rxq_stats[queue].rx_pkt_n += count; 514468e9c5deSVijayakannan Ayyathurai 5145bba2556eSOng Boon Leong if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { 5146bba2556eSOng Boon Leong if (failure || stmmac_rx_dirty(priv, queue) > 0) 5147bba2556eSOng Boon Leong xsk_set_rx_need_wakeup(rx_q->xsk_pool); 5148bba2556eSOng Boon Leong else 5149bba2556eSOng Boon Leong xsk_clear_rx_need_wakeup(rx_q->xsk_pool); 5150bba2556eSOng Boon Leong 5151bba2556eSOng Boon Leong return (int)count; 5152bba2556eSOng Boon Leong } 5153bba2556eSOng Boon Leong 5154bba2556eSOng Boon Leong return failure ? limit : (int)count; 5155bba2556eSOng Boon Leong } 5156bba2556eSOng Boon Leong 515732ceabcaSGiuseppe CAVALLARO /** 5158732fdf0eSGiuseppe CAVALLARO * stmmac_rx - manage the receive process 515932ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 516054139cf3SJoao Pinto * @limit: napi bugget 516154139cf3SJoao Pinto * @queue: RX queue index. 516232ceabcaSGiuseppe CAVALLARO * Description : this the function called by the napi poll method. 516332ceabcaSGiuseppe CAVALLARO * It gets all the frames inside the ring. 
516432ceabcaSGiuseppe CAVALLARO */ 516554139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 51667ac6653aSJeff Kirsher { 51678531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 51688fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 5169ec222003SJose Abreu unsigned int count = 0, error = 0, len = 0; 5170ec222003SJose Abreu int status = 0, coe = priv->hw->rx_csum; 517107b39753SAaro Koskinen unsigned int next_entry = rx_q->cur_rx; 51725fabb012SOng Boon Leong enum dma_data_direction dma_dir; 5173bfaf91caSJoakim Zhang unsigned int desc_size; 5174ec222003SJose Abreu struct sk_buff *skb = NULL; 51755fabb012SOng Boon Leong struct xdp_buff xdp; 5176be8b38a7SOng Boon Leong int xdp_status = 0; 51775fabb012SOng Boon Leong int buf_sz; 51785fabb012SOng Boon Leong 51795fabb012SOng Boon Leong dma_dir = page_pool_get_dma_dir(rx_q->page_pool); 51808531c808SChristian Marangi buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 51817ac6653aSJeff Kirsher 518283d7af64SGiuseppe CAVALLARO if (netif_msg_rx_status(priv)) { 5183d0225e7dSAlexandre TORGUE void *rx_head; 5184d0225e7dSAlexandre TORGUE 518538ddc59dSLABBE Corentin netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5186bfaf91caSJoakim Zhang if (priv->extend_desc) { 518754139cf3SJoao Pinto rx_head = (void *)rx_q->dma_erx; 5188bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_extended_desc); 5189bfaf91caSJoakim Zhang } else { 519054139cf3SJoao Pinto rx_head = (void *)rx_q->dma_rx; 5191bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_desc); 5192bfaf91caSJoakim Zhang } 5193d0225e7dSAlexandre TORGUE 51948531c808SChristian Marangi stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5195bfaf91caSJoakim Zhang rx_q->dma_rx_phy, desc_size); 51967ac6653aSJeff Kirsher } 5197c24602efSGiuseppe CAVALLARO while (count < limit) { 519888ebe2cfSJose Abreu unsigned int buf1_len = 0, buf2_len = 0; 
5199ec222003SJose Abreu enum pkt_hash_types hash_type; 52002af6106aSJose Abreu struct stmmac_rx_buffer *buf; 52012af6106aSJose Abreu struct dma_desc *np, *p; 5202ec222003SJose Abreu int entry; 5203ec222003SJose Abreu u32 hash; 52047ac6653aSJeff Kirsher 5205ec222003SJose Abreu if (!count && rx_q->state_saved) { 5206ec222003SJose Abreu skb = rx_q->state.skb; 5207ec222003SJose Abreu error = rx_q->state.error; 5208ec222003SJose Abreu len = rx_q->state.len; 5209ec222003SJose Abreu } else { 5210ec222003SJose Abreu rx_q->state_saved = false; 5211ec222003SJose Abreu skb = NULL; 5212ec222003SJose Abreu error = 0; 5213ec222003SJose Abreu len = 0; 5214ec222003SJose Abreu } 5215ec222003SJose Abreu 5216ec222003SJose Abreu if (count >= limit) 5217ec222003SJose Abreu break; 5218ec222003SJose Abreu 5219ec222003SJose Abreu read_again: 522088ebe2cfSJose Abreu buf1_len = 0; 522188ebe2cfSJose Abreu buf2_len = 0; 522207b39753SAaro Koskinen entry = next_entry; 52232af6106aSJose Abreu buf = &rx_q->buf_pool[entry]; 522407b39753SAaro Koskinen 5225c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 522654139cf3SJoao Pinto p = (struct dma_desc *)(rx_q->dma_erx + entry); 5227c24602efSGiuseppe CAVALLARO else 522854139cf3SJoao Pinto p = rx_q->dma_rx + entry; 5229c24602efSGiuseppe CAVALLARO 5230c1fa3212SFabrice Gasnier /* read the status of the incoming frame */ 523142de047dSJose Abreu status = stmmac_rx_status(priv, &priv->dev->stats, 5232c1fa3212SFabrice Gasnier &priv->xstats, p); 5233c1fa3212SFabrice Gasnier /* check if managed by the DMA otherwise go ahead */ 5234c1fa3212SFabrice Gasnier if (unlikely(status & dma_own)) 52357ac6653aSJeff Kirsher break; 52367ac6653aSJeff Kirsher 5237aa042f60SSong, Yoong Siang rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 52388531c808SChristian Marangi priv->dma_conf.dma_rx_size); 523954139cf3SJoao Pinto next_entry = rx_q->cur_rx; 5240e3ad57c9SGiuseppe Cavallaro 5241c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 524254139cf3SJoao Pinto np = (struct dma_desc 
*)(rx_q->dma_erx + next_entry); 5243c24602efSGiuseppe CAVALLARO else 524454139cf3SJoao Pinto np = rx_q->dma_rx + next_entry; 5245ba1ffd74SGiuseppe CAVALLARO 5246ba1ffd74SGiuseppe CAVALLARO prefetch(np); 52477ac6653aSJeff Kirsher 524842de047dSJose Abreu if (priv->extend_desc) 524942de047dSJose Abreu stmmac_rx_extended_status(priv, &priv->dev->stats, 525042de047dSJose Abreu &priv->xstats, rx_q->dma_erx + entry); 5251891434b1SRayagond Kokatanur if (unlikely(status == discard_frame)) { 52522af6106aSJose Abreu page_pool_recycle_direct(rx_q->page_pool, buf->page); 52532af6106aSJose Abreu buf->page = NULL; 5254ec222003SJose Abreu error = 1; 52550b273ca4SJose Abreu if (!priv->hwts_rx_en) 52560b273ca4SJose Abreu priv->dev->stats.rx_errors++; 5257ec222003SJose Abreu } 5258f748be53SAlexandre TORGUE 5259ec222003SJose Abreu if (unlikely(error && (status & rx_not_ls))) 5260ec222003SJose Abreu goto read_again; 5261ec222003SJose Abreu if (unlikely(error)) { 5262ec222003SJose Abreu dev_kfree_skb(skb); 526388ebe2cfSJose Abreu skb = NULL; 5264cda4985aSJose Abreu count++; 526507b39753SAaro Koskinen continue; 5266e527c4a7SGiuseppe CAVALLARO } 5267e527c4a7SGiuseppe CAVALLARO 5268ec222003SJose Abreu /* Buffer is good. Go on. */ 5269ec222003SJose Abreu 52704744bf07SMatteo Croce prefetch(page_address(buf->page) + buf->page_offset); 527188ebe2cfSJose Abreu if (buf->sec_page) 527288ebe2cfSJose Abreu prefetch(page_address(buf->sec_page)); 527388ebe2cfSJose Abreu 527488ebe2cfSJose Abreu buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 527588ebe2cfSJose Abreu len += buf1_len; 527688ebe2cfSJose Abreu buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 527788ebe2cfSJose Abreu len += buf2_len; 5278ec222003SJose Abreu 5279929d4342SKurt Kanzenbach /* ACS is disabled; strip manually. 
*/ 5280929d4342SKurt Kanzenbach if (likely(!(status & rx_not_ls))) { 52810f296e78SZekun Shen if (buf2_len) { 528288ebe2cfSJose Abreu buf2_len -= ETH_FCS_LEN; 5283ec222003SJose Abreu len -= ETH_FCS_LEN; 52840f296e78SZekun Shen } else if (buf1_len) { 52850f296e78SZekun Shen buf1_len -= ETH_FCS_LEN; 52860f296e78SZekun Shen len -= ETH_FCS_LEN; 52870f296e78SZekun Shen } 528883d7af64SGiuseppe CAVALLARO } 528922ad3838SGiuseppe Cavallaro 5290ec222003SJose Abreu if (!skb) { 5291be8b38a7SOng Boon Leong unsigned int pre_len, sync_len; 5292be8b38a7SOng Boon Leong 52935fabb012SOng Boon Leong dma_sync_single_for_cpu(priv->device, buf->addr, 52945fabb012SOng Boon Leong buf1_len, dma_dir); 52955fabb012SOng Boon Leong 5296d172268fSMatteo Croce xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq); 5297d172268fSMatteo Croce xdp_prepare_buff(&xdp, page_address(buf->page), 5298d172268fSMatteo Croce buf->page_offset, buf1_len, false); 52995fabb012SOng Boon Leong 5300be8b38a7SOng Boon Leong pre_len = xdp.data_end - xdp.data_hard_start - 5301be8b38a7SOng Boon Leong buf->page_offset; 53025fabb012SOng Boon Leong skb = stmmac_xdp_run_prog(priv, &xdp); 5303be8b38a7SOng Boon Leong /* Due xdp_adjust_tail: DMA sync for_device 5304be8b38a7SOng Boon Leong * cover max len CPU touch 5305be8b38a7SOng Boon Leong */ 5306be8b38a7SOng Boon Leong sync_len = xdp.data_end - xdp.data_hard_start - 5307be8b38a7SOng Boon Leong buf->page_offset; 5308be8b38a7SOng Boon Leong sync_len = max(sync_len, pre_len); 53095fabb012SOng Boon Leong 53105fabb012SOng Boon Leong /* For Not XDP_PASS verdict */ 53115fabb012SOng Boon Leong if (IS_ERR(skb)) { 53125fabb012SOng Boon Leong unsigned int xdp_res = -PTR_ERR(skb); 53135fabb012SOng Boon Leong 53145fabb012SOng Boon Leong if (xdp_res & STMMAC_XDP_CONSUMED) { 5315be8b38a7SOng Boon Leong page_pool_put_page(rx_q->page_pool, 5316be8b38a7SOng Boon Leong virt_to_head_page(xdp.data), 5317be8b38a7SOng Boon Leong sync_len, true); 53185fabb012SOng Boon Leong buf->page = NULL; 53195fabb012SOng 
Boon Leong priv->dev->stats.rx_dropped++; 53205fabb012SOng Boon Leong 53215fabb012SOng Boon Leong /* Clear skb as it was set as 53225fabb012SOng Boon Leong * status by XDP program. 53235fabb012SOng Boon Leong */ 53245fabb012SOng Boon Leong skb = NULL; 53255fabb012SOng Boon Leong 53265fabb012SOng Boon Leong if (unlikely((status & rx_not_ls))) 53275fabb012SOng Boon Leong goto read_again; 53285fabb012SOng Boon Leong 53295fabb012SOng Boon Leong count++; 53305fabb012SOng Boon Leong continue; 53318b278a5bSOng Boon Leong } else if (xdp_res & (STMMAC_XDP_TX | 53328b278a5bSOng Boon Leong STMMAC_XDP_REDIRECT)) { 5333be8b38a7SOng Boon Leong xdp_status |= xdp_res; 5334be8b38a7SOng Boon Leong buf->page = NULL; 5335be8b38a7SOng Boon Leong skb = NULL; 5336be8b38a7SOng Boon Leong count++; 5337be8b38a7SOng Boon Leong continue; 53385fabb012SOng Boon Leong } 53395fabb012SOng Boon Leong } 53405fabb012SOng Boon Leong } 53415fabb012SOng Boon Leong 53425fabb012SOng Boon Leong if (!skb) { 53435fabb012SOng Boon Leong /* XDP program may expand or reduce tail */ 53445fabb012SOng Boon Leong buf1_len = xdp.data_end - xdp.data; 53455fabb012SOng Boon Leong 534688ebe2cfSJose Abreu skb = napi_alloc_skb(&ch->rx_napi, buf1_len); 5347ec222003SJose Abreu if (!skb) { 534822ad3838SGiuseppe Cavallaro priv->dev->stats.rx_dropped++; 5349cda4985aSJose Abreu count++; 535088ebe2cfSJose Abreu goto drain_data; 535122ad3838SGiuseppe Cavallaro } 535222ad3838SGiuseppe Cavallaro 53535fabb012SOng Boon Leong /* XDP program may adjust header */ 53545fabb012SOng Boon Leong skb_copy_to_linear_data(skb, xdp.data, buf1_len); 535588ebe2cfSJose Abreu skb_put(skb, buf1_len); 535622ad3838SGiuseppe Cavallaro 5357ec222003SJose Abreu /* Data payload copied into SKB, page ready for recycle */ 5358ec222003SJose Abreu page_pool_recycle_direct(rx_q->page_pool, buf->page); 5359ec222003SJose Abreu buf->page = NULL; 536088ebe2cfSJose Abreu } else if (buf1_len) { 5361ec222003SJose Abreu dma_sync_single_for_cpu(priv->device, buf->addr, 
53625fabb012SOng Boon Leong buf1_len, dma_dir); 5363ec222003SJose Abreu skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 53645fabb012SOng Boon Leong buf->page, buf->page_offset, buf1_len, 53658531c808SChristian Marangi priv->dma_conf.dma_buf_sz); 5366ec222003SJose Abreu 5367ec222003SJose Abreu /* Data payload appended into SKB */ 5368ec222003SJose Abreu page_pool_release_page(rx_q->page_pool, buf->page); 5369ec222003SJose Abreu buf->page = NULL; 53707ac6653aSJeff Kirsher } 537183d7af64SGiuseppe CAVALLARO 537288ebe2cfSJose Abreu if (buf2_len) { 537367afd6d1SJose Abreu dma_sync_single_for_cpu(priv->device, buf->sec_addr, 53745fabb012SOng Boon Leong buf2_len, dma_dir); 537567afd6d1SJose Abreu skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 537688ebe2cfSJose Abreu buf->sec_page, 0, buf2_len, 53778531c808SChristian Marangi priv->dma_conf.dma_buf_sz); 537867afd6d1SJose Abreu 537967afd6d1SJose Abreu /* Data payload appended into SKB */ 538067afd6d1SJose Abreu page_pool_release_page(rx_q->page_pool, buf->sec_page); 538167afd6d1SJose Abreu buf->sec_page = NULL; 538267afd6d1SJose Abreu } 538367afd6d1SJose Abreu 538488ebe2cfSJose Abreu drain_data: 5385ec222003SJose Abreu if (likely(status & rx_not_ls)) 5386ec222003SJose Abreu goto read_again; 538788ebe2cfSJose Abreu if (!skb) 538888ebe2cfSJose Abreu continue; 5389ec222003SJose Abreu 5390ec222003SJose Abreu /* Got entire packet into SKB. Finish it. 
*/ 5391ec222003SJose Abreu 5392ba1ffd74SGiuseppe CAVALLARO stmmac_get_rx_hwtstamp(priv, p, np, skb); 5393b9381985SVince Bridgers stmmac_rx_vlan(priv->dev, skb); 53947ac6653aSJeff Kirsher skb->protocol = eth_type_trans(skb, priv->dev); 53957ac6653aSJeff Kirsher 5396ceb69499SGiuseppe CAVALLARO if (unlikely(!coe)) 53977ac6653aSJeff Kirsher skb_checksum_none_assert(skb); 539862a2ab93SGiuseppe CAVALLARO else 53997ac6653aSJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY; 540062a2ab93SGiuseppe CAVALLARO 540176067459SJose Abreu if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 540276067459SJose Abreu skb_set_hash(skb, hash, hash_type); 540376067459SJose Abreu 540476067459SJose Abreu skb_record_rx_queue(skb, queue); 54054ccb4585SJose Abreu napi_gro_receive(&ch->rx_napi, skb); 540688ebe2cfSJose Abreu skb = NULL; 54077ac6653aSJeff Kirsher 54087ac6653aSJeff Kirsher priv->dev->stats.rx_packets++; 5409ec222003SJose Abreu priv->dev->stats.rx_bytes += len; 5410cda4985aSJose Abreu count++; 54117ac6653aSJeff Kirsher } 5412ec222003SJose Abreu 541388ebe2cfSJose Abreu if (status & rx_not_ls || skb) { 5414ec222003SJose Abreu rx_q->state_saved = true; 5415ec222003SJose Abreu rx_q->state.skb = skb; 5416ec222003SJose Abreu rx_q->state.error = error; 5417ec222003SJose Abreu rx_q->state.len = len; 54187ac6653aSJeff Kirsher } 54197ac6653aSJeff Kirsher 5420be8b38a7SOng Boon Leong stmmac_finalize_xdp_rx(priv, xdp_status); 5421be8b38a7SOng Boon Leong 542254139cf3SJoao Pinto stmmac_rx_refill(priv, queue); 54237ac6653aSJeff Kirsher 54247ac6653aSJeff Kirsher priv->xstats.rx_pkt_n += count; 542568e9c5deSVijayakannan Ayyathurai priv->xstats.rxq_stats[queue].rx_pkt_n += count; 54267ac6653aSJeff Kirsher 54277ac6653aSJeff Kirsher return count; 54287ac6653aSJeff Kirsher } 54297ac6653aSJeff Kirsher 54304ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 54317ac6653aSJeff Kirsher { 54328fce3331SJose Abreu struct stmmac_channel *ch = 54334ccb4585SJose Abreu 
container_of(napi, struct stmmac_channel, rx_napi); 54348fce3331SJose Abreu struct stmmac_priv *priv = ch->priv_data; 54358fce3331SJose Abreu u32 chan = ch->index; 54364ccb4585SJose Abreu int work_done; 54377ac6653aSJeff Kirsher 54389125cdd1SGiuseppe CAVALLARO priv->xstats.napi_poll++; 5439ce736788SJoao Pinto 5440132c32eeSOng Boon Leong work_done = stmmac_rx(priv, budget, chan); 5441021bd5e3SJose Abreu if (work_done < budget && napi_complete_done(napi, work_done)) { 5442021bd5e3SJose Abreu unsigned long flags; 5443021bd5e3SJose Abreu 5444021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 5445021bd5e3SJose Abreu stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 5446021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 5447021bd5e3SJose Abreu } 5448021bd5e3SJose Abreu 54494ccb4585SJose Abreu return work_done; 54504ccb4585SJose Abreu } 5451ce736788SJoao Pinto 54524ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 54534ccb4585SJose Abreu { 54544ccb4585SJose Abreu struct stmmac_channel *ch = 54554ccb4585SJose Abreu container_of(napi, struct stmmac_channel, tx_napi); 54564ccb4585SJose Abreu struct stmmac_priv *priv = ch->priv_data; 54574ccb4585SJose Abreu u32 chan = ch->index; 54584ccb4585SJose Abreu int work_done; 54594ccb4585SJose Abreu 54604ccb4585SJose Abreu priv->xstats.napi_poll++; 54614ccb4585SJose Abreu 5462132c32eeSOng Boon Leong work_done = stmmac_tx_clean(priv, budget, chan); 5463fa0be0a4SJose Abreu work_done = min(work_done, budget); 54648fce3331SJose Abreu 5465021bd5e3SJose Abreu if (work_done < budget && napi_complete_done(napi, work_done)) { 5466021bd5e3SJose Abreu unsigned long flags; 54674ccb4585SJose Abreu 5468021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 5469021bd5e3SJose Abreu stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 5470021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 5471fa0be0a4SJose Abreu } 54728fce3331SJose Abreu 54737ac6653aSJeff Kirsher return 
work_done; 54747ac6653aSJeff Kirsher } 54757ac6653aSJeff Kirsher 5476132c32eeSOng Boon Leong static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) 5477132c32eeSOng Boon Leong { 5478132c32eeSOng Boon Leong struct stmmac_channel *ch = 5479132c32eeSOng Boon Leong container_of(napi, struct stmmac_channel, rxtx_napi); 5480132c32eeSOng Boon Leong struct stmmac_priv *priv = ch->priv_data; 548181d0885dSSong Yoong Siang int rx_done, tx_done, rxtx_done; 5482132c32eeSOng Boon Leong u32 chan = ch->index; 5483132c32eeSOng Boon Leong 5484132c32eeSOng Boon Leong priv->xstats.napi_poll++; 5485132c32eeSOng Boon Leong 5486132c32eeSOng Boon Leong tx_done = stmmac_tx_clean(priv, budget, chan); 5487132c32eeSOng Boon Leong tx_done = min(tx_done, budget); 5488132c32eeSOng Boon Leong 5489132c32eeSOng Boon Leong rx_done = stmmac_rx_zc(priv, budget, chan); 5490132c32eeSOng Boon Leong 549181d0885dSSong Yoong Siang rxtx_done = max(tx_done, rx_done); 549281d0885dSSong Yoong Siang 5493132c32eeSOng Boon Leong /* If either TX or RX work is not complete, return budget 5494132c32eeSOng Boon Leong * and keep pooling 5495132c32eeSOng Boon Leong */ 549681d0885dSSong Yoong Siang if (rxtx_done >= budget) 5497132c32eeSOng Boon Leong return budget; 5498132c32eeSOng Boon Leong 5499132c32eeSOng Boon Leong /* all work done, exit the polling mode */ 550081d0885dSSong Yoong Siang if (napi_complete_done(napi, rxtx_done)) { 5501132c32eeSOng Boon Leong unsigned long flags; 5502132c32eeSOng Boon Leong 5503132c32eeSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 5504132c32eeSOng Boon Leong /* Both RX and TX work done are compelte, 5505132c32eeSOng Boon Leong * so enable both RX & TX IRQs. 
5506132c32eeSOng Boon Leong */ 5507132c32eeSOng Boon Leong stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 5508132c32eeSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 5509132c32eeSOng Boon Leong } 5510132c32eeSOng Boon Leong 551181d0885dSSong Yoong Siang return min(rxtx_done, budget - 1); 5512132c32eeSOng Boon Leong } 5513132c32eeSOng Boon Leong 55147ac6653aSJeff Kirsher /** 55157ac6653aSJeff Kirsher * stmmac_tx_timeout 55167ac6653aSJeff Kirsher * @dev : Pointer to net device structure 5517d0ea5cbdSJesse Brandeburg * @txqueue: the index of the hanging transmit queue 55187ac6653aSJeff Kirsher * Description: this function is called when a packet transmission fails to 55197284a3f1SGiuseppe CAVALLARO * complete within a reasonable time. The driver will mark the error in the 55207ac6653aSJeff Kirsher * netdev structure and arrange for the device to be reset to a sane state 55217ac6653aSJeff Kirsher * in order to transmit a new packet. 55227ac6653aSJeff Kirsher */ 55230290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) 55247ac6653aSJeff Kirsher { 55257ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 55267ac6653aSJeff Kirsher 552734877a15SJose Abreu stmmac_global_err(priv); 55287ac6653aSJeff Kirsher } 55297ac6653aSJeff Kirsher 55307ac6653aSJeff Kirsher /** 553101789349SJiri Pirko * stmmac_set_rx_mode - entry point for multicast addressing 55327ac6653aSJeff Kirsher * @dev : pointer to the device structure 55337ac6653aSJeff Kirsher * Description: 55347ac6653aSJeff Kirsher * This function is a driver entry point which gets called by the kernel 55357ac6653aSJeff Kirsher * whenever multicast addresses must be enabled/disabled. 55367ac6653aSJeff Kirsher * Return value: 55377ac6653aSJeff Kirsher * void. 
55387ac6653aSJeff Kirsher */ 553901789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev) 55407ac6653aSJeff Kirsher { 55417ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 55427ac6653aSJeff Kirsher 5543c10d4c82SJose Abreu stmmac_set_filter(priv, priv->hw, dev); 55447ac6653aSJeff Kirsher } 55457ac6653aSJeff Kirsher 55467ac6653aSJeff Kirsher /** 55477ac6653aSJeff Kirsher * stmmac_change_mtu - entry point to change MTU size for the device. 55487ac6653aSJeff Kirsher * @dev : device pointer. 55497ac6653aSJeff Kirsher * @new_mtu : the new MTU size for the device. 55507ac6653aSJeff Kirsher * Description: the Maximum Transfer Unit (MTU) is used by the network layer 55517ac6653aSJeff Kirsher * to drive packet transmission. Ethernet has an MTU of 1500 octets 55527ac6653aSJeff Kirsher * (ETH_DATA_LEN). This value can be changed with ifconfig. 55537ac6653aSJeff Kirsher * Return value: 55547ac6653aSJeff Kirsher * 0 on success and an appropriate (-)ve integer as defined in errno.h 55557ac6653aSJeff Kirsher * file on failure. 
55567ac6653aSJeff Kirsher */ 55577ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 55587ac6653aSJeff Kirsher { 555938ddc59dSLABBE Corentin struct stmmac_priv *priv = netdev_priv(dev); 5560eaf4fac4SJose Abreu int txfifosz = priv->plat->tx_fifo_size; 556134700796SChristian Marangi struct stmmac_dma_conf *dma_conf; 55625b55299eSDavid Wu const int mtu = new_mtu; 556334700796SChristian Marangi int ret; 5564eaf4fac4SJose Abreu 5565eaf4fac4SJose Abreu if (txfifosz == 0) 5566eaf4fac4SJose Abreu txfifosz = priv->dma_cap.tx_fifo_size; 5567eaf4fac4SJose Abreu 5568eaf4fac4SJose Abreu txfifosz /= priv->plat->tx_queues_to_use; 556938ddc59dSLABBE Corentin 55705fabb012SOng Boon Leong if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { 55715fabb012SOng Boon Leong netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); 55725fabb012SOng Boon Leong return -EINVAL; 55735fabb012SOng Boon Leong } 55745fabb012SOng Boon Leong 5575eaf4fac4SJose Abreu new_mtu = STMMAC_ALIGN(new_mtu); 5576eaf4fac4SJose Abreu 5577eaf4fac4SJose Abreu /* If condition true, FIFO is too small or MTU too large */ 5578eaf4fac4SJose Abreu if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 5579eaf4fac4SJose Abreu return -EINVAL; 5580eaf4fac4SJose Abreu 558134700796SChristian Marangi if (netif_running(dev)) { 558234700796SChristian Marangi netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); 558334700796SChristian Marangi /* Try to allocate the new DMA conf with the new mtu */ 558434700796SChristian Marangi dma_conf = stmmac_setup_dma_desc(priv, mtu); 558534700796SChristian Marangi if (IS_ERR(dma_conf)) { 558634700796SChristian Marangi netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", 558734700796SChristian Marangi mtu); 558834700796SChristian Marangi return PTR_ERR(dma_conf); 558934700796SChristian Marangi } 5590f748be53SAlexandre TORGUE 559134700796SChristian Marangi stmmac_release(dev); 559234700796SChristian Marangi 
559334700796SChristian Marangi ret = __stmmac_open(dev, dma_conf); 559434700796SChristian Marangi kfree(dma_conf); 559534700796SChristian Marangi if (ret) { 559634700796SChristian Marangi netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); 559734700796SChristian Marangi return ret; 559834700796SChristian Marangi } 559934700796SChristian Marangi 560034700796SChristian Marangi stmmac_set_rx_mode(dev); 560134700796SChristian Marangi } 560234700796SChristian Marangi 560334700796SChristian Marangi dev->mtu = mtu; 56047ac6653aSJeff Kirsher netdev_update_features(dev); 56057ac6653aSJeff Kirsher 56067ac6653aSJeff Kirsher return 0; 56077ac6653aSJeff Kirsher } 56087ac6653aSJeff Kirsher 5609c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev, 5610c8f44affSMichał Mirosław netdev_features_t features) 56117ac6653aSJeff Kirsher { 56127ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 56137ac6653aSJeff Kirsher 561438912bdbSDeepak SIKRI if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 56157ac6653aSJeff Kirsher features &= ~NETIF_F_RXCSUM; 5616d2afb5bdSGiuseppe CAVALLARO 56177ac6653aSJeff Kirsher if (!priv->plat->tx_coe) 5618a188222bSTom Herbert features &= ~NETIF_F_CSUM_MASK; 56197ac6653aSJeff Kirsher 56207ac6653aSJeff Kirsher /* Some GMAC devices have a bugged Jumbo frame support that 56217ac6653aSJeff Kirsher * needs to have the Tx COE disabled for oversized frames 56227ac6653aSJeff Kirsher * (due to limited buffer sizes). In this case we disable 5623ceb69499SGiuseppe CAVALLARO * the TX csum insertion in the TDES and not use SF. 
5624ceb69499SGiuseppe CAVALLARO */ 56257ac6653aSJeff Kirsher if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 5626a188222bSTom Herbert features &= ~NETIF_F_CSUM_MASK; 56277ac6653aSJeff Kirsher 5628f748be53SAlexandre TORGUE /* Disable tso if asked by ethtool */ 5629f748be53SAlexandre TORGUE if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 5630f748be53SAlexandre TORGUE if (features & NETIF_F_TSO) 5631f748be53SAlexandre TORGUE priv->tso = true; 5632f748be53SAlexandre TORGUE else 5633f748be53SAlexandre TORGUE priv->tso = false; 5634f748be53SAlexandre TORGUE } 5635f748be53SAlexandre TORGUE 56367ac6653aSJeff Kirsher return features; 56377ac6653aSJeff Kirsher } 56387ac6653aSJeff Kirsher 5639d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev, 5640d2afb5bdSGiuseppe CAVALLARO netdev_features_t features) 5641d2afb5bdSGiuseppe CAVALLARO { 5642d2afb5bdSGiuseppe CAVALLARO struct stmmac_priv *priv = netdev_priv(netdev); 5643d2afb5bdSGiuseppe CAVALLARO 5644d2afb5bdSGiuseppe CAVALLARO /* Keep the COE Type in case of csum is supporting */ 5645d2afb5bdSGiuseppe CAVALLARO if (features & NETIF_F_RXCSUM) 5646d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = priv->plat->rx_coe; 5647d2afb5bdSGiuseppe CAVALLARO else 5648d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = 0; 5649d2afb5bdSGiuseppe CAVALLARO /* No check needed because rx_coe has been set before and it will be 5650d2afb5bdSGiuseppe CAVALLARO * fixed in case of issue. 
5651d2afb5bdSGiuseppe CAVALLARO */ 5652c10d4c82SJose Abreu stmmac_rx_ipc(priv, priv->hw); 5653d2afb5bdSGiuseppe CAVALLARO 5654f8e7dfd6SVincent Whitchurch if (priv->sph_cap) { 5655f8e7dfd6SVincent Whitchurch bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; 5656f8e7dfd6SVincent Whitchurch u32 chan; 56575fabb012SOng Boon Leong 565867afd6d1SJose Abreu for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) 565967afd6d1SJose Abreu stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 5660f8e7dfd6SVincent Whitchurch } 566167afd6d1SJose Abreu 5662d2afb5bdSGiuseppe CAVALLARO return 0; 5663d2afb5bdSGiuseppe CAVALLARO } 5664d2afb5bdSGiuseppe CAVALLARO 56655a558611SOng Boon Leong static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) 56665a558611SOng Boon Leong { 56675a558611SOng Boon Leong struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 56685a558611SOng Boon Leong enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 56695a558611SOng Boon Leong enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 56705a558611SOng Boon Leong bool *hs_enable = &fpe_cfg->hs_enable; 56715a558611SOng Boon Leong 56725a558611SOng Boon Leong if (status == FPE_EVENT_UNKNOWN || !*hs_enable) 56735a558611SOng Boon Leong return; 56745a558611SOng Boon Leong 56755a558611SOng Boon Leong /* If LP has sent verify mPacket, LP is FPE capable */ 56765a558611SOng Boon Leong if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { 56775a558611SOng Boon Leong if (*lp_state < FPE_STATE_CAPABLE) 56785a558611SOng Boon Leong *lp_state = FPE_STATE_CAPABLE; 56795a558611SOng Boon Leong 56805a558611SOng Boon Leong /* If user has requested FPE enable, quickly response */ 56815a558611SOng Boon Leong if (*hs_enable) 56825a558611SOng Boon Leong stmmac_fpe_send_mpacket(priv, priv->ioaddr, 56835a558611SOng Boon Leong MPACKET_RESPONSE); 56845a558611SOng Boon Leong } 56855a558611SOng Boon Leong 56865a558611SOng Boon Leong /* If Local has sent verify mPacket, Local is FPE capable */ 
56875a558611SOng Boon Leong if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { 56885a558611SOng Boon Leong if (*lo_state < FPE_STATE_CAPABLE) 56895a558611SOng Boon Leong *lo_state = FPE_STATE_CAPABLE; 56905a558611SOng Boon Leong } 56915a558611SOng Boon Leong 56925a558611SOng Boon Leong /* If LP has sent response mPacket, LP is entering FPE ON */ 56935a558611SOng Boon Leong if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) 56945a558611SOng Boon Leong *lp_state = FPE_STATE_ENTERING_ON; 56955a558611SOng Boon Leong 56965a558611SOng Boon Leong /* If Local has sent response mPacket, Local is entering FPE ON */ 56975a558611SOng Boon Leong if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) 56985a558611SOng Boon Leong *lo_state = FPE_STATE_ENTERING_ON; 56995a558611SOng Boon Leong 57005a558611SOng Boon Leong if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && 57015a558611SOng Boon Leong !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && 57025a558611SOng Boon Leong priv->fpe_wq) { 57035a558611SOng Boon Leong queue_work(priv->fpe_wq, &priv->fpe_task); 57045a558611SOng Boon Leong } 57055a558611SOng Boon Leong } 57065a558611SOng Boon Leong 570729e6573cSOng Boon Leong static void stmmac_common_interrupt(struct stmmac_priv *priv) 57087ac6653aSJeff Kirsher { 57097bac4e1eSJoao Pinto u32 rx_cnt = priv->plat->rx_queues_to_use; 57107bac4e1eSJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 57117bac4e1eSJoao Pinto u32 queues_count; 57127bac4e1eSJoao Pinto u32 queue; 57137d9e6c5aSJose Abreu bool xmac; 57147bac4e1eSJoao Pinto 57157d9e6c5aSJose Abreu xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 57167bac4e1eSJoao Pinto queues_count = (rx_cnt > tx_cnt) ? 
rx_cnt : tx_cnt; 57177ac6653aSJeff Kirsher 571889f7f2cfSSrinivas Kandagatla if (priv->irq_wake) 571989f7f2cfSSrinivas Kandagatla pm_wakeup_event(priv->device, 0); 572089f7f2cfSSrinivas Kandagatla 5721e49aa315SVoon Weifeng if (priv->dma_cap.estsel) 57229f298959SOng Boon Leong stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, 57239f298959SOng Boon Leong &priv->xstats, tx_cnt); 5724e49aa315SVoon Weifeng 57255a558611SOng Boon Leong if (priv->dma_cap.fpesel) { 57265a558611SOng Boon Leong int status = stmmac_fpe_irq_status(priv, priv->ioaddr, 57275a558611SOng Boon Leong priv->dev); 57285a558611SOng Boon Leong 57295a558611SOng Boon Leong stmmac_fpe_event_status(priv, status); 57305a558611SOng Boon Leong } 57315a558611SOng Boon Leong 57327ac6653aSJeff Kirsher /* To handle GMAC own interrupts */ 57337d9e6c5aSJose Abreu if ((priv->plat->has_gmac) || xmac) { 5734c10d4c82SJose Abreu int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 57358f71a88dSJoao Pinto 5736d765955dSGiuseppe CAVALLARO if (unlikely(status)) { 5737d765955dSGiuseppe CAVALLARO /* For LPI we need to save the tx status */ 57380982a0f6SGiuseppe CAVALLARO if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 5739d765955dSGiuseppe CAVALLARO priv->tx_path_in_lpi_mode = true; 57400982a0f6SGiuseppe CAVALLARO if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 5741d765955dSGiuseppe CAVALLARO priv->tx_path_in_lpi_mode = false; 57427bac4e1eSJoao Pinto } 57437bac4e1eSJoao Pinto 57447bac4e1eSJoao Pinto for (queue = 0; queue < queues_count; queue++) { 57458a7cb245SYannick Vignon status = stmmac_host_mtl_irq_status(priv, priv->hw, 57467bac4e1eSJoao Pinto queue); 57477bac4e1eSJoao Pinto } 574870523e63SGiuseppe CAVALLARO 574970523e63SGiuseppe CAVALLARO /* PCS link status */ 57503fe5cadbSGiuseppe CAVALLARO if (priv->hw->pcs) { 575170523e63SGiuseppe CAVALLARO if (priv->xstats.pcs_link) 575229e6573cSOng Boon Leong netif_carrier_on(priv->dev); 575370523e63SGiuseppe CAVALLARO else 575429e6573cSOng Boon Leong 
netif_carrier_off(priv->dev); 575570523e63SGiuseppe CAVALLARO } 5756f4da5652STan Tee Min 5757f4da5652STan Tee Min stmmac_timestamp_interrupt(priv, priv); 5758d765955dSGiuseppe CAVALLARO } 575929e6573cSOng Boon Leong } 576029e6573cSOng Boon Leong 576129e6573cSOng Boon Leong /** 576229e6573cSOng Boon Leong * stmmac_interrupt - main ISR 576329e6573cSOng Boon Leong * @irq: interrupt number. 576429e6573cSOng Boon Leong * @dev_id: to pass the net device pointer. 576529e6573cSOng Boon Leong * Description: this is the main driver interrupt service routine. 576629e6573cSOng Boon Leong * It can call: 576729e6573cSOng Boon Leong * o DMA service routine (to manage incoming frame reception and transmission 576829e6573cSOng Boon Leong * status) 576929e6573cSOng Boon Leong * o Core interrupts to manage: remote wake-up, management counter, LPI 577029e6573cSOng Boon Leong * interrupts. 577129e6573cSOng Boon Leong */ 577229e6573cSOng Boon Leong static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 577329e6573cSOng Boon Leong { 577429e6573cSOng Boon Leong struct net_device *dev = (struct net_device *)dev_id; 577529e6573cSOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 577629e6573cSOng Boon Leong 577729e6573cSOng Boon Leong /* Check if adapter is up */ 577829e6573cSOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 577929e6573cSOng Boon Leong return IRQ_HANDLED; 578029e6573cSOng Boon Leong 578129e6573cSOng Boon Leong /* Check if a fatal error happened */ 578229e6573cSOng Boon Leong if (stmmac_safety_feat_interrupt(priv)) 578329e6573cSOng Boon Leong return IRQ_HANDLED; 578429e6573cSOng Boon Leong 578529e6573cSOng Boon Leong /* To handle Common interrupts */ 578629e6573cSOng Boon Leong stmmac_common_interrupt(priv); 5787d765955dSGiuseppe CAVALLARO 5788d765955dSGiuseppe CAVALLARO /* To handle DMA interrupts */ 57897ac6653aSJeff Kirsher stmmac_dma_interrupt(priv); 57907ac6653aSJeff Kirsher 57917ac6653aSJeff Kirsher return IRQ_HANDLED; 57927ac6653aSJeff Kirsher } 
57937ac6653aSJeff Kirsher 57948532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 57958532f613SOng Boon Leong { 57968532f613SOng Boon Leong struct net_device *dev = (struct net_device *)dev_id; 57978532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 57988532f613SOng Boon Leong 57998532f613SOng Boon Leong if (unlikely(!dev)) { 58008532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 58018532f613SOng Boon Leong return IRQ_NONE; 58028532f613SOng Boon Leong } 58038532f613SOng Boon Leong 58048532f613SOng Boon Leong /* Check if adapter is up */ 58058532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 58068532f613SOng Boon Leong return IRQ_HANDLED; 58078532f613SOng Boon Leong 58088532f613SOng Boon Leong /* To handle Common interrupts */ 58098532f613SOng Boon Leong stmmac_common_interrupt(priv); 58108532f613SOng Boon Leong 58118532f613SOng Boon Leong return IRQ_HANDLED; 58128532f613SOng Boon Leong } 58138532f613SOng Boon Leong 58148532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 58158532f613SOng Boon Leong { 58168532f613SOng Boon Leong struct net_device *dev = (struct net_device *)dev_id; 58178532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 58188532f613SOng Boon Leong 58198532f613SOng Boon Leong if (unlikely(!dev)) { 58208532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 58218532f613SOng Boon Leong return IRQ_NONE; 58228532f613SOng Boon Leong } 58238532f613SOng Boon Leong 58248532f613SOng Boon Leong /* Check if adapter is up */ 58258532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 58268532f613SOng Boon Leong return IRQ_HANDLED; 58278532f613SOng Boon Leong 58288532f613SOng Boon Leong /* Check if a fatal error happened */ 58298532f613SOng Boon Leong stmmac_safety_feat_interrupt(priv); 58308532f613SOng Boon Leong 58318532f613SOng Boon Leong return IRQ_HANDLED; 
58328532f613SOng Boon Leong } 58338532f613SOng Boon Leong 58348532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 58358532f613SOng Boon Leong { 58368532f613SOng Boon Leong struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 58378531c808SChristian Marangi struct stmmac_dma_conf *dma_conf; 58388532f613SOng Boon Leong int chan = tx_q->queue_index; 58398532f613SOng Boon Leong struct stmmac_priv *priv; 58408532f613SOng Boon Leong int status; 58418532f613SOng Boon Leong 58428531c808SChristian Marangi dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); 58438531c808SChristian Marangi priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 58448532f613SOng Boon Leong 58458532f613SOng Boon Leong if (unlikely(!data)) { 58468532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 58478532f613SOng Boon Leong return IRQ_NONE; 58488532f613SOng Boon Leong } 58498532f613SOng Boon Leong 58508532f613SOng Boon Leong /* Check if adapter is up */ 58518532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 58528532f613SOng Boon Leong return IRQ_HANDLED; 58538532f613SOng Boon Leong 58548532f613SOng Boon Leong status = stmmac_napi_check(priv, chan, DMA_DIR_TX); 58558532f613SOng Boon Leong 58568532f613SOng Boon Leong if (unlikely(status & tx_hard_error_bump_tc)) { 58578532f613SOng Boon Leong /* Try to bump up the dma threshold on this failure */ 58583a6c12a0SXiaoliang Yang stmmac_bump_dma_threshold(priv, chan); 58598532f613SOng Boon Leong } else if (unlikely(status == tx_hard_error)) { 58608532f613SOng Boon Leong stmmac_tx_err(priv, chan); 58618532f613SOng Boon Leong } 58628532f613SOng Boon Leong 58638532f613SOng Boon Leong return IRQ_HANDLED; 58648532f613SOng Boon Leong } 58658532f613SOng Boon Leong 58668532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) 58678532f613SOng Boon Leong { 58688532f613SOng Boon Leong struct stmmac_rx_queue *rx_q = (struct 
stmmac_rx_queue *)data; 58698531c808SChristian Marangi struct stmmac_dma_conf *dma_conf; 58708532f613SOng Boon Leong int chan = rx_q->queue_index; 58718532f613SOng Boon Leong struct stmmac_priv *priv; 58728532f613SOng Boon Leong 58738531c808SChristian Marangi dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); 58748531c808SChristian Marangi priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 58758532f613SOng Boon Leong 58768532f613SOng Boon Leong if (unlikely(!data)) { 58778532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 58788532f613SOng Boon Leong return IRQ_NONE; 58798532f613SOng Boon Leong } 58808532f613SOng Boon Leong 58818532f613SOng Boon Leong /* Check if adapter is up */ 58828532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 58838532f613SOng Boon Leong return IRQ_HANDLED; 58848532f613SOng Boon Leong 58858532f613SOng Boon Leong stmmac_napi_check(priv, chan, DMA_DIR_RX); 58868532f613SOng Boon Leong 58878532f613SOng Boon Leong return IRQ_HANDLED; 58888532f613SOng Boon Leong } 58898532f613SOng Boon Leong 58907ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 58917ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools 5892ceb69499SGiuseppe CAVALLARO * to allow network I/O with interrupts disabled. 
5893ceb69499SGiuseppe CAVALLARO */ 58947ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev) 58957ac6653aSJeff Kirsher { 58968532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 58978532f613SOng Boon Leong int i; 58988532f613SOng Boon Leong 58998532f613SOng Boon Leong /* If adapter is down, do nothing */ 59008532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 59018532f613SOng Boon Leong return; 59028532f613SOng Boon Leong 59038532f613SOng Boon Leong if (priv->plat->multi_msi_en) { 59048532f613SOng Boon Leong for (i = 0; i < priv->plat->rx_queues_to_use; i++) 59058531c808SChristian Marangi stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); 59068532f613SOng Boon Leong 59078532f613SOng Boon Leong for (i = 0; i < priv->plat->tx_queues_to_use; i++) 59088531c808SChristian Marangi stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); 59098532f613SOng Boon Leong } else { 59107ac6653aSJeff Kirsher disable_irq(dev->irq); 59117ac6653aSJeff Kirsher stmmac_interrupt(dev->irq, dev); 59127ac6653aSJeff Kirsher enable_irq(dev->irq); 59137ac6653aSJeff Kirsher } 59148532f613SOng Boon Leong } 59157ac6653aSJeff Kirsher #endif 59167ac6653aSJeff Kirsher 59177ac6653aSJeff Kirsher /** 59187ac6653aSJeff Kirsher * stmmac_ioctl - Entry point for the Ioctl 59197ac6653aSJeff Kirsher * @dev: Device pointer. 59207ac6653aSJeff Kirsher * @rq: An IOCTL specefic structure, that can contain a pointer to 59217ac6653aSJeff Kirsher * a proprietary structure used to pass information to the driver. 59227ac6653aSJeff Kirsher * @cmd: IOCTL command 59237ac6653aSJeff Kirsher * Description: 592432ceabcaSGiuseppe CAVALLARO * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 
59257ac6653aSJeff Kirsher */ 59267ac6653aSJeff Kirsher static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 59277ac6653aSJeff Kirsher { 592874371272SJose Abreu struct stmmac_priv *priv = netdev_priv (dev); 5929891434b1SRayagond Kokatanur int ret = -EOPNOTSUPP; 59307ac6653aSJeff Kirsher 59317ac6653aSJeff Kirsher if (!netif_running(dev)) 59327ac6653aSJeff Kirsher return -EINVAL; 59337ac6653aSJeff Kirsher 5934891434b1SRayagond Kokatanur switch (cmd) { 5935891434b1SRayagond Kokatanur case SIOCGMIIPHY: 5936891434b1SRayagond Kokatanur case SIOCGMIIREG: 5937891434b1SRayagond Kokatanur case SIOCSMIIREG: 593874371272SJose Abreu ret = phylink_mii_ioctl(priv->phylink, rq, cmd); 5939891434b1SRayagond Kokatanur break; 5940891434b1SRayagond Kokatanur case SIOCSHWTSTAMP: 5941d6228b7cSArtem Panfilov ret = stmmac_hwtstamp_set(dev, rq); 5942d6228b7cSArtem Panfilov break; 5943d6228b7cSArtem Panfilov case SIOCGHWTSTAMP: 5944d6228b7cSArtem Panfilov ret = stmmac_hwtstamp_get(dev, rq); 5945891434b1SRayagond Kokatanur break; 5946891434b1SRayagond Kokatanur default: 5947891434b1SRayagond Kokatanur break; 5948891434b1SRayagond Kokatanur } 59497ac6653aSJeff Kirsher 59507ac6653aSJeff Kirsher return ret; 59517ac6653aSJeff Kirsher } 59527ac6653aSJeff Kirsher 59534dbbe8ddSJose Abreu static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 59544dbbe8ddSJose Abreu void *cb_priv) 59554dbbe8ddSJose Abreu { 59564dbbe8ddSJose Abreu struct stmmac_priv *priv = cb_priv; 59574dbbe8ddSJose Abreu int ret = -EOPNOTSUPP; 59584dbbe8ddSJose Abreu 5959425eabddSJose Abreu if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) 5960425eabddSJose Abreu return ret; 5961425eabddSJose Abreu 5962bba2556eSOng Boon Leong __stmmac_disable_all_queues(priv); 59634dbbe8ddSJose Abreu 59644dbbe8ddSJose Abreu switch (type) { 59654dbbe8ddSJose Abreu case TC_SETUP_CLSU32: 59664dbbe8ddSJose Abreu ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 59674dbbe8ddSJose Abreu break; 
5968425eabddSJose Abreu case TC_SETUP_CLSFLOWER: 5969425eabddSJose Abreu ret = stmmac_tc_setup_cls(priv, priv, type_data); 5970425eabddSJose Abreu break; 59714dbbe8ddSJose Abreu default: 59724dbbe8ddSJose Abreu break; 59734dbbe8ddSJose Abreu } 59744dbbe8ddSJose Abreu 59754dbbe8ddSJose Abreu stmmac_enable_all_queues(priv); 59764dbbe8ddSJose Abreu return ret; 59774dbbe8ddSJose Abreu } 59784dbbe8ddSJose Abreu 5979955bcb6eSPablo Neira Ayuso static LIST_HEAD(stmmac_block_cb_list); 5980955bcb6eSPablo Neira Ayuso 59814dbbe8ddSJose Abreu static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 59824dbbe8ddSJose Abreu void *type_data) 59834dbbe8ddSJose Abreu { 59844dbbe8ddSJose Abreu struct stmmac_priv *priv = netdev_priv(ndev); 59854dbbe8ddSJose Abreu 59864dbbe8ddSJose Abreu switch (type) { 59874dbbe8ddSJose Abreu case TC_SETUP_BLOCK: 5988955bcb6eSPablo Neira Ayuso return flow_block_cb_setup_simple(type_data, 5989955bcb6eSPablo Neira Ayuso &stmmac_block_cb_list, 59904e95bc26SPablo Neira Ayuso stmmac_setup_tc_block_cb, 59914e95bc26SPablo Neira Ayuso priv, priv, true); 59921f705bc6SJose Abreu case TC_SETUP_QDISC_CBS: 59931f705bc6SJose Abreu return stmmac_tc_setup_cbs(priv, priv, type_data); 5994b60189e0SJose Abreu case TC_SETUP_QDISC_TAPRIO: 5995b60189e0SJose Abreu return stmmac_tc_setup_taprio(priv, priv, type_data); 5996430b383cSJose Abreu case TC_SETUP_QDISC_ETF: 5997430b383cSJose Abreu return stmmac_tc_setup_etf(priv, priv, type_data); 59984dbbe8ddSJose Abreu default: 59994dbbe8ddSJose Abreu return -EOPNOTSUPP; 60004dbbe8ddSJose Abreu } 60014dbbe8ddSJose Abreu } 60024dbbe8ddSJose Abreu 60034993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, 60044993e5b3SJose Abreu struct net_device *sb_dev) 60054993e5b3SJose Abreu { 6006b7766206SJose Abreu int gso = skb_shinfo(skb)->gso_type; 6007b7766206SJose Abreu 6008b7766206SJose Abreu if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { 60094993e5b3SJose Abreu 
/* 6010b7766206SJose Abreu * There is no way to determine the number of TSO/USO 60114993e5b3SJose Abreu * capable Queues. Let's use always the Queue 0 6012b7766206SJose Abreu * because if TSO/USO is supported then at least this 60134993e5b3SJose Abreu * one will be capable. 60144993e5b3SJose Abreu */ 60154993e5b3SJose Abreu return 0; 60164993e5b3SJose Abreu } 60174993e5b3SJose Abreu 60184993e5b3SJose Abreu return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; 60194993e5b3SJose Abreu } 60204993e5b3SJose Abreu 6021a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 6022a830405eSBhadram Varka { 6023a830405eSBhadram Varka struct stmmac_priv *priv = netdev_priv(ndev); 6024a830405eSBhadram Varka int ret = 0; 6025a830405eSBhadram Varka 602685648865SMinghao Chi ret = pm_runtime_resume_and_get(priv->device); 602785648865SMinghao Chi if (ret < 0) 60284691ffb1SJoakim Zhang return ret; 60294691ffb1SJoakim Zhang 6030a830405eSBhadram Varka ret = eth_mac_addr(ndev, addr); 6031a830405eSBhadram Varka if (ret) 60324691ffb1SJoakim Zhang goto set_mac_error; 6033a830405eSBhadram Varka 6034c10d4c82SJose Abreu stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 6035a830405eSBhadram Varka 60364691ffb1SJoakim Zhang set_mac_error: 60374691ffb1SJoakim Zhang pm_runtime_put(priv->device); 60384691ffb1SJoakim Zhang 6039a830405eSBhadram Varka return ret; 6040a830405eSBhadram Varka } 6041a830405eSBhadram Varka 604250fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 60437ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir; 60447ac29055SGiuseppe CAVALLARO 6045c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc, 6046bfaf91caSJoakim Zhang struct seq_file *seq, dma_addr_t dma_phy_addr) 60477ac29055SGiuseppe CAVALLARO { 60487ac29055SGiuseppe CAVALLARO int i; 6049c24602efSGiuseppe CAVALLARO struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 6050c24602efSGiuseppe CAVALLARO struct 
dma_desc *p = (struct dma_desc *)head; 6051bfaf91caSJoakim Zhang dma_addr_t dma_addr; 60527ac29055SGiuseppe CAVALLARO 6053c24602efSGiuseppe CAVALLARO for (i = 0; i < size; i++) { 6054c24602efSGiuseppe CAVALLARO if (extend_desc) { 6055bfaf91caSJoakim Zhang dma_addr = dma_phy_addr + i * sizeof(*ep); 6056bfaf91caSJoakim Zhang seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6057bfaf91caSJoakim Zhang i, &dma_addr, 6058f8be0d78SMichael Weiser le32_to_cpu(ep->basic.des0), 6059f8be0d78SMichael Weiser le32_to_cpu(ep->basic.des1), 6060f8be0d78SMichael Weiser le32_to_cpu(ep->basic.des2), 6061f8be0d78SMichael Weiser le32_to_cpu(ep->basic.des3)); 6062c24602efSGiuseppe CAVALLARO ep++; 6063c24602efSGiuseppe CAVALLARO } else { 6064bfaf91caSJoakim Zhang dma_addr = dma_phy_addr + i * sizeof(*p); 6065bfaf91caSJoakim Zhang seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6066bfaf91caSJoakim Zhang i, &dma_addr, 6067f8be0d78SMichael Weiser le32_to_cpu(p->des0), le32_to_cpu(p->des1), 6068f8be0d78SMichael Weiser le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 6069c24602efSGiuseppe CAVALLARO p++; 6070c24602efSGiuseppe CAVALLARO } 60717ac29055SGiuseppe CAVALLARO seq_printf(seq, "\n"); 60727ac29055SGiuseppe CAVALLARO } 6073c24602efSGiuseppe CAVALLARO } 60747ac29055SGiuseppe CAVALLARO 6075fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v) 6076c24602efSGiuseppe CAVALLARO { 6077c24602efSGiuseppe CAVALLARO struct net_device *dev = seq->private; 6078c24602efSGiuseppe CAVALLARO struct stmmac_priv *priv = netdev_priv(dev); 607954139cf3SJoao Pinto u32 rx_count = priv->plat->rx_queues_to_use; 6080ce736788SJoao Pinto u32 tx_count = priv->plat->tx_queues_to_use; 608154139cf3SJoao Pinto u32 queue; 608254139cf3SJoao Pinto 60835f2b8b62SThierry Reding if ((dev->flags & IFF_UP) == 0) 60845f2b8b62SThierry Reding return 0; 60855f2b8b62SThierry Reding 608654139cf3SJoao Pinto for (queue = 0; queue < rx_count; queue++) { 60878531c808SChristian Marangi struct 
stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			/* TBS rings use a different descriptor layout and are
			 * not dumped here.
			 */
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);

/* debugfs "dma_cap" show handler: dump the DMA HW capability fields as a
 * human-readable Y/N (or numeric) table.  Only meaningful when the HW
 * capability register is supported (priv->hw_cap_support); otherwise a
 * single notice line is printed.
 */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* GMAC4+ reports a single RX COE flag; older cores split it into
	 * type1/type2 capabilities.
	 */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);

/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Only react to events on netdevs driven by this driver. */
	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};

/* Create the per-netdev debugfs directory and its entries
 * ("descriptors_status" and "dma_cap").  Held under rtnl_lock so the
 * directory name cannot race with a concurrent interface rename.
 */
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}

/* Tear down everything created by stmmac_init_fs(). */
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

/* Bitwise CRC-32 (reflected, polynomial 0xedb88320) over the low
 * get_bitmask_order(VLAN_VID_MASK) bits of the little-endian VID.
 * Matches the hash the MAC applies for VLAN hash filtering.
 */
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		/* Load the next input byte every 8 bits. */
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}

/* Recompute and program the VLAN hash filter (or the single perfect-match
 * entry when the HW lacks VLAN hash support) from priv->active_vlans.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid,
priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		/* Top 4 bits of the bit-reversed CRC select the hash bin. */
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		/* No hash support: fall back to a single perfect-match VID
		 * (the last one iterated above).
		 */
		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}

/* .ndo_vlan_rx_add_vid: record the VID and reprogram the RX VLAN filters.
 * NOTE(review): unlike the kill_vid path below, this path does not take a
 * runtime-PM reference before touching priv->hw — confirm the device is
 * guaranteed resumed here (interface up) or mirror the kill_vid handling.
 */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		/* Roll back the bookkeeping if the HW update failed. */
		clear_bit(vid, priv->active_vlans);
		return ret;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return 0;
}

/* .ndo_vlan_rx_kill_vid: remove the VID from the HW filters; the device is
 * runtime-resumed around the register accesses.
 */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto del_vlan_error;
	}

	ret = stmmac_vlan_update(priv, is_double);

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}

/* .ndo_bpf: dispatch XDP program attach and XSK pool setup requests. */
static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
					     bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}

/* .ndo_xdp_xmit: transmit a batch of XDP frames on the per-CPU TX queue;
 * returns the number of frames actually queued.
 */
static int stmmac_xdp_xmit(struct net_device
*dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	for (i = 0; i < num_frames; i++) {
		int res;

		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		/* Stop on the first frame the ring could not accept. */
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}

/* Quiesce one RX queue: mask its DMA IRQ under the channel lock, stop RX
 * DMA and free the queue's descriptor resources.
 */
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}

/* Re-arm one RX queue: allocate and initialize its descriptor ring,
 * reprogram the DMA channel and re-enable its IRQ.  Errors are logged and
 * the queue is left disabled (void return).
 */
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	u32 buf_size;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_reset_rx_queue(priv, queue);
stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, rx_q->queue_index);

	/* Tail pointer covers the descriptors that already own buffers. */
	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
						 sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
			       rx_q->rx_tail_addr, rx_q->queue_index);

	/* Buffer size comes from the XSK pool when zero-copy is active,
	 * otherwise from the driver's own buffer configuration.
	 */
	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      buf_size,
				      rx_q->queue_index);
	} else {
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      priv->dma_conf.dma_buf_sz,
				      rx_q->queue_index);
	}

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}

/* Quiesce one TX queue: mask its DMA IRQ under the channel lock, stop TX
 * DMA and free the queue's descriptor resources.
 */
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}

/* Re-arm one TX queue: allocate and initialize its descriptor ring,
 * reprogram the DMA channel (including TBS if available) and re-enable its
 * IRQ.  Errors are logged and the queue is left disabled (void return).
 */
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_reset_tx_queue(priv, queue);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, tx_q->queue_index);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);

	tx_q->tx_tail_addr =
tx_q->dma_tx_phy;
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
			       tx_q->tx_tail_addr, tx_q->queue_index);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}

/* Full datapath teardown used when switching XDP state: stop TX, NAPI,
 * timers and DMA, release IRQs and descriptor memory, and disable the MAC.
 * Counterpart of stmmac_xdp_open().
 */
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Ensure tx function is not running */
	netif_tx_disable(dev);

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}

/* Full datapath bring-up used when switching XDP state: allocate and
 * initialize all descriptor rings, program every DMA channel, request IRQs
 * and start the MAC and NAPI.  Counterpart of stmmac_xdp_release().
 * Returns 0 on success or a negative errno, unwinding via the goto labels.
 */
int stmmac_xdp_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 buf_size;
	bool sph_en;
	u32 chan;
	int ret;

	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1,
1);
	}

	/* Adjust Split header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_cnt; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* Tail pointer covers the descriptors that own buffers. */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);

		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      rx_q->queue_index);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_conf.dma_buf_sz,
					      rx_q->queue_index);
		}

		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_cnt; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Start Rx & Tx DMA Channels */
	stmmac_start_all_dma(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	/* Enable NAPI process*/
	stmmac_enable_all_queues(priv);
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
	return ret;
}

/* .ndo_xsk_wakeup: kick the rxtx NAPI for an AF_XDP socket on @queue.
 * Returns -ENETDOWN / -EINVAL when the interface or queue cannot service
 * the request.
 */
int stmmac_xsk_wakeup(struct net_device
*dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	struct stmmac_channel *ch;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -EINVAL;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->dma_conf.rx_queue[queue];
	tx_q = &priv->dma_conf.tx_queue[queue];
	ch = &priv->channel[queue];

	/* Nothing to wake if neither direction has an XSK pool bound. */
	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}

static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
};

/* Service-task helper: if a reset was requested and the interface is not
 * already going down, bounce the netdev (close + open) under rtnl_lock,
 * guarding against concurrent resets with the STMMAC_RESETING bit.
 */
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}

/* Workqueue entry point: run the reset subtask and clear the scheduled
 * flag so the service task can be queued again.
 */
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}

/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
6791cf3f047bSGiuseppe CAVALLARO */ 6792cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv) 6793cf3f047bSGiuseppe CAVALLARO { 67945f0456b4SJose Abreu int ret; 6795cf3f047bSGiuseppe CAVALLARO 67969f93ac8dSLABBE Corentin /* dwmac-sun8i only work in chain mode */ 67979f93ac8dSLABBE Corentin if (priv->plat->has_sun8i) 67989f93ac8dSLABBE Corentin chain_mode = 1; 67995f0456b4SJose Abreu priv->chain_mode = chain_mode; 68009f93ac8dSLABBE Corentin 68015f0456b4SJose Abreu /* Initialize HW Interface */ 68025f0456b4SJose Abreu ret = stmmac_hwif_init(priv); 68035f0456b4SJose Abreu if (ret) 68045f0456b4SJose Abreu return ret; 68054a7d666aSGiuseppe CAVALLARO 6806cf3f047bSGiuseppe CAVALLARO /* Get the HW capability (new GMAC newer than 3.50a) */ 6807cf3f047bSGiuseppe CAVALLARO priv->hw_cap_support = stmmac_get_hw_features(priv); 6808cf3f047bSGiuseppe CAVALLARO if (priv->hw_cap_support) { 680938ddc59dSLABBE Corentin dev_info(priv->device, "DMA HW capability register supported\n"); 6810cf3f047bSGiuseppe CAVALLARO 6811cf3f047bSGiuseppe CAVALLARO /* We can override some gmac/dma configuration fields: e.g. 6812cf3f047bSGiuseppe CAVALLARO * enh_desc, tx_coe (e.g. that are passed through the 6813cf3f047bSGiuseppe CAVALLARO * platform) with the values from the HW capability 6814cf3f047bSGiuseppe CAVALLARO * register (if supported). 
6815cf3f047bSGiuseppe CAVALLARO */ 6816cf3f047bSGiuseppe CAVALLARO priv->plat->enh_desc = priv->dma_cap.enh_desc; 68175a9b876eSLing Pei Lee priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && 68185a9b876eSLing Pei Lee !priv->plat->use_phy_wol; 68193fe5cadbSGiuseppe CAVALLARO priv->hw->pmt = priv->plat->pmt; 6820b8ef7020SBiao Huang if (priv->dma_cap.hash_tb_sz) { 6821b8ef7020SBiao Huang priv->hw->multicast_filter_bins = 6822b8ef7020SBiao Huang (BIT(priv->dma_cap.hash_tb_sz) << 5); 6823b8ef7020SBiao Huang priv->hw->mcast_bits_log2 = 6824b8ef7020SBiao Huang ilog2(priv->hw->multicast_filter_bins); 6825b8ef7020SBiao Huang } 682638912bdbSDeepak SIKRI 6827a8df35d4SEzequiel Garcia /* TXCOE doesn't work in thresh DMA mode */ 6828a8df35d4SEzequiel Garcia if (priv->plat->force_thresh_dma_mode) 6829a8df35d4SEzequiel Garcia priv->plat->tx_coe = 0; 6830a8df35d4SEzequiel Garcia else 683138912bdbSDeepak SIKRI priv->plat->tx_coe = priv->dma_cap.tx_coe; 6832a8df35d4SEzequiel Garcia 6833f748be53SAlexandre TORGUE /* In case of GMAC4 rx_coe is from HW cap register. 
*/ 6834f748be53SAlexandre TORGUE priv->plat->rx_coe = priv->dma_cap.rx_coe; 683538912bdbSDeepak SIKRI 683638912bdbSDeepak SIKRI if (priv->dma_cap.rx_coe_type2) 683738912bdbSDeepak SIKRI priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 683838912bdbSDeepak SIKRI else if (priv->dma_cap.rx_coe_type1) 683938912bdbSDeepak SIKRI priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; 684038912bdbSDeepak SIKRI 684138ddc59dSLABBE Corentin } else { 684238ddc59dSLABBE Corentin dev_info(priv->device, "No HW DMA feature register supported\n"); 684338ddc59dSLABBE Corentin } 6844cf3f047bSGiuseppe CAVALLARO 6845d2afb5bdSGiuseppe CAVALLARO if (priv->plat->rx_coe) { 6846d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = priv->plat->rx_coe; 684738ddc59dSLABBE Corentin dev_info(priv->device, "RX Checksum Offload Engine supported\n"); 6848f748be53SAlexandre TORGUE if (priv->synopsys_id < DWMAC_CORE_4_00) 684938ddc59dSLABBE Corentin dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); 6850d2afb5bdSGiuseppe CAVALLARO } 6851cf3f047bSGiuseppe CAVALLARO if (priv->plat->tx_coe) 685238ddc59dSLABBE Corentin dev_info(priv->device, "TX Checksum insertion supported\n"); 6853cf3f047bSGiuseppe CAVALLARO 6854cf3f047bSGiuseppe CAVALLARO if (priv->plat->pmt) { 685538ddc59dSLABBE Corentin dev_info(priv->device, "Wake-Up On Lan supported\n"); 6856cf3f047bSGiuseppe CAVALLARO device_set_wakeup_capable(priv->device, 1); 6857cf3f047bSGiuseppe CAVALLARO } 6858cf3f047bSGiuseppe CAVALLARO 6859f748be53SAlexandre TORGUE if (priv->dma_cap.tsoen) 686038ddc59dSLABBE Corentin dev_info(priv->device, "TSO supported\n"); 6861f748be53SAlexandre TORGUE 6862e0f9956aSChuah, Kim Tatt priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; 6863e0f9956aSChuah, Kim Tatt priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; 6864e0f9956aSChuah, Kim Tatt 68657cfde0afSJose Abreu /* Run HW quirks, if any */ 68667cfde0afSJose Abreu if (priv->hwif_quirks) { 68677cfde0afSJose Abreu ret = priv->hwif_quirks(priv); 68687cfde0afSJose Abreu if (ret) 
68697cfde0afSJose Abreu return ret; 68707cfde0afSJose Abreu } 68717cfde0afSJose Abreu 68723b509466SJose Abreu /* Rx Watchdog is available in the COREs newer than the 3.40. 68733b509466SJose Abreu * In some case, for example on bugged HW this feature 68743b509466SJose Abreu * has to be disable and this can be done by passing the 68753b509466SJose Abreu * riwt_off field from the platform. 68763b509466SJose Abreu */ 68773b509466SJose Abreu if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 68783b509466SJose Abreu (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 68793b509466SJose Abreu priv->use_riwt = 1; 68803b509466SJose Abreu dev_info(priv->device, 68813b509466SJose Abreu "Enable RX Mitigation via HW Watchdog Timer\n"); 68823b509466SJose Abreu } 68833b509466SJose Abreu 6884c24602efSGiuseppe CAVALLARO return 0; 6885cf3f047bSGiuseppe CAVALLARO } 6886cf3f047bSGiuseppe CAVALLARO 68870366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev) 68880366f7e0SOng Boon Leong { 68890366f7e0SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 68900366f7e0SOng Boon Leong u32 queue, maxq; 68910366f7e0SOng Boon Leong 68920366f7e0SOng Boon Leong maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 68930366f7e0SOng Boon Leong 68940366f7e0SOng Boon Leong for (queue = 0; queue < maxq; queue++) { 68950366f7e0SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 68960366f7e0SOng Boon Leong 68970366f7e0SOng Boon Leong ch->priv_data = priv; 68980366f7e0SOng Boon Leong ch->index = queue; 68992b94f526SMarek Szyprowski spin_lock_init(&ch->lock); 69000366f7e0SOng Boon Leong 69010366f7e0SOng Boon Leong if (queue < priv->plat->rx_queues_to_use) { 6902b48b89f9SJakub Kicinski netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); 69030366f7e0SOng Boon Leong } 69040366f7e0SOng Boon Leong if (queue < priv->plat->tx_queues_to_use) { 690516d083e2SJakub Kicinski netif_napi_add_tx(dev, &ch->tx_napi, 690616d083e2SJakub Kicinski 
stmmac_napi_poll_tx); 69070366f7e0SOng Boon Leong } 6908132c32eeSOng Boon Leong if (queue < priv->plat->rx_queues_to_use && 6909132c32eeSOng Boon Leong queue < priv->plat->tx_queues_to_use) { 6910132c32eeSOng Boon Leong netif_napi_add(dev, &ch->rxtx_napi, 6911b48b89f9SJakub Kicinski stmmac_napi_poll_rxtx); 6912132c32eeSOng Boon Leong } 69130366f7e0SOng Boon Leong } 69140366f7e0SOng Boon Leong } 69150366f7e0SOng Boon Leong 69160366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev) 69170366f7e0SOng Boon Leong { 69180366f7e0SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 69190366f7e0SOng Boon Leong u32 queue, maxq; 69200366f7e0SOng Boon Leong 69210366f7e0SOng Boon Leong maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 69220366f7e0SOng Boon Leong 69230366f7e0SOng Boon Leong for (queue = 0; queue < maxq; queue++) { 69240366f7e0SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 69250366f7e0SOng Boon Leong 69260366f7e0SOng Boon Leong if (queue < priv->plat->rx_queues_to_use) 69270366f7e0SOng Boon Leong netif_napi_del(&ch->rx_napi); 69280366f7e0SOng Boon Leong if (queue < priv->plat->tx_queues_to_use) 69290366f7e0SOng Boon Leong netif_napi_del(&ch->tx_napi); 6930132c32eeSOng Boon Leong if (queue < priv->plat->rx_queues_to_use && 6931132c32eeSOng Boon Leong queue < priv->plat->tx_queues_to_use) { 6932132c32eeSOng Boon Leong netif_napi_del(&ch->rxtx_napi); 6933132c32eeSOng Boon Leong } 69340366f7e0SOng Boon Leong } 69350366f7e0SOng Boon Leong } 69360366f7e0SOng Boon Leong 69370366f7e0SOng Boon Leong int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 69380366f7e0SOng Boon Leong { 69390366f7e0SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 69400366f7e0SOng Boon Leong int ret = 0; 69410366f7e0SOng Boon Leong 69420366f7e0SOng Boon Leong if (netif_running(dev)) 69430366f7e0SOng Boon Leong stmmac_release(dev); 69440366f7e0SOng Boon Leong 69450366f7e0SOng Boon Leong 
stmmac_napi_del(dev); 69460366f7e0SOng Boon Leong 69470366f7e0SOng Boon Leong priv->plat->rx_queues_to_use = rx_cnt; 69480366f7e0SOng Boon Leong priv->plat->tx_queues_to_use = tx_cnt; 69490366f7e0SOng Boon Leong 69500366f7e0SOng Boon Leong stmmac_napi_add(dev); 69510366f7e0SOng Boon Leong 69520366f7e0SOng Boon Leong if (netif_running(dev)) 69530366f7e0SOng Boon Leong ret = stmmac_open(dev); 69540366f7e0SOng Boon Leong 69550366f7e0SOng Boon Leong return ret; 69560366f7e0SOng Boon Leong } 69570366f7e0SOng Boon Leong 6958aa042f60SSong, Yoong Siang int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 6959aa042f60SSong, Yoong Siang { 6960aa042f60SSong, Yoong Siang struct stmmac_priv *priv = netdev_priv(dev); 6961aa042f60SSong, Yoong Siang int ret = 0; 6962aa042f60SSong, Yoong Siang 6963aa042f60SSong, Yoong Siang if (netif_running(dev)) 6964aa042f60SSong, Yoong Siang stmmac_release(dev); 6965aa042f60SSong, Yoong Siang 69668531c808SChristian Marangi priv->dma_conf.dma_rx_size = rx_size; 69678531c808SChristian Marangi priv->dma_conf.dma_tx_size = tx_size; 6968aa042f60SSong, Yoong Siang 6969aa042f60SSong, Yoong Siang if (netif_running(dev)) 6970aa042f60SSong, Yoong Siang ret = stmmac_open(dev); 6971aa042f60SSong, Yoong Siang 6972aa042f60SSong, Yoong Siang return ret; 6973aa042f60SSong, Yoong Siang } 6974aa042f60SSong, Yoong Siang 69755a558611SOng Boon Leong #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" 69765a558611SOng Boon Leong static void stmmac_fpe_lp_task(struct work_struct *work) 69775a558611SOng Boon Leong { 69785a558611SOng Boon Leong struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 69795a558611SOng Boon Leong fpe_task); 69805a558611SOng Boon Leong struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 69815a558611SOng Boon Leong enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 69825a558611SOng Boon Leong enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 69835a558611SOng Boon 
Leong bool *hs_enable = &fpe_cfg->hs_enable; 69845a558611SOng Boon Leong bool *enable = &fpe_cfg->enable; 69855a558611SOng Boon Leong int retries = 20; 69865a558611SOng Boon Leong 69875a558611SOng Boon Leong while (retries-- > 0) { 69885a558611SOng Boon Leong /* Bail out immediately if FPE handshake is OFF */ 69895a558611SOng Boon Leong if (*lo_state == FPE_STATE_OFF || !*hs_enable) 69905a558611SOng Boon Leong break; 69915a558611SOng Boon Leong 69925a558611SOng Boon Leong if (*lo_state == FPE_STATE_ENTERING_ON && 69935a558611SOng Boon Leong *lp_state == FPE_STATE_ENTERING_ON) { 69945a558611SOng Boon Leong stmmac_fpe_configure(priv, priv->ioaddr, 69955a558611SOng Boon Leong priv->plat->tx_queues_to_use, 69965a558611SOng Boon Leong priv->plat->rx_queues_to_use, 69975a558611SOng Boon Leong *enable); 69985a558611SOng Boon Leong 69995a558611SOng Boon Leong netdev_info(priv->dev, "configured FPE\n"); 70005a558611SOng Boon Leong 70015a558611SOng Boon Leong *lo_state = FPE_STATE_ON; 70025a558611SOng Boon Leong *lp_state = FPE_STATE_ON; 70035a558611SOng Boon Leong netdev_info(priv->dev, "!!! 
BOTH FPE stations ON\n"); 70045a558611SOng Boon Leong break; 70055a558611SOng Boon Leong } 70065a558611SOng Boon Leong 70075a558611SOng Boon Leong if ((*lo_state == FPE_STATE_CAPABLE || 70085a558611SOng Boon Leong *lo_state == FPE_STATE_ENTERING_ON) && 70095a558611SOng Boon Leong *lp_state != FPE_STATE_ON) { 70105a558611SOng Boon Leong netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, 70115a558611SOng Boon Leong *lo_state, *lp_state); 70125a558611SOng Boon Leong stmmac_fpe_send_mpacket(priv, priv->ioaddr, 70135a558611SOng Boon Leong MPACKET_VERIFY); 70145a558611SOng Boon Leong } 70155a558611SOng Boon Leong /* Sleep then retry */ 70165a558611SOng Boon Leong msleep(500); 70175a558611SOng Boon Leong } 70185a558611SOng Boon Leong 70195a558611SOng Boon Leong clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 70205a558611SOng Boon Leong } 70215a558611SOng Boon Leong 70225a558611SOng Boon Leong void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) 70235a558611SOng Boon Leong { 70245a558611SOng Boon Leong if (priv->plat->fpe_cfg->hs_enable != enable) { 70255a558611SOng Boon Leong if (enable) { 70265a558611SOng Boon Leong stmmac_fpe_send_mpacket(priv, priv->ioaddr, 70275a558611SOng Boon Leong MPACKET_VERIFY); 70285a558611SOng Boon Leong } else { 70295a558611SOng Boon Leong priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; 70305a558611SOng Boon Leong priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; 70315a558611SOng Boon Leong } 70325a558611SOng Boon Leong 70335a558611SOng Boon Leong priv->plat->fpe_cfg->hs_enable = enable; 70345a558611SOng Boon Leong } 70355a558611SOng Boon Leong } 70365a558611SOng Boon Leong 7037cf3f047bSGiuseppe CAVALLARO /** 7038bfab27a1SGiuseppe CAVALLARO * stmmac_dvr_probe 7039bfab27a1SGiuseppe CAVALLARO * @device: device pointer 7040ff3dd78cSGiuseppe CAVALLARO * @plat_dat: platform data pointer 7041e56788cfSJoachim Eastwood * @res: stmmac resource pointer 7042bfab27a1SGiuseppe CAVALLARO * Description: this is the main probe function used 
to 7043bfab27a1SGiuseppe CAVALLARO * call the alloc_etherdev, allocate the priv structure. 70449afec6efSAndy Shevchenko * Return: 704515ffac73SJoachim Eastwood * returns 0 on success, otherwise errno. 70467ac6653aSJeff Kirsher */ 704715ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device, 7048cf3f047bSGiuseppe CAVALLARO struct plat_stmmacenet_data *plat_dat, 7049e56788cfSJoachim Eastwood struct stmmac_resources *res) 70507ac6653aSJeff Kirsher { 7051bfab27a1SGiuseppe CAVALLARO struct net_device *ndev = NULL; 7052bfab27a1SGiuseppe CAVALLARO struct stmmac_priv *priv; 70530366f7e0SOng Boon Leong u32 rxq; 705476067459SJose Abreu int i, ret = 0; 70557ac6653aSJeff Kirsher 70569737070cSJisheng Zhang ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 70579737070cSJisheng Zhang MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 705841de8d4cSJoe Perches if (!ndev) 705915ffac73SJoachim Eastwood return -ENOMEM; 70607ac6653aSJeff Kirsher 7061bfab27a1SGiuseppe CAVALLARO SET_NETDEV_DEV(ndev, device); 70627ac6653aSJeff Kirsher 7063bfab27a1SGiuseppe CAVALLARO priv = netdev_priv(ndev); 7064bfab27a1SGiuseppe CAVALLARO priv->device = device; 7065bfab27a1SGiuseppe CAVALLARO priv->dev = ndev; 7066bfab27a1SGiuseppe CAVALLARO 7067bfab27a1SGiuseppe CAVALLARO stmmac_set_ethtool_ops(ndev); 7068cf3f047bSGiuseppe CAVALLARO priv->pause = pause; 7069cf3f047bSGiuseppe CAVALLARO priv->plat = plat_dat; 7070e56788cfSJoachim Eastwood priv->ioaddr = res->addr; 7071e56788cfSJoachim Eastwood priv->dev->base_addr = (unsigned long)res->addr; 70726ccf12aeSWong, Vee Khee priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; 7073e56788cfSJoachim Eastwood 7074e56788cfSJoachim Eastwood priv->dev->irq = res->irq; 7075e56788cfSJoachim Eastwood priv->wol_irq = res->wol_irq; 7076e56788cfSJoachim Eastwood priv->lpi_irq = res->lpi_irq; 70778532f613SOng Boon Leong priv->sfty_ce_irq = res->sfty_ce_irq; 70788532f613SOng Boon Leong priv->sfty_ue_irq = res->sfty_ue_irq; 70798532f613SOng Boon 
Leong for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 70808532f613SOng Boon Leong priv->rx_irq[i] = res->rx_irq[i]; 70818532f613SOng Boon Leong for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 70828532f613SOng Boon Leong priv->tx_irq[i] = res->tx_irq[i]; 7083e56788cfSJoachim Eastwood 708483216e39SMichael Walle if (!is_zero_ether_addr(res->mac)) 7085a96d317fSJakub Kicinski eth_hw_addr_set(priv->dev, res->mac); 7086bfab27a1SGiuseppe CAVALLARO 7087a7a62685SJoachim Eastwood dev_set_drvdata(device, priv->dev); 7088803f8fc4SJoachim Eastwood 7089cf3f047bSGiuseppe CAVALLARO /* Verify driver arguments */ 7090cf3f047bSGiuseppe CAVALLARO stmmac_verify_args(); 7091cf3f047bSGiuseppe CAVALLARO 7092bba2556eSOng Boon Leong priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); 7093bba2556eSOng Boon Leong if (!priv->af_xdp_zc_qps) 7094bba2556eSOng Boon Leong return -ENOMEM; 7095bba2556eSOng Boon Leong 709634877a15SJose Abreu /* Allocate workqueue */ 709734877a15SJose Abreu priv->wq = create_singlethread_workqueue("stmmac_wq"); 709834877a15SJose Abreu if (!priv->wq) { 709934877a15SJose Abreu dev_err(priv->device, "failed to create workqueue\n"); 7100*a137f3f2SGaosheng Cui goto error_wq_init; 710134877a15SJose Abreu } 710234877a15SJose Abreu 710334877a15SJose Abreu INIT_WORK(&priv->service_task, stmmac_service_task); 710434877a15SJose Abreu 71055a558611SOng Boon Leong /* Initialize Link Partner FPE workqueue */ 71065a558611SOng Boon Leong INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 71075a558611SOng Boon Leong 7108cf3f047bSGiuseppe CAVALLARO /* Override with kernel parameters if supplied XXX CRS XXX 7109ceb69499SGiuseppe CAVALLARO * this needs to have multiple instances 7110ceb69499SGiuseppe CAVALLARO */ 7111cf3f047bSGiuseppe CAVALLARO if ((phyaddr >= 0) && (phyaddr <= 31)) 7112cf3f047bSGiuseppe CAVALLARO priv->plat->phy_addr = phyaddr; 7113cf3f047bSGiuseppe CAVALLARO 711490f522a2SEugeniy Paltsev if (priv->plat->stmmac_rst) { 711590f522a2SEugeniy Paltsev ret = 
reset_control_assert(priv->plat->stmmac_rst); 7116f573c0b9Sjpinto reset_control_deassert(priv->plat->stmmac_rst); 711790f522a2SEugeniy Paltsev /* Some reset controllers have only reset callback instead of 711890f522a2SEugeniy Paltsev * assert + deassert callbacks pair. 711990f522a2SEugeniy Paltsev */ 712090f522a2SEugeniy Paltsev if (ret == -ENOTSUPP) 712190f522a2SEugeniy Paltsev reset_control_reset(priv->plat->stmmac_rst); 712290f522a2SEugeniy Paltsev } 7123c5e4ddbdSChen-Yu Tsai 7124e67f325eSMatthew Hagan ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); 7125e67f325eSMatthew Hagan if (ret == -ENOTSUPP) 7126e67f325eSMatthew Hagan dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 7127e67f325eSMatthew Hagan ERR_PTR(ret)); 7128e67f325eSMatthew Hagan 7129cf3f047bSGiuseppe CAVALLARO /* Init MAC and get the capabilities */ 7130c24602efSGiuseppe CAVALLARO ret = stmmac_hw_init(priv); 7131c24602efSGiuseppe CAVALLARO if (ret) 713262866e98SChen-Yu Tsai goto error_hw_init; 7133cf3f047bSGiuseppe CAVALLARO 713496874c61SMohammad Athari Bin Ismail /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. 
713596874c61SMohammad Athari Bin Ismail */ 713696874c61SMohammad Athari Bin Ismail if (priv->synopsys_id < DWMAC_CORE_5_20) 713796874c61SMohammad Athari Bin Ismail priv->plat->dma_cfg->dche = false; 713896874c61SMohammad Athari Bin Ismail 7139b561af36SVinod Koul stmmac_check_ether_addr(priv); 7140b561af36SVinod Koul 7141cf3f047bSGiuseppe CAVALLARO ndev->netdev_ops = &stmmac_netdev_ops; 7142cf3f047bSGiuseppe CAVALLARO 7143cf3f047bSGiuseppe CAVALLARO ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 7144cf3f047bSGiuseppe CAVALLARO NETIF_F_RXCSUM; 7145f748be53SAlexandre TORGUE 71464dbbe8ddSJose Abreu ret = stmmac_tc_init(priv, priv); 71474dbbe8ddSJose Abreu if (!ret) { 71484dbbe8ddSJose Abreu ndev->hw_features |= NETIF_F_HW_TC; 71494dbbe8ddSJose Abreu } 71504dbbe8ddSJose Abreu 7151f748be53SAlexandre TORGUE if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 71529edfa7daSNiklas Cassel ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 7153b7766206SJose Abreu if (priv->plat->has_gmac4) 7154b7766206SJose Abreu ndev->hw_features |= NETIF_F_GSO_UDP_L4; 7155f748be53SAlexandre TORGUE priv->tso = true; 715638ddc59dSLABBE Corentin dev_info(priv->device, "TSO feature enabled\n"); 7157f748be53SAlexandre TORGUE } 7158a993db88SJose Abreu 715947f753c1STan Tee Min if (priv->dma_cap.sphen && !priv->plat->sph_disable) { 716067afd6d1SJose Abreu ndev->hw_features |= NETIF_F_GRO; 7161d08d32d1SOng Boon Leong priv->sph_cap = true; 7162d08d32d1SOng Boon Leong priv->sph = priv->sph_cap; 716367afd6d1SJose Abreu dev_info(priv->device, "SPH feature enabled\n"); 716467afd6d1SJose Abreu } 716567afd6d1SJose Abreu 7166f119cc98SFugang Duan /* The current IP register MAC_HW_Feature1[ADDR64] only define 7167f119cc98SFugang Duan * 32/40/64 bit width, but some SOC support others like i.MX8MP 7168f119cc98SFugang Duan * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64]. 7169f119cc98SFugang Duan * So overwrite dma_cap.addr64 according to HW real design. 
7170f119cc98SFugang Duan */ 7171f119cc98SFugang Duan if (priv->plat->addr64) 7172f119cc98SFugang Duan priv->dma_cap.addr64 = priv->plat->addr64; 7173f119cc98SFugang Duan 7174a993db88SJose Abreu if (priv->dma_cap.addr64) { 7175a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, 7176a993db88SJose Abreu DMA_BIT_MASK(priv->dma_cap.addr64)); 7177a993db88SJose Abreu if (!ret) { 7178a993db88SJose Abreu dev_info(priv->device, "Using %d bits DMA width\n", 7179a993db88SJose Abreu priv->dma_cap.addr64); 7180968a2978SThierry Reding 7181968a2978SThierry Reding /* 7182968a2978SThierry Reding * If more than 32 bits can be addressed, make sure to 7183968a2978SThierry Reding * enable enhanced addressing mode. 7184968a2978SThierry Reding */ 7185968a2978SThierry Reding if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 7186968a2978SThierry Reding priv->plat->dma_cfg->eame = true; 7187a993db88SJose Abreu } else { 7188a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 7189a993db88SJose Abreu if (ret) { 7190a993db88SJose Abreu dev_err(priv->device, "Failed to set DMA Mask\n"); 7191a993db88SJose Abreu goto error_hw_init; 7192a993db88SJose Abreu } 7193a993db88SJose Abreu 7194a993db88SJose Abreu priv->dma_cap.addr64 = 32; 7195a993db88SJose Abreu } 7196a993db88SJose Abreu } 7197a993db88SJose Abreu 7198bfab27a1SGiuseppe CAVALLARO ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 7199bfab27a1SGiuseppe CAVALLARO ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 72007ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED 72017ac6653aSJeff Kirsher /* Both mac100 and gmac support receive VLAN tag detection */ 7202ab188e8fSElad Nachman ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 72033cd1cfcbSJose Abreu if (priv->dma_cap.vlhash) { 72043cd1cfcbSJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 72053cd1cfcbSJose Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 72063cd1cfcbSJose Abreu } 720730d93227SJose Abreu if (priv->dma_cap.vlins) 
{ 720830d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 720930d93227SJose Abreu if (priv->dma_cap.dvlan) 721030d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 721130d93227SJose Abreu } 72127ac6653aSJeff Kirsher #endif 72137ac6653aSJeff Kirsher priv->msg_enable = netif_msg_init(debug, default_msg_level); 72147ac6653aSJeff Kirsher 721576067459SJose Abreu /* Initialize RSS */ 721676067459SJose Abreu rxq = priv->plat->rx_queues_to_use; 721776067459SJose Abreu netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 721876067459SJose Abreu for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 721976067459SJose Abreu priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 722076067459SJose Abreu 722176067459SJose Abreu if (priv->dma_cap.rssen && priv->plat->rss_en) 722276067459SJose Abreu ndev->features |= NETIF_F_RXHASH; 722376067459SJose Abreu 722444770e11SJarod Wilson /* MTU range: 46 - hw-specific max */ 722544770e11SJarod Wilson ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 722656bcd591SJose Abreu if (priv->plat->has_xgmac) 72277d9e6c5aSJose Abreu ndev->max_mtu = XGMAC_JUMBO_LEN; 722856bcd591SJose Abreu else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 722956bcd591SJose Abreu ndev->max_mtu = JUMBO_LEN; 723044770e11SJarod Wilson else 723144770e11SJarod Wilson ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 7232a2cd64f3SKweh, Hock Leong /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 7233a2cd64f3SKweh, Hock Leong * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 
7234a2cd64f3SKweh, Hock Leong */ 7235a2cd64f3SKweh, Hock Leong if ((priv->plat->maxmtu < ndev->max_mtu) && 7236a2cd64f3SKweh, Hock Leong (priv->plat->maxmtu >= ndev->min_mtu)) 723744770e11SJarod Wilson ndev->max_mtu = priv->plat->maxmtu; 7238a2cd64f3SKweh, Hock Leong else if (priv->plat->maxmtu < ndev->min_mtu) 7239b618ab45SHeiner Kallweit dev_warn(priv->device, 7240a2cd64f3SKweh, Hock Leong "%s: warning: maxmtu having invalid value (%d)\n", 7241a2cd64f3SKweh, Hock Leong __func__, priv->plat->maxmtu); 724244770e11SJarod Wilson 72437ac6653aSJeff Kirsher if (flow_ctrl) 72447ac6653aSJeff Kirsher priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 72457ac6653aSJeff Kirsher 72468fce3331SJose Abreu /* Setup channels NAPI */ 72470366f7e0SOng Boon Leong stmmac_napi_add(ndev); 72487ac6653aSJeff Kirsher 724929555fa3SThierry Reding mutex_init(&priv->lock); 72507ac6653aSJeff Kirsher 7251cd7201f4SGiuseppe CAVALLARO /* If a specific clk_csr value is passed from the platform 7252cd7201f4SGiuseppe CAVALLARO * this means that the CSR Clock Range selection cannot be 7253cd7201f4SGiuseppe CAVALLARO * changed at run-time and it is fixed. Viceversa the driver'll try to 7254cd7201f4SGiuseppe CAVALLARO * set the MDC clock dynamically according to the csr actual 7255cd7201f4SGiuseppe CAVALLARO * clock input. 
7256cd7201f4SGiuseppe CAVALLARO */ 72575e7f7fc5SBiao Huang if (priv->plat->clk_csr >= 0) 7258cd7201f4SGiuseppe CAVALLARO priv->clk_csr = priv->plat->clk_csr; 72595e7f7fc5SBiao Huang else 72605e7f7fc5SBiao Huang stmmac_clk_csr_set(priv); 7261cd7201f4SGiuseppe CAVALLARO 7262e58bb43fSGiuseppe CAVALLARO stmmac_check_pcs_mode(priv); 7263e58bb43fSGiuseppe CAVALLARO 72645ec55823SJoakim Zhang pm_runtime_get_noresume(device); 72655ec55823SJoakim Zhang pm_runtime_set_active(device); 7266d90d0c17SKai-Heng Feng if (!pm_runtime_enabled(device)) 72675ec55823SJoakim Zhang pm_runtime_enable(device); 72685ec55823SJoakim Zhang 7269a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 72703fe5cadbSGiuseppe CAVALLARO priv->hw->pcs != STMMAC_PCS_RTBI) { 72714bfcbd7aSFrancesco Virlinzi /* MDIO bus Registration */ 72724bfcbd7aSFrancesco Virlinzi ret = stmmac_mdio_register(ndev); 72734bfcbd7aSFrancesco Virlinzi if (ret < 0) { 7274839612d2SRasmus Villemoes dev_err_probe(priv->device, ret, 7275839612d2SRasmus Villemoes "%s: MDIO bus (id: %d) registration failed\n", 72764bfcbd7aSFrancesco Virlinzi __func__, priv->plat->bus_id); 72776a81c26fSViresh Kumar goto error_mdio_register; 72784bfcbd7aSFrancesco Virlinzi } 7279e58bb43fSGiuseppe CAVALLARO } 72804bfcbd7aSFrancesco Virlinzi 728146682cb8SVoon Weifeng if (priv->plat->speed_mode_2500) 728246682cb8SVoon Weifeng priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); 728346682cb8SVoon Weifeng 72847413f9a6SVladimir Oltean if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { 7285597a68ceSVoon Weifeng ret = stmmac_xpcs_setup(priv->mii); 7286597a68ceSVoon Weifeng if (ret) 7287597a68ceSVoon Weifeng goto error_xpcs_setup; 7288597a68ceSVoon Weifeng } 7289597a68ceSVoon Weifeng 729074371272SJose Abreu ret = stmmac_phy_setup(priv); 729174371272SJose Abreu if (ret) { 729274371272SJose Abreu netdev_err(ndev, "failed to setup phy (%d)\n", ret); 729374371272SJose Abreu goto error_phy_setup; 729474371272SJose Abreu } 
729574371272SJose Abreu 729657016590SFlorian Fainelli ret = register_netdev(ndev); 7297b2eb09afSFlorian Fainelli if (ret) { 7298b618ab45SHeiner Kallweit dev_err(priv->device, "%s: ERROR %i registering the device\n", 729957016590SFlorian Fainelli __func__, ret); 7300b2eb09afSFlorian Fainelli goto error_netdev_register; 7301b2eb09afSFlorian Fainelli } 73027ac6653aSJeff Kirsher 73035f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS 73048d72ab11SGreg Kroah-Hartman stmmac_init_fs(ndev); 73055f2b8b62SThierry Reding #endif 73065f2b8b62SThierry Reding 73074047b9dbSBhupesh Sharma if (priv->plat->dump_debug_regs) 73084047b9dbSBhupesh Sharma priv->plat->dump_debug_regs(priv->plat->bsp_priv); 73094047b9dbSBhupesh Sharma 73105ec55823SJoakim Zhang /* Let pm_runtime_put() disable the clocks. 73115ec55823SJoakim Zhang * If CONFIG_PM is not enabled, the clocks will stay powered. 73125ec55823SJoakim Zhang */ 73135ec55823SJoakim Zhang pm_runtime_put(device); 73145ec55823SJoakim Zhang 731557016590SFlorian Fainelli return ret; 73167ac6653aSJeff Kirsher 73176a81c26fSViresh Kumar error_netdev_register: 731874371272SJose Abreu phylink_destroy(priv->phylink); 7319597a68ceSVoon Weifeng error_xpcs_setup: 732074371272SJose Abreu error_phy_setup: 7321a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 7322b2eb09afSFlorian Fainelli priv->hw->pcs != STMMAC_PCS_RTBI) 7323b2eb09afSFlorian Fainelli stmmac_mdio_unregister(ndev); 73247ac6653aSJeff Kirsher error_mdio_register: 73250366f7e0SOng Boon Leong stmmac_napi_del(ndev); 732662866e98SChen-Yu Tsai error_hw_init: 732734877a15SJose Abreu destroy_workqueue(priv->wq); 7328*a137f3f2SGaosheng Cui error_wq_init: 7329d7f576dcSWong Vee Khee bitmap_free(priv->af_xdp_zc_qps); 73307ac6653aSJeff Kirsher 733115ffac73SJoachim Eastwood return ret; 73327ac6653aSJeff Kirsher } 7333b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 73347ac6653aSJeff Kirsher 73357ac6653aSJeff Kirsher /** 73367ac6653aSJeff Kirsher * stmmac_dvr_remove 
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	/* Keep the device resumed while we tear it down; balanced by the
	 * pm_runtime_put_noidle() below.
	 */
	pm_runtime_get_sync(dev);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

	/* Serdes power down needs to happen after VLAN filter
	 * is deleted that is triggered by unregister_netdev().
	 */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
7388732fdf0eSGiuseppe CAVALLARO */ 7389f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev) 73907ac6653aSJeff Kirsher { 7391f4e7bd81SJoachim Eastwood struct net_device *ndev = dev_get_drvdata(dev); 73927ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(ndev); 739314b41a29SNicolin Chen u32 chan; 73947ac6653aSJeff Kirsher 73957ac6653aSJeff Kirsher if (!ndev || !netif_running(ndev)) 73967ac6653aSJeff Kirsher return 0; 73977ac6653aSJeff Kirsher 7398134cc4ceSThierry Reding mutex_lock(&priv->lock); 739919e13cb2SJose Abreu 74007ac6653aSJeff Kirsher netif_device_detach(ndev); 74017ac6653aSJeff Kirsher 7402c22a3f48SJoao Pinto stmmac_disable_all_queues(priv); 74037ac6653aSJeff Kirsher 740414b41a29SNicolin Chen for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 74058531c808SChristian Marangi hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 740614b41a29SNicolin Chen 74075f585913SFugang Duan if (priv->eee_enabled) { 74085f585913SFugang Duan priv->tx_path_in_lpi_mode = false; 74095f585913SFugang Duan del_timer_sync(&priv->eee_ctrl_timer); 74105f585913SFugang Duan } 74115f585913SFugang Duan 74127ac6653aSJeff Kirsher /* Stop TX/RX DMA */ 7413ae4f0d46SJoao Pinto stmmac_stop_all_dma(priv); 7414c24602efSGiuseppe CAVALLARO 7415b9663b7cSVoon Weifeng if (priv->plat->serdes_powerdown) 7416b9663b7cSVoon Weifeng priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 7417b9663b7cSVoon Weifeng 74187ac6653aSJeff Kirsher /* Enable Power down mode by programming the PMT regs */ 7419e8377e7aSJisheng Zhang if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7420c10d4c82SJose Abreu stmmac_pmt(priv, priv->hw, priv->wolopts); 742189f7f2cfSSrinivas Kandagatla priv->irq_wake = 1; 742289f7f2cfSSrinivas Kandagatla } else { 7423c10d4c82SJose Abreu stmmac_mac_set(priv, priv->ioaddr, false); 7424db88f10aSSrinivas Kandagatla pinctrl_pm_select_sleep_state(priv->device); 742530f347aeSYang Yingliang } 74265a558611SOng Boon Leong 742729555fa3SThierry Reding 
mutex_unlock(&priv->lock); 74282d871aa0SVince Bridgers 742990702dcdSJoakim Zhang rtnl_lock(); 743090702dcdSJoakim Zhang if (device_may_wakeup(priv->device) && priv->plat->pmt) { 743190702dcdSJoakim Zhang phylink_suspend(priv->phylink, true); 743290702dcdSJoakim Zhang } else { 743390702dcdSJoakim Zhang if (device_may_wakeup(priv->device)) 743490702dcdSJoakim Zhang phylink_speed_down(priv->phylink, false); 743590702dcdSJoakim Zhang phylink_suspend(priv->phylink, false); 743690702dcdSJoakim Zhang } 743790702dcdSJoakim Zhang rtnl_unlock(); 743890702dcdSJoakim Zhang 74395a558611SOng Boon Leong if (priv->dma_cap.fpesel) { 74405a558611SOng Boon Leong /* Disable FPE */ 74415a558611SOng Boon Leong stmmac_fpe_configure(priv, priv->ioaddr, 74425a558611SOng Boon Leong priv->plat->tx_queues_to_use, 74435a558611SOng Boon Leong priv->plat->rx_queues_to_use, false); 74445a558611SOng Boon Leong 74455a558611SOng Boon Leong stmmac_fpe_handshake(priv, false); 74466b28a86dSMohammad Athari Bin Ismail stmmac_fpe_stop_wq(priv); 74475a558611SOng Boon Leong } 74485a558611SOng Boon Leong 7449bd00632cSLABBE Corentin priv->speed = SPEED_UNKNOWN; 74507ac6653aSJeff Kirsher return 0; 74517ac6653aSJeff Kirsher } 7452b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend); 74537ac6653aSJeff Kirsher 7454f9ec5723SChristian Marangi static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) 7455f9ec5723SChristian Marangi { 74568531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 7457f9ec5723SChristian Marangi 7458f9ec5723SChristian Marangi rx_q->cur_rx = 0; 7459f9ec5723SChristian Marangi rx_q->dirty_rx = 0; 7460f9ec5723SChristian Marangi } 7461f9ec5723SChristian Marangi 7462f9ec5723SChristian Marangi static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) 7463f9ec5723SChristian Marangi { 74648531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 7465f9ec5723SChristian Marangi 7466f9ec5723SChristian 
Marangi tx_q->cur_tx = 0; 7467f9ec5723SChristian Marangi tx_q->dirty_tx = 0; 7468f9ec5723SChristian Marangi tx_q->mss = 0; 7469f9ec5723SChristian Marangi 7470f9ec5723SChristian Marangi netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); 7471f9ec5723SChristian Marangi } 7472f9ec5723SChristian Marangi 7473732fdf0eSGiuseppe CAVALLARO /** 747454139cf3SJoao Pinto * stmmac_reset_queues_param - reset queue parameters 7475d0ea5cbdSJesse Brandeburg * @priv: device pointer 747654139cf3SJoao Pinto */ 747754139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv) 747854139cf3SJoao Pinto { 747954139cf3SJoao Pinto u32 rx_cnt = priv->plat->rx_queues_to_use; 7480ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 748154139cf3SJoao Pinto u32 queue; 748254139cf3SJoao Pinto 7483f9ec5723SChristian Marangi for (queue = 0; queue < rx_cnt; queue++) 7484f9ec5723SChristian Marangi stmmac_reset_rx_queue(priv, queue); 748554139cf3SJoao Pinto 7486f9ec5723SChristian Marangi for (queue = 0; queue < tx_cnt; queue++) 7487f9ec5723SChristian Marangi stmmac_reset_tx_queue(priv, queue); 748854139cf3SJoao Pinto } 748954139cf3SJoao Pinto 749054139cf3SJoao Pinto /** 7491732fdf0eSGiuseppe CAVALLARO * stmmac_resume - resume callback 7492f4e7bd81SJoachim Eastwood * @dev: device pointer 7493732fdf0eSGiuseppe CAVALLARO * Description: when resume this function is invoked to setup the DMA and CORE 7494732fdf0eSGiuseppe CAVALLARO * in a usable state. 
7495732fdf0eSGiuseppe CAVALLARO */ 7496f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev) 74977ac6653aSJeff Kirsher { 7498f4e7bd81SJoachim Eastwood struct net_device *ndev = dev_get_drvdata(dev); 74997ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(ndev); 7500b9663b7cSVoon Weifeng int ret; 75017ac6653aSJeff Kirsher 75027ac6653aSJeff Kirsher if (!netif_running(ndev)) 75037ac6653aSJeff Kirsher return 0; 75047ac6653aSJeff Kirsher 75057ac6653aSJeff Kirsher /* Power Down bit, into the PM register, is cleared 75067ac6653aSJeff Kirsher * automatically as soon as a magic packet or a Wake-up frame 75077ac6653aSJeff Kirsher * is received. Anyway, it's better to manually clear 75087ac6653aSJeff Kirsher * this bit because it can generate problems while resuming 7509ceb69499SGiuseppe CAVALLARO * from another devices (e.g. serial console). 7510ceb69499SGiuseppe CAVALLARO */ 7511e8377e7aSJisheng Zhang if (device_may_wakeup(priv->device) && priv->plat->pmt) { 751229555fa3SThierry Reding mutex_lock(&priv->lock); 7513c10d4c82SJose Abreu stmmac_pmt(priv, priv->hw, 0); 751429555fa3SThierry Reding mutex_unlock(&priv->lock); 751589f7f2cfSSrinivas Kandagatla priv->irq_wake = 0; 7516623997fbSSrinivas Kandagatla } else { 7517db88f10aSSrinivas Kandagatla pinctrl_pm_select_default_state(priv->device); 7518623997fbSSrinivas Kandagatla /* reset the phy so that it's ready */ 7519623997fbSSrinivas Kandagatla if (priv->mii) 7520623997fbSSrinivas Kandagatla stmmac_mdio_reset(priv->mii); 7521623997fbSSrinivas Kandagatla } 75227ac6653aSJeff Kirsher 7523b9663b7cSVoon Weifeng if (priv->plat->serdes_powerup) { 7524b9663b7cSVoon Weifeng ret = priv->plat->serdes_powerup(ndev, 7525b9663b7cSVoon Weifeng priv->plat->bsp_priv); 7526b9663b7cSVoon Weifeng 7527b9663b7cSVoon Weifeng if (ret < 0) 7528b9663b7cSVoon Weifeng return ret; 7529b9663b7cSVoon Weifeng } 7530b9663b7cSVoon Weifeng 753136d18b56SFugang Duan rtnl_lock(); 753290702dcdSJoakim Zhang if (device_may_wakeup(priv->device) && 
priv->plat->pmt) { 753390702dcdSJoakim Zhang phylink_resume(priv->phylink); 753490702dcdSJoakim Zhang } else { 753590702dcdSJoakim Zhang phylink_resume(priv->phylink); 753690702dcdSJoakim Zhang if (device_may_wakeup(priv->device)) 753736d18b56SFugang Duan phylink_speed_up(priv->phylink); 753836d18b56SFugang Duan } 753990702dcdSJoakim Zhang rtnl_unlock(); 754036d18b56SFugang Duan 75418e5debedSWong Vee Khee rtnl_lock(); 754229555fa3SThierry Reding mutex_lock(&priv->lock); 7543f55d84b0SVincent Palatin 754454139cf3SJoao Pinto stmmac_reset_queues_param(priv); 754500423969SThierry Reding 75464ec236c7SFugang Duan stmmac_free_tx_skbufs(priv); 7547ba39b344SChristian Marangi stmmac_clear_descriptors(priv, &priv->dma_conf); 7548ae79a639SGiuseppe CAVALLARO 7549fe131929SHuacai Chen stmmac_hw_setup(ndev, false); 7550d429b66eSJose Abreu stmmac_init_coalesce(priv); 7551ac316c78SGiuseppe CAVALLARO stmmac_set_rx_mode(ndev); 75527ac6653aSJeff Kirsher 7553ed64639bSWong Vee Khee stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); 7554ed64639bSWong Vee Khee 7555c22a3f48SJoao Pinto stmmac_enable_all_queues(priv); 7556087a7b94SVincent Whitchurch stmmac_enable_all_dma_irq(priv); 75577ac6653aSJeff Kirsher 7558134cc4ceSThierry Reding mutex_unlock(&priv->lock); 75598e5debedSWong Vee Khee rtnl_unlock(); 7560134cc4ceSThierry Reding 756131096c3eSLeon Yu netif_device_attach(ndev); 756231096c3eSLeon Yu 75637ac6653aSJeff Kirsher return 0; 75647ac6653aSJeff Kirsher } 7565b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume); 7566ba27ec66SGiuseppe CAVALLARO 75677ac6653aSJeff Kirsher #ifndef MODULE 75687ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str) 75697ac6653aSJeff Kirsher { 75707ac6653aSJeff Kirsher char *opt; 75717ac6653aSJeff Kirsher 75727ac6653aSJeff Kirsher if (!str || !*str) 7573e01b042eSRandy Dunlap return 1; 75747ac6653aSJeff Kirsher while ((opt = strsep(&str, ",")) != NULL) { 75757ac6653aSJeff Kirsher if (!strncmp(opt, "debug:", 6)) { 7576ea2ab871SGiuseppe 
CAVALLARO if (kstrtoint(opt + 6, 0, &debug)) 75777ac6653aSJeff Kirsher goto err; 75787ac6653aSJeff Kirsher } else if (!strncmp(opt, "phyaddr:", 8)) { 7579ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 8, 0, &phyaddr)) 75807ac6653aSJeff Kirsher goto err; 75817ac6653aSJeff Kirsher } else if (!strncmp(opt, "buf_sz:", 7)) { 7582ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 7, 0, &buf_sz)) 75837ac6653aSJeff Kirsher goto err; 75847ac6653aSJeff Kirsher } else if (!strncmp(opt, "tc:", 3)) { 7585ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 3, 0, &tc)) 75867ac6653aSJeff Kirsher goto err; 75877ac6653aSJeff Kirsher } else if (!strncmp(opt, "watchdog:", 9)) { 7588ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 9, 0, &watchdog)) 75897ac6653aSJeff Kirsher goto err; 75907ac6653aSJeff Kirsher } else if (!strncmp(opt, "flow_ctrl:", 10)) { 7591ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 10, 0, &flow_ctrl)) 75927ac6653aSJeff Kirsher goto err; 75937ac6653aSJeff Kirsher } else if (!strncmp(opt, "pause:", 6)) { 7594ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 6, 0, &pause)) 75957ac6653aSJeff Kirsher goto err; 7596506f669cSGiuseppe CAVALLARO } else if (!strncmp(opt, "eee_timer:", 10)) { 7597d765955dSGiuseppe CAVALLARO if (kstrtoint(opt + 10, 0, &eee_timer)) 7598d765955dSGiuseppe CAVALLARO goto err; 75994a7d666aSGiuseppe CAVALLARO } else if (!strncmp(opt, "chain_mode:", 11)) { 76004a7d666aSGiuseppe CAVALLARO if (kstrtoint(opt + 11, 0, &chain_mode)) 76014a7d666aSGiuseppe CAVALLARO goto err; 76027ac6653aSJeff Kirsher } 76037ac6653aSJeff Kirsher } 7604e01b042eSRandy Dunlap return 1; 76057ac6653aSJeff Kirsher 76067ac6653aSJeff Kirsher err: 76077ac6653aSJeff Kirsher pr_err("%s: ERROR broken module parameter conversion", __func__); 7608e01b042eSRandy Dunlap return 1; 76097ac6653aSJeff Kirsher } 76107ac6653aSJeff Kirsher 76117ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt); 7612ceb69499SGiuseppe CAVALLARO #endif /* MODULE */ 76136fc0d0f2SGiuseppe Cavallaro 
/* Module init: only sets up the shared debugfs directory and the
 * netdevice notifier; per-device setup happens in stmmac_dvr_probe().
 */
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

/* Module exit: tear down the notifier and the whole debugfs tree,
 * mirroring stmmac_init() in reverse order.
 */
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");