14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 27ac6653aSJeff Kirsher /******************************************************************************* 37ac6653aSJeff Kirsher This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. 47ac6653aSJeff Kirsher ST Ethernet IPs are built around a Synopsys IP Core. 57ac6653aSJeff Kirsher 6286a8372SGiuseppe CAVALLARO Copyright(C) 2007-2011 STMicroelectronics Ltd 77ac6653aSJeff Kirsher 87ac6653aSJeff Kirsher 97ac6653aSJeff Kirsher Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 107ac6653aSJeff Kirsher 117ac6653aSJeff Kirsher Documentation available at: 127ac6653aSJeff Kirsher http://www.stlinux.com 137ac6653aSJeff Kirsher Support available at: 147ac6653aSJeff Kirsher https://bugzilla.stlinux.com/ 157ac6653aSJeff Kirsher *******************************************************************************/ 167ac6653aSJeff Kirsher 176a81c26fSViresh Kumar #include <linux/clk.h> 187ac6653aSJeff Kirsher #include <linux/kernel.h> 197ac6653aSJeff Kirsher #include <linux/interrupt.h> 207ac6653aSJeff Kirsher #include <linux/ip.h> 217ac6653aSJeff Kirsher #include <linux/tcp.h> 227ac6653aSJeff Kirsher #include <linux/skbuff.h> 237ac6653aSJeff Kirsher #include <linux/ethtool.h> 247ac6653aSJeff Kirsher #include <linux/if_ether.h> 257ac6653aSJeff Kirsher #include <linux/crc32.h> 267ac6653aSJeff Kirsher #include <linux/mii.h> 2701789349SJiri Pirko #include <linux/if.h> 287ac6653aSJeff Kirsher #include <linux/if_vlan.h> 297ac6653aSJeff Kirsher #include <linux/dma-mapping.h> 307ac6653aSJeff Kirsher #include <linux/slab.h> 315ec55823SJoakim Zhang #include <linux/pm_runtime.h> 327ac6653aSJeff Kirsher #include <linux/prefetch.h> 33db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h> 3450fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 357ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h> 367ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h> 3750fb4f74SGiuseppe CAVALLARO #endif /* 
CONFIG_DEBUG_FS */ 38891434b1SRayagond Kokatanur #include <linux/net_tstamp.h> 39eeef2f6bSJose Abreu #include <linux/phylink.h> 40b7766206SJose Abreu #include <linux/udp.h> 415fabb012SOng Boon Leong #include <linux/bpf_trace.h> 424dbbe8ddSJose Abreu #include <net/pkt_cls.h> 43bba2556eSOng Boon Leong #include <net/xdp_sock_drv.h> 44891434b1SRayagond Kokatanur #include "stmmac_ptp.h" 45286a8372SGiuseppe CAVALLARO #include "stmmac.h" 465fabb012SOng Boon Leong #include "stmmac_xdp.h" 47c5e4ddbdSChen-Yu Tsai #include <linux/reset.h> 485790cf3cSMathieu Olivari #include <linux/of_mdio.h> 4919d857c9SPhil Reid #include "dwmac1000.h" 507d9e6c5aSJose Abreu #include "dwxgmac2.h" 5142de047dSJose Abreu #include "hwif.h" 527ac6653aSJeff Kirsher 53a6da2bbbSHolger Assmann /* As long as the interface is active, we keep the timestamping counter enabled 54a6da2bbbSHolger Assmann * with fine resolution and binary rollover. This avoid non-monotonic behavior 55a6da2bbbSHolger Assmann * (clock jumps) when changing timestamping settings at runtime. 
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

/* Queue-depth based thresholds: a quarter of the ring size */
#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

/* XDP verdict flags returned by the RX fast path */
#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
/* Convert an LPI timeout in usec to an absolute jiffies deadline */
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: enables stmmac_clk, then pclk, then any platform-specific
 * clocks via the optional clks_config() callback, unwinding the already
 * enabled clocks if a later step fails. On disable, the clocks are released
 * in the same order and the platform callback's return value is ignored
 * (there is no sensible recovery on the disable path).
 * Return: 0 on success, a negative errno otherwise.
 */
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and set a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * __stmmac_disable_all_queues - disable all the channel NAPI contexts
 * @priv: driver private structure
 * Description: for each channel, disables the combined rxtx NAPI when the
 * queue is in AF_XDP zero-copy mode, otherwise the separate rx/tx NAPIs.
 */
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 * Description: waits for in-flight XDP buffers to drain (only when at least
 * one RX queue has an XSK pool attached) before disabling the NAPIs.
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->dma_conf.rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

/* Queue the service task unless the interface is going down or a run is
 * already scheduled.
 */
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

/* Take the carrier down and request a reset from the service task. */
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation).
 *	Viceversa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we can not estimate the proper divider as it is not known
	 * the frequency of clk_csr_i. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	/* sun8i glue layer uses its own divider encoding */
	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	/* XGMAC cores use a different divider encoding and clock range */
	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

/* Dump @len bytes of @buf to the kernel log for debugging. */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

/* Number of free TX descriptors in @queue; one descriptor is always kept
 * unused (hence the "- 1" in both branches).
 */
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: number of RX descriptors consumed by the hardware but not
 * yet refilled, accounting for ring wrap-around.
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/* Select between the HW LPI entry timer (en != 0) and the SW EEE timer. */
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 * Return: 0 when LPI entry was requested, -EBUSY when at least one TX queue
 * still has unfinished work.
 */
static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return -EBUSY; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
	return 0;
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	/* HW LPI entry timer in use: just turn it off, no SW timer to stop */
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	/* TX work still pending (-EBUSY): retry after tx_lpi_timer */
	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enable the LPI state and start related
 *  timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot dial with the phy registers at this stage
	 * so we do not support extra feature like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	/* First activation: arm the SW timer and program the HW EEE timers */
	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	/* GMAC4 with an LPI timer within the HW range uses the HW entry
	 * timer; otherwise fall back to the SW eee_ctrl_timer.
	 */
	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read timestamp from the descriptor & pass it to stack.
 * and also perform some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status: first the descriptor, then the MAC */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		/* apply the platform's clock-domain-crossing correction */
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
@p : descriptor pointer 572ba1ffd74SGiuseppe CAVALLARO * @np : next descriptor pointer 573891434b1SRayagond Kokatanur * @skb : the socket buffer 574891434b1SRayagond Kokatanur * Description : 575891434b1SRayagond Kokatanur * This function will read received packet's timestamp from the descriptor 576891434b1SRayagond Kokatanur * and pass it to stack. It also perform some sanity checks. 577891434b1SRayagond Kokatanur */ 578ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, 579ba1ffd74SGiuseppe CAVALLARO struct dma_desc *np, struct sk_buff *skb) 580891434b1SRayagond Kokatanur { 581891434b1SRayagond Kokatanur struct skb_shared_hwtstamps *shhwtstamp = NULL; 58298870943SJose Abreu struct dma_desc *desc = p; 583df103170SNathan Chancellor u64 ns = 0; 584891434b1SRayagond Kokatanur 585891434b1SRayagond Kokatanur if (!priv->hwts_rx_en) 586891434b1SRayagond Kokatanur return; 587ba1ffd74SGiuseppe CAVALLARO /* For GMAC4, the valid timestamp is from CTX next desc. 
*/ 5887d9e6c5aSJose Abreu if (priv->plat->has_gmac4 || priv->plat->has_xgmac) 58998870943SJose Abreu desc = np; 590891434b1SRayagond Kokatanur 59198870943SJose Abreu /* Check if timestamp is available */ 59242de047dSJose Abreu if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { 59342de047dSJose Abreu stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); 5943600be5fSVoon Weifeng 595c6d5f193SKurt Kanzenbach ns -= priv->plat->cdc_error_adj; 5963600be5fSVoon Weifeng 59733d4c482SMario Molitor netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 598891434b1SRayagond Kokatanur shhwtstamp = skb_hwtstamps(skb); 599891434b1SRayagond Kokatanur memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 600891434b1SRayagond Kokatanur shhwtstamp->hwtstamp = ns_to_ktime(ns); 601ba1ffd74SGiuseppe CAVALLARO } else { 60233d4c482SMario Molitor netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); 603ba1ffd74SGiuseppe CAVALLARO } 604891434b1SRayagond Kokatanur } 605891434b1SRayagond Kokatanur 606891434b1SRayagond Kokatanur /** 607d6228b7cSArtem Panfilov * stmmac_hwtstamp_set - control hardware timestamping. 608891434b1SRayagond Kokatanur * @dev: device pointer. 6098d45e42bSLABBE Corentin * @ifr: An IOCTL specific structure, that can contain a pointer to 610891434b1SRayagond Kokatanur * a proprietary structure used to pass information to the driver. 611891434b1SRayagond Kokatanur * Description: 612891434b1SRayagond Kokatanur * This function configures the MAC to enable/disable both outgoing(TX) 613891434b1SRayagond Kokatanur * and incoming(RX) packets time stamping based on user input. 614891434b1SRayagond Kokatanur * Return Value: 615891434b1SRayagond Kokatanur * 0 on success and an appropriate -ve integer on failure. 
616891434b1SRayagond Kokatanur */ 617d6228b7cSArtem Panfilov static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 618891434b1SRayagond Kokatanur { 619891434b1SRayagond Kokatanur struct stmmac_priv *priv = netdev_priv(dev); 620891434b1SRayagond Kokatanur struct hwtstamp_config config; 621891434b1SRayagond Kokatanur u32 ptp_v2 = 0; 622891434b1SRayagond Kokatanur u32 tstamp_all = 0; 623891434b1SRayagond Kokatanur u32 ptp_over_ipv4_udp = 0; 624891434b1SRayagond Kokatanur u32 ptp_over_ipv6_udp = 0; 625891434b1SRayagond Kokatanur u32 ptp_over_ethernet = 0; 626891434b1SRayagond Kokatanur u32 snap_type_sel = 0; 627891434b1SRayagond Kokatanur u32 ts_master_en = 0; 628891434b1SRayagond Kokatanur u32 ts_event_en = 0; 629891434b1SRayagond Kokatanur 630891434b1SRayagond Kokatanur if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { 631891434b1SRayagond Kokatanur netdev_alert(priv->dev, "No support for HW time stamping\n"); 632891434b1SRayagond Kokatanur priv->hwts_tx_en = 0; 633891434b1SRayagond Kokatanur priv->hwts_rx_en = 0; 634891434b1SRayagond Kokatanur 635891434b1SRayagond Kokatanur return -EOPNOTSUPP; 636891434b1SRayagond Kokatanur } 637891434b1SRayagond Kokatanur 638891434b1SRayagond Kokatanur if (copy_from_user(&config, ifr->ifr_data, 639d6228b7cSArtem Panfilov sizeof(config))) 640891434b1SRayagond Kokatanur return -EFAULT; 641891434b1SRayagond Kokatanur 64238ddc59dSLABBE Corentin netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", 643891434b1SRayagond Kokatanur __func__, config.flags, config.tx_type, config.rx_filter); 644891434b1SRayagond Kokatanur 6455f3da328SBen Hutchings if (config.tx_type != HWTSTAMP_TX_OFF && 6465f3da328SBen Hutchings config.tx_type != HWTSTAMP_TX_ON) 647891434b1SRayagond Kokatanur return -ERANGE; 648891434b1SRayagond Kokatanur 649891434b1SRayagond Kokatanur if (priv->adv_ts) { 650891434b1SRayagond Kokatanur switch (config.rx_filter) { 651891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_NONE: 
652ceb69499SGiuseppe CAVALLARO /* time stamp no incoming packet at all */ 653891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_NONE; 654891434b1SRayagond Kokatanur break; 655891434b1SRayagond Kokatanur 656891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 657ceb69499SGiuseppe CAVALLARO /* PTP v1, UDP, any kind of event packet */ 658891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 6597d8e249fSIlias Apalodimas /* 'xmac' hardware can support Sync, Pdelay_Req and 6607d8e249fSIlias Apalodimas * Pdelay_resp by setting bit14 and bits17/16 to 01 6617d8e249fSIlias Apalodimas * This leaves Delay_Req timestamps out. 6627d8e249fSIlias Apalodimas * Enable all events *and* general purpose message 6637d8e249fSIlias Apalodimas * timestamping 6647d8e249fSIlias Apalodimas */ 665891434b1SRayagond Kokatanur snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 666891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 667891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 668891434b1SRayagond Kokatanur break; 669891434b1SRayagond Kokatanur 670891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 671ceb69499SGiuseppe CAVALLARO /* PTP v1, UDP, Sync packet */ 672891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 673891434b1SRayagond Kokatanur /* take time stamp for SYNC messages only */ 674891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 675891434b1SRayagond Kokatanur 676891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 677891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 678891434b1SRayagond Kokatanur break; 679891434b1SRayagond Kokatanur 680891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 681ceb69499SGiuseppe CAVALLARO /* PTP v1, UDP, Delay_req packet */ 682891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 683891434b1SRayagond Kokatanur /* take time stamp for Delay_Req messages 
only */ 684891434b1SRayagond Kokatanur ts_master_en = PTP_TCR_TSMSTRENA; 685891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 686891434b1SRayagond Kokatanur 687891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 688891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 689891434b1SRayagond Kokatanur break; 690891434b1SRayagond Kokatanur 691891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 692ceb69499SGiuseppe CAVALLARO /* PTP v2, UDP, any kind of event packet */ 693891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 694891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 695891434b1SRayagond Kokatanur /* take time stamp for all event messages */ 696891434b1SRayagond Kokatanur snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 697891434b1SRayagond Kokatanur 698891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 699891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 700891434b1SRayagond Kokatanur break; 701891434b1SRayagond Kokatanur 702891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 703ceb69499SGiuseppe CAVALLARO /* PTP v2, UDP, Sync packet */ 704891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 705891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 706891434b1SRayagond Kokatanur /* take time stamp for SYNC messages only */ 707891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 708891434b1SRayagond Kokatanur 709891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 710891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 711891434b1SRayagond Kokatanur break; 712891434b1SRayagond Kokatanur 713891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 714ceb69499SGiuseppe CAVALLARO /* PTP v2, UDP, Delay_req packet */ 715891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 716891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 
717891434b1SRayagond Kokatanur /* take time stamp for Delay_Req messages only */ 718891434b1SRayagond Kokatanur ts_master_en = PTP_TCR_TSMSTRENA; 719891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 720891434b1SRayagond Kokatanur 721891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 722891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 723891434b1SRayagond Kokatanur break; 724891434b1SRayagond Kokatanur 725891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_EVENT: 726ceb69499SGiuseppe CAVALLARO /* PTP v2/802.AS1 any layer, any kind of event packet */ 727891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 728891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 729891434b1SRayagond Kokatanur snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 7303cb95802SKurt Kanzenbach if (priv->synopsys_id < DWMAC_CORE_4_10) 73114f34733SJose Abreu ts_event_en = PTP_TCR_TSEVNTENA; 732891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 733891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 734891434b1SRayagond Kokatanur ptp_over_ethernet = PTP_TCR_TSIPENA; 735891434b1SRayagond Kokatanur break; 736891434b1SRayagond Kokatanur 737891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_SYNC: 738ceb69499SGiuseppe CAVALLARO /* PTP v2/802.AS1, any layer, Sync packet */ 739891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 740891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 741891434b1SRayagond Kokatanur /* take time stamp for SYNC messages only */ 742891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 743891434b1SRayagond Kokatanur 744891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 745891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 746891434b1SRayagond Kokatanur ptp_over_ethernet = PTP_TCR_TSIPENA; 747891434b1SRayagond Kokatanur break; 748891434b1SRayagond Kokatanur 749891434b1SRayagond Kokatanur case 
HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 750ceb69499SGiuseppe CAVALLARO /* PTP v2/802.AS1, any layer, Delay_req packet */ 751891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 752891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 753891434b1SRayagond Kokatanur /* take time stamp for Delay_Req messages only */ 754891434b1SRayagond Kokatanur ts_master_en = PTP_TCR_TSMSTRENA; 755891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 756891434b1SRayagond Kokatanur 757891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 758891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 759891434b1SRayagond Kokatanur ptp_over_ethernet = PTP_TCR_TSIPENA; 760891434b1SRayagond Kokatanur break; 761891434b1SRayagond Kokatanur 762e3412575SMiroslav Lichvar case HWTSTAMP_FILTER_NTP_ALL: 763891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_ALL: 764ceb69499SGiuseppe CAVALLARO /* time stamp any incoming packet */ 765891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_ALL; 766891434b1SRayagond Kokatanur tstamp_all = PTP_TCR_TSENALL; 767891434b1SRayagond Kokatanur break; 768891434b1SRayagond Kokatanur 769891434b1SRayagond Kokatanur default: 770891434b1SRayagond Kokatanur return -ERANGE; 771891434b1SRayagond Kokatanur } 772891434b1SRayagond Kokatanur } else { 773891434b1SRayagond Kokatanur switch (config.rx_filter) { 774891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_NONE: 775891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_NONE; 776891434b1SRayagond Kokatanur break; 777891434b1SRayagond Kokatanur default: 778891434b1SRayagond Kokatanur /* PTP v1, UDP, any kind of event packet */ 779891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 780891434b1SRayagond Kokatanur break; 781891434b1SRayagond Kokatanur } 782891434b1SRayagond Kokatanur } 783891434b1SRayagond Kokatanur priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 
0 : 1); 7845f3da328SBen Hutchings priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; 785891434b1SRayagond Kokatanur 786a6da2bbbSHolger Assmann priv->systime_flags = STMMAC_HWTS_ACTIVE; 787891434b1SRayagond Kokatanur 788a6da2bbbSHolger Assmann if (priv->hwts_tx_en || priv->hwts_rx_en) { 789a6da2bbbSHolger Assmann priv->systime_flags |= tstamp_all | ptp_v2 | 790a6da2bbbSHolger Assmann ptp_over_ethernet | ptp_over_ipv6_udp | 791a6da2bbbSHolger Assmann ptp_over_ipv4_udp | ts_event_en | 792a6da2bbbSHolger Assmann ts_master_en | snap_type_sel; 793891434b1SRayagond Kokatanur } 794891434b1SRayagond Kokatanur 795a6da2bbbSHolger Assmann stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); 796a6da2bbbSHolger Assmann 797d6228b7cSArtem Panfilov memcpy(&priv->tstamp_config, &config, sizeof(config)); 798d6228b7cSArtem Panfilov 799891434b1SRayagond Kokatanur return copy_to_user(ifr->ifr_data, &config, 800d6228b7cSArtem Panfilov sizeof(config)) ? -EFAULT : 0; 801d6228b7cSArtem Panfilov } 802d6228b7cSArtem Panfilov 803d6228b7cSArtem Panfilov /** 804d6228b7cSArtem Panfilov * stmmac_hwtstamp_get - read hardware timestamping. 805d6228b7cSArtem Panfilov * @dev: device pointer. 806d6228b7cSArtem Panfilov * @ifr: An IOCTL specific structure, that can contain a pointer to 807d6228b7cSArtem Panfilov * a proprietary structure used to pass information to the driver. 808d6228b7cSArtem Panfilov * Description: 809d6228b7cSArtem Panfilov * This function obtain the current hardware timestamping settings 810d0ea5cbdSJesse Brandeburg * as requested. 
811d6228b7cSArtem Panfilov */ 812d6228b7cSArtem Panfilov static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 813d6228b7cSArtem Panfilov { 814d6228b7cSArtem Panfilov struct stmmac_priv *priv = netdev_priv(dev); 815d6228b7cSArtem Panfilov struct hwtstamp_config *config = &priv->tstamp_config; 816d6228b7cSArtem Panfilov 817d6228b7cSArtem Panfilov if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 818d6228b7cSArtem Panfilov return -EOPNOTSUPP; 819d6228b7cSArtem Panfilov 820d6228b7cSArtem Panfilov return copy_to_user(ifr->ifr_data, config, 821d6228b7cSArtem Panfilov sizeof(*config)) ? -EFAULT : 0; 822891434b1SRayagond Kokatanur } 823891434b1SRayagond Kokatanur 82432ceabcaSGiuseppe CAVALLARO /** 825a6da2bbbSHolger Assmann * stmmac_init_tstamp_counter - init hardware timestamping counter 826a6da2bbbSHolger Assmann * @priv: driver private structure 827a6da2bbbSHolger Assmann * @systime_flags: timestamping flags 828a6da2bbbSHolger Assmann * Description: 829a6da2bbbSHolger Assmann * Initialize hardware counter for packet timestamping. 830a6da2bbbSHolger Assmann * This is valid as long as the interface is open and not suspended. 831a6da2bbbSHolger Assmann * Will be rerun after resuming from suspend, case in which the timestamping 832a6da2bbbSHolger Assmann * flags updated by stmmac_hwtstamp_set() also need to be restored. 
833a6da2bbbSHolger Assmann */ 834a6da2bbbSHolger Assmann int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) 835a6da2bbbSHolger Assmann { 836a6da2bbbSHolger Assmann bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 837a6da2bbbSHolger Assmann struct timespec64 now; 838a6da2bbbSHolger Assmann u32 sec_inc = 0; 839a6da2bbbSHolger Assmann u64 temp = 0; 840a6da2bbbSHolger Assmann 841a6da2bbbSHolger Assmann if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 842a6da2bbbSHolger Assmann return -EOPNOTSUPP; 843a6da2bbbSHolger Assmann 844a6da2bbbSHolger Assmann stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); 845a6da2bbbSHolger Assmann priv->systime_flags = systime_flags; 846a6da2bbbSHolger Assmann 847a6da2bbbSHolger Assmann /* program Sub Second Increment reg */ 848a6da2bbbSHolger Assmann stmmac_config_sub_second_increment(priv, priv->ptpaddr, 849a6da2bbbSHolger Assmann priv->plat->clk_ptp_rate, 850a6da2bbbSHolger Assmann xmac, &sec_inc); 851a6da2bbbSHolger Assmann temp = div_u64(1000000000ULL, sec_inc); 852a6da2bbbSHolger Assmann 853a6da2bbbSHolger Assmann /* Store sub second increment for later use */ 854a6da2bbbSHolger Assmann priv->sub_second_inc = sec_inc; 855a6da2bbbSHolger Assmann 856a6da2bbbSHolger Assmann /* calculate default added value: 857a6da2bbbSHolger Assmann * formula is : 858a6da2bbbSHolger Assmann * addend = (2^32)/freq_div_ratio; 859a6da2bbbSHolger Assmann * where, freq_div_ratio = 1e9ns/sec_inc 860a6da2bbbSHolger Assmann */ 861a6da2bbbSHolger Assmann temp = (u64)(temp << 32); 862a6da2bbbSHolger Assmann priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); 863a6da2bbbSHolger Assmann stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); 864a6da2bbbSHolger Assmann 865a6da2bbbSHolger Assmann /* initialize system time */ 866a6da2bbbSHolger Assmann ktime_get_real_ts64(&now); 867a6da2bbbSHolger Assmann 868a6da2bbbSHolger Assmann /* lower 32 bits of tv_sec are safe until y2106 */ 
869a6da2bbbSHolger Assmann stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); 870a6da2bbbSHolger Assmann 871a6da2bbbSHolger Assmann return 0; 872a6da2bbbSHolger Assmann } 873a6da2bbbSHolger Assmann EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter); 874a6da2bbbSHolger Assmann 875a6da2bbbSHolger Assmann /** 876732fdf0eSGiuseppe CAVALLARO * stmmac_init_ptp - init PTP 87732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 878732fdf0eSGiuseppe CAVALLARO * Description: this is to verify if the HW supports the PTPv1 or PTPv2. 87932ceabcaSGiuseppe CAVALLARO * This is done by looking at the HW cap. register. 880732fdf0eSGiuseppe CAVALLARO * This function also registers the ptp driver. 88132ceabcaSGiuseppe CAVALLARO */ 88292ba6888SRayagond Kokatanur static int stmmac_init_ptp(struct stmmac_priv *priv) 883891434b1SRayagond Kokatanur { 8847d9e6c5aSJose Abreu bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 885a6da2bbbSHolger Assmann int ret; 8867d9e6c5aSJose Abreu 88794c82de4SMohammad Athari Bin Ismail if (priv->plat->ptp_clk_freq_config) 88894c82de4SMohammad Athari Bin Ismail priv->plat->ptp_clk_freq_config(priv); 88994c82de4SMohammad Athari Bin Ismail 890a6da2bbbSHolger Assmann ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); 891a6da2bbbSHolger Assmann if (ret) 892a6da2bbbSHolger Assmann return ret; 89392ba6888SRayagond Kokatanur 894891434b1SRayagond Kokatanur priv->adv_ts = 0; 8957d9e6c5aSJose Abreu /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ 8967d9e6c5aSJose Abreu if (xmac && priv->dma_cap.atime_stamp) 897be9b3174SGiuseppe CAVALLARO priv->adv_ts = 1; 898be9b3174SGiuseppe CAVALLARO /* Dwmac 3.x core with extend_desc can support adv_ts */ 899be9b3174SGiuseppe CAVALLARO else if (priv->extend_desc && priv->dma_cap.atime_stamp) 900891434b1SRayagond Kokatanur priv->adv_ts = 1; 9017cd01399SVince Bridgers 902be9b3174SGiuseppe CAVALLARO if (priv->dma_cap.time_stamp) 903be9b3174SGiuseppe CAVALLARO 
netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); 9047cd01399SVince Bridgers 905be9b3174SGiuseppe CAVALLARO if (priv->adv_ts) 906be9b3174SGiuseppe CAVALLARO netdev_info(priv->dev, 907be9b3174SGiuseppe CAVALLARO "IEEE 1588-2008 Advanced Timestamp supported\n"); 908891434b1SRayagond Kokatanur 909891434b1SRayagond Kokatanur priv->hwts_tx_en = 0; 910891434b1SRayagond Kokatanur priv->hwts_rx_en = 0; 91192ba6888SRayagond Kokatanur 912c30a70d3SGiuseppe CAVALLARO return 0; 91392ba6888SRayagond Kokatanur } 91492ba6888SRayagond Kokatanur 91592ba6888SRayagond Kokatanur static void stmmac_release_ptp(struct stmmac_priv *priv) 91692ba6888SRayagond Kokatanur { 917f573c0b9Sjpinto clk_disable_unprepare(priv->plat->clk_ptp_ref); 91892ba6888SRayagond Kokatanur stmmac_ptp_unregister(priv); 919891434b1SRayagond Kokatanur } 920891434b1SRayagond Kokatanur 9217ac6653aSJeff Kirsher /** 92229feff39SJoao Pinto * stmmac_mac_flow_ctrl - Configure flow control in all queues 92329feff39SJoao Pinto * @priv: driver private structure 924d0ea5cbdSJesse Brandeburg * @duplex: duplex passed to the next function 92529feff39SJoao Pinto * Description: It is used for configuring the flow control in all queues 92629feff39SJoao Pinto */ 92729feff39SJoao Pinto static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) 92829feff39SJoao Pinto { 92929feff39SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 93029feff39SJoao Pinto 931c10d4c82SJose Abreu stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, 93229feff39SJoao Pinto priv->pause, tx_cnt); 93329feff39SJoao Pinto } 93429feff39SJoao Pinto 93572e94511SRussell King (Oracle) static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, 93672e94511SRussell King (Oracle) phy_interface_t interface) 93772e94511SRussell King (Oracle) { 93872e94511SRussell King (Oracle) struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 93972e94511SRussell King (Oracle) 94072e94511SRussell King (Oracle) if 
(!priv->hw->xpcs) 94172e94511SRussell King (Oracle) return NULL; 94272e94511SRussell King (Oracle) 94372e94511SRussell King (Oracle) return &priv->hw->xpcs->pcs; 94472e94511SRussell King (Oracle) } 94572e94511SRussell King (Oracle) 94674371272SJose Abreu static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, 94774371272SJose Abreu const struct phylink_link_state *state) 9489ad372fcSJose Abreu { 94911059740SVladimir Oltean /* Nothing to do, xpcs_config() handles everything */ 950eeef2f6bSJose Abreu } 951eeef2f6bSJose Abreu 9525a558611SOng Boon Leong static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) 9535a558611SOng Boon Leong { 9545a558611SOng Boon Leong struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 9555a558611SOng Boon Leong enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 9565a558611SOng Boon Leong enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 9575a558611SOng Boon Leong bool *hs_enable = &fpe_cfg->hs_enable; 9585a558611SOng Boon Leong 9595a558611SOng Boon Leong if (is_up && *hs_enable) { 9605a558611SOng Boon Leong stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY); 9615a558611SOng Boon Leong } else { 9621f7096f0SWong Vee Khee *lo_state = FPE_STATE_OFF; 9631f7096f0SWong Vee Khee *lp_state = FPE_STATE_OFF; 9645a558611SOng Boon Leong } 9655a558611SOng Boon Leong } 9665a558611SOng Boon Leong 96774371272SJose Abreu static void stmmac_mac_link_down(struct phylink_config *config, 96874371272SJose Abreu unsigned int mode, phy_interface_t interface) 9699ad372fcSJose Abreu { 97074371272SJose Abreu struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 9719ad372fcSJose Abreu 9729ad372fcSJose Abreu stmmac_mac_set(priv, priv->ioaddr, false); 97374371272SJose Abreu priv->eee_active = false; 974388e201dSVineetha G. 
Jaya Kumaran priv->tx_lpi_enabled = false; 975d4aeaed8SWong Vee Khee priv->eee_enabled = stmmac_eee_init(priv); 97674371272SJose Abreu stmmac_set_eee_pls(priv, priv->hw, false); 9775a558611SOng Boon Leong 97863c173ffSMohammad Athari Bin Ismail if (priv->dma_cap.fpesel) 9795a558611SOng Boon Leong stmmac_fpe_link_state_handle(priv, false); 9809ad372fcSJose Abreu } 9819ad372fcSJose Abreu 98274371272SJose Abreu static void stmmac_mac_link_up(struct phylink_config *config, 98391a208f2SRussell King struct phy_device *phy, 98474371272SJose Abreu unsigned int mode, phy_interface_t interface, 98591a208f2SRussell King int speed, int duplex, 98691a208f2SRussell King bool tx_pause, bool rx_pause) 9879ad372fcSJose Abreu { 98874371272SJose Abreu struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 989a3a57bf0SHeiner Kallweit u32 old_ctrl, ctrl; 99046f69dedSJose Abreu 991a3a57bf0SHeiner Kallweit old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); 992a3a57bf0SHeiner Kallweit ctrl = old_ctrl & ~priv->hw->link.speed_mask; 99346f69dedSJose Abreu 99446f69dedSJose Abreu if (interface == PHY_INTERFACE_MODE_USXGMII) { 99546f69dedSJose Abreu switch (speed) { 99646f69dedSJose Abreu case SPEED_10000: 99746f69dedSJose Abreu ctrl |= priv->hw->link.xgmii.speed10000; 99846f69dedSJose Abreu break; 99946f69dedSJose Abreu case SPEED_5000: 100046f69dedSJose Abreu ctrl |= priv->hw->link.xgmii.speed5000; 100146f69dedSJose Abreu break; 100246f69dedSJose Abreu case SPEED_2500: 100346f69dedSJose Abreu ctrl |= priv->hw->link.xgmii.speed2500; 100446f69dedSJose Abreu break; 100546f69dedSJose Abreu default: 100646f69dedSJose Abreu return; 100746f69dedSJose Abreu } 10088a880936SJose Abreu } else if (interface == PHY_INTERFACE_MODE_XLGMII) { 10098a880936SJose Abreu switch (speed) { 10108a880936SJose Abreu case SPEED_100000: 10118a880936SJose Abreu ctrl |= priv->hw->link.xlgmii.speed100000; 10128a880936SJose Abreu break; 10138a880936SJose Abreu case SPEED_50000: 10148a880936SJose Abreu ctrl |= 
priv->hw->link.xlgmii.speed50000; 10158a880936SJose Abreu break; 10168a880936SJose Abreu case SPEED_40000: 10178a880936SJose Abreu ctrl |= priv->hw->link.xlgmii.speed40000; 10188a880936SJose Abreu break; 10198a880936SJose Abreu case SPEED_25000: 10208a880936SJose Abreu ctrl |= priv->hw->link.xlgmii.speed25000; 10218a880936SJose Abreu break; 10228a880936SJose Abreu case SPEED_10000: 10238a880936SJose Abreu ctrl |= priv->hw->link.xgmii.speed10000; 10248a880936SJose Abreu break; 10258a880936SJose Abreu case SPEED_2500: 10268a880936SJose Abreu ctrl |= priv->hw->link.speed2500; 10278a880936SJose Abreu break; 10288a880936SJose Abreu case SPEED_1000: 10298a880936SJose Abreu ctrl |= priv->hw->link.speed1000; 10308a880936SJose Abreu break; 10318a880936SJose Abreu default: 10328a880936SJose Abreu return; 10338a880936SJose Abreu } 103446f69dedSJose Abreu } else { 103546f69dedSJose Abreu switch (speed) { 103646f69dedSJose Abreu case SPEED_2500: 103746f69dedSJose Abreu ctrl |= priv->hw->link.speed2500; 103846f69dedSJose Abreu break; 103946f69dedSJose Abreu case SPEED_1000: 104046f69dedSJose Abreu ctrl |= priv->hw->link.speed1000; 104146f69dedSJose Abreu break; 104246f69dedSJose Abreu case SPEED_100: 104346f69dedSJose Abreu ctrl |= priv->hw->link.speed100; 104446f69dedSJose Abreu break; 104546f69dedSJose Abreu case SPEED_10: 104646f69dedSJose Abreu ctrl |= priv->hw->link.speed10; 104746f69dedSJose Abreu break; 104846f69dedSJose Abreu default: 104946f69dedSJose Abreu return; 105046f69dedSJose Abreu } 105146f69dedSJose Abreu } 105246f69dedSJose Abreu 105346f69dedSJose Abreu priv->speed = speed; 105446f69dedSJose Abreu 105546f69dedSJose Abreu if (priv->plat->fix_mac_speed) 105646f69dedSJose Abreu priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); 105746f69dedSJose Abreu 105846f69dedSJose Abreu if (!duplex) 105946f69dedSJose Abreu ctrl &= ~priv->hw->link.duplex; 106046f69dedSJose Abreu else 106146f69dedSJose Abreu ctrl |= priv->hw->link.duplex; 106246f69dedSJose Abreu 
106346f69dedSJose Abreu /* Flow Control operation */ 106446f69dedSJose Abreu if (tx_pause && rx_pause) 106546f69dedSJose Abreu stmmac_mac_flow_ctrl(priv, duplex); 106646f69dedSJose Abreu 1067a3a57bf0SHeiner Kallweit if (ctrl != old_ctrl) 106846f69dedSJose Abreu writel(ctrl, priv->ioaddr + MAC_CTRL_REG); 10699ad372fcSJose Abreu 10709ad372fcSJose Abreu stmmac_mac_set(priv, priv->ioaddr, true); 10715b111770SJose Abreu if (phy && priv->dma_cap.eee) { 107274371272SJose Abreu priv->eee_active = phy_init_eee(phy, 1) >= 0; 107374371272SJose Abreu priv->eee_enabled = stmmac_eee_init(priv); 1074388e201dSVineetha G. Jaya Kumaran priv->tx_lpi_enabled = priv->eee_enabled; 107574371272SJose Abreu stmmac_set_eee_pls(priv, priv->hw, true); 107674371272SJose Abreu } 10775a558611SOng Boon Leong 107863c173ffSMohammad Athari Bin Ismail if (priv->dma_cap.fpesel) 10795a558611SOng Boon Leong stmmac_fpe_link_state_handle(priv, true); 10809ad372fcSJose Abreu } 10819ad372fcSJose Abreu 108274371272SJose Abreu static const struct phylink_mac_ops stmmac_phylink_mac_ops = { 108304a0683fSRussell King (Oracle) .validate = phylink_generic_validate, 108472e94511SRussell King (Oracle) .mac_select_pcs = stmmac_mac_select_pcs, 108574371272SJose Abreu .mac_config = stmmac_mac_config, 108674371272SJose Abreu .mac_link_down = stmmac_mac_link_down, 108774371272SJose Abreu .mac_link_up = stmmac_mac_link_up, 1088eeef2f6bSJose Abreu }; 1089eeef2f6bSJose Abreu 109029feff39SJoao Pinto /** 1091732fdf0eSGiuseppe CAVALLARO * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported 109232ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 109332ceabcaSGiuseppe CAVALLARO * Description: this is to verify if the HW supports the PCS. 109432ceabcaSGiuseppe CAVALLARO * Physical Coding Sublayer (PCS) interface that can be used when the MAC is 109532ceabcaSGiuseppe CAVALLARO * configured for the TBI, RTBI, or SGMII PHY interface. 
109632ceabcaSGiuseppe CAVALLARO */ 1097e58bb43fSGiuseppe CAVALLARO static void stmmac_check_pcs_mode(struct stmmac_priv *priv) 1098e58bb43fSGiuseppe CAVALLARO { 1099e58bb43fSGiuseppe CAVALLARO int interface = priv->plat->interface; 1100e58bb43fSGiuseppe CAVALLARO 1101e58bb43fSGiuseppe CAVALLARO if (priv->dma_cap.pcs) { 11020d909dcdSByungho An if ((interface == PHY_INTERFACE_MODE_RGMII) || 11030d909dcdSByungho An (interface == PHY_INTERFACE_MODE_RGMII_ID) || 11040d909dcdSByungho An (interface == PHY_INTERFACE_MODE_RGMII_RXID) || 11050d909dcdSByungho An (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { 110638ddc59dSLABBE Corentin netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); 11073fe5cadbSGiuseppe CAVALLARO priv->hw->pcs = STMMAC_PCS_RGMII; 11080d909dcdSByungho An } else if (interface == PHY_INTERFACE_MODE_SGMII) { 110938ddc59dSLABBE Corentin netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); 11103fe5cadbSGiuseppe CAVALLARO priv->hw->pcs = STMMAC_PCS_SGMII; 1111e58bb43fSGiuseppe CAVALLARO } 1112e58bb43fSGiuseppe CAVALLARO } 1113e58bb43fSGiuseppe CAVALLARO } 1114e58bb43fSGiuseppe CAVALLARO 11157ac6653aSJeff Kirsher /** 11167ac6653aSJeff Kirsher * stmmac_init_phy - PHY initialization 11177ac6653aSJeff Kirsher * @dev: net device structure 11187ac6653aSJeff Kirsher * Description: it initializes the driver's PHY state, and attaches the PHY 11197ac6653aSJeff Kirsher * to the mac driver. 
11207ac6653aSJeff Kirsher * Return value: 11217ac6653aSJeff Kirsher * 0 on success 11227ac6653aSJeff Kirsher */ 11237ac6653aSJeff Kirsher static int stmmac_init_phy(struct net_device *dev) 11247ac6653aSJeff Kirsher { 11257ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 1126ab21cf92SOng Boon Leong struct fwnode_handle *fwnode; 112774371272SJose Abreu int ret; 11287ac6653aSJeff Kirsher 1129ab21cf92SOng Boon Leong fwnode = of_fwnode_handle(priv->plat->phylink_node); 1130ab21cf92SOng Boon Leong if (!fwnode) 1131ab21cf92SOng Boon Leong fwnode = dev_fwnode(priv->device); 113274371272SJose Abreu 1133ab21cf92SOng Boon Leong if (fwnode) 1134ab21cf92SOng Boon Leong ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); 113542e87024SJose Abreu 113642e87024SJose Abreu /* Some DT bindings do not set-up the PHY handle. Let's try to 113742e87024SJose Abreu * manually parse it 113842e87024SJose Abreu */ 1139ab21cf92SOng Boon Leong if (!fwnode || ret) { 114074371272SJose Abreu int addr = priv->plat->phy_addr; 114174371272SJose Abreu struct phy_device *phydev; 1142f142af2eSSrinivas Kandagatla 114374371272SJose Abreu phydev = mdiobus_get_phy(priv->mii, addr); 114474371272SJose Abreu if (!phydev) { 114574371272SJose Abreu netdev_err(priv->dev, "no phy at addr %d\n", addr); 11467ac6653aSJeff Kirsher return -ENODEV; 11477ac6653aSJeff Kirsher } 11488e99fc5fSGiuseppe Cavallaro 114974371272SJose Abreu ret = phylink_connect_phy(priv->phylink, phydev); 115074371272SJose Abreu } 1151c51e424dSFlorian Fainelli 1152576f9eacSJoakim Zhang if (!priv->plat->pmt) { 1153576f9eacSJoakim Zhang struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 1154576f9eacSJoakim Zhang 11551d8e5b0fSJisheng Zhang phylink_ethtool_get_wol(priv->phylink, &wol); 11561d8e5b0fSJisheng Zhang device_set_wakeup_capable(priv->device, !!wol.supported); 1157576f9eacSJoakim Zhang } 11581d8e5b0fSJisheng Zhang 115974371272SJose Abreu return ret; 116074371272SJose Abreu } 116174371272SJose Abreu 
/* stmmac_phy_setup - create and configure the phylink instance.
 * @priv: driver private structure
 * Builds the phylink_config (supported interfaces and MAC capabilities,
 * clamped by the platform's max_speed) and creates the phylink handle.
 * Returns 0 on success or a negative errno from phylink_create().
 */
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int max_speed = priv->plat->max_speed;
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	/* Set the platform/firmware specified interface mode */
	__set_bit(mode, priv->phylink_config.supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		xpcs_get_interfaces(priv->hw->xpcs,
				    priv->phylink_config.supported_interfaces);

	/* Baseline capabilities; higher speeds are added below only if
	 * max_speed is unset (0) or permits them.
	 */
	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
						MAC_10 | MAC_100;

	if (!max_speed || max_speed >= 1000)
		priv->phylink_config.mac_capabilities |= MAC_1000;

	if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500)
			priv->phylink_config.mac_capabilities |= MAC_2500FD;
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || max_speed >= 2500)
			priv->phylink_config.mac_capabilities |= MAC_2500FD;
		if (!max_speed || max_speed >= 5000)
			priv->phylink_config.mac_capabilities |= MAC_5000FD;
		if (!max_speed || max_speed >= 10000)
			priv->phylink_config.mac_capabilities |= MAC_10000FD;
		if (!max_speed || max_speed >= 25000)
			priv->phylink_config.mac_capabilities |= MAC_25000FD;
		if (!max_speed || max_speed >= 40000)
			priv->phylink_config.mac_capabilities |= MAC_40000FD;
		if (!max_speed || max_speed >= 50000)
			priv->phylink_config.mac_capabilities |= MAC_50000FD;
		if (!max_speed || max_speed >= 100000)
			priv->phylink_config.mac_capabilities |= MAC_100000FD;
	}

	/* Half-Duplex can only work with single queue */
	if (priv->plat->tx_queues_to_use > 1)
		priv->phylink_config.mac_capabilities &=
			~(MAC_10HD | MAC_100HD | MAC_1000HD);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

/* Dump every RX queue's descriptor ring (debug aid, netif_msg_hw). */
static void stmmac_display_rx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		/* Extended descriptors use a larger per-entry size */
		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

/* Dump every TX queue's descriptor ring (debug aid, netif_msg_hw). */
static void stmmac_display_tx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		/* Three possible TX layouts: extended, TBS (enhanced), basic */
		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{ 128871fedb01SJoao Pinto /* Display RX ring */ 1289ba39b344SChristian Marangi stmmac_display_rx_rings(priv, dma_conf); 129071fedb01SJoao Pinto 129171fedb01SJoao Pinto /* Display TX ring */ 1292ba39b344SChristian Marangi stmmac_display_tx_rings(priv, dma_conf); 129371fedb01SJoao Pinto } 129471fedb01SJoao Pinto 1295286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize) 1296286a8372SGiuseppe CAVALLARO { 1297286a8372SGiuseppe CAVALLARO int ret = bufsize; 1298286a8372SGiuseppe CAVALLARO 1299b2f3a481SJose Abreu if (mtu >= BUF_SIZE_8KiB) 1300b2f3a481SJose Abreu ret = BUF_SIZE_16KiB; 1301b2f3a481SJose Abreu else if (mtu >= BUF_SIZE_4KiB) 1302286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_8KiB; 1303286a8372SGiuseppe CAVALLARO else if (mtu >= BUF_SIZE_2KiB) 1304286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_4KiB; 1305d916701cSGiuseppe CAVALLARO else if (mtu > DEFAULT_BUFSIZE) 1306286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_2KiB; 1307286a8372SGiuseppe CAVALLARO else 1308d916701cSGiuseppe CAVALLARO ret = DEFAULT_BUFSIZE; 1309286a8372SGiuseppe CAVALLARO 1310286a8372SGiuseppe CAVALLARO return ret; 1311286a8372SGiuseppe CAVALLARO } 1312286a8372SGiuseppe CAVALLARO 131332ceabcaSGiuseppe CAVALLARO /** 131471fedb01SJoao Pinto * stmmac_clear_rx_descriptors - clear RX descriptors 131532ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 1316ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 131754139cf3SJoao Pinto * @queue: RX queue index 131871fedb01SJoao Pinto * Description: this function is called to clear the RX descriptors 131932ceabcaSGiuseppe CAVALLARO * in case of both basic and extended descriptors are used. 
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* Clear the RX descriptors: re-init each entry, flagging the last
	 * one so the DMA wraps back to the head of the ring.
	 */
	for (i = 0; i < dma_conf->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * in case of both basic and extended descriptors are used.
134971fedb01SJoao Pinto */ 1350ba39b344SChristian Marangi static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, 1351ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1352ba39b344SChristian Marangi u32 queue) 135371fedb01SJoao Pinto { 1354ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 135571fedb01SJoao Pinto int i; 135671fedb01SJoao Pinto 135771fedb01SJoao Pinto /* Clear the TX descriptors */ 1358ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_tx_size; i++) { 1359ba39b344SChristian Marangi int last = (i == (dma_conf->dma_tx_size - 1)); 1360579a25a8SJose Abreu struct dma_desc *p; 1361579a25a8SJose Abreu 13625bacd778SLABBE Corentin if (priv->extend_desc) 1363579a25a8SJose Abreu p = &tx_q->dma_etx[i].basic; 1364579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1365579a25a8SJose Abreu p = &tx_q->dma_entx[i].basic; 13665bacd778SLABBE Corentin else 1367579a25a8SJose Abreu p = &tx_q->dma_tx[i]; 1368579a25a8SJose Abreu 1369579a25a8SJose Abreu stmmac_init_tx_desc(priv, p, priv->mode, last); 1370579a25a8SJose Abreu } 1371c24602efSGiuseppe CAVALLARO } 1372c24602efSGiuseppe CAVALLARO 1373732fdf0eSGiuseppe CAVALLARO /** 137471fedb01SJoao Pinto * stmmac_clear_descriptors - clear descriptors 137571fedb01SJoao Pinto * @priv: driver private structure 1376ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 137771fedb01SJoao Pinto * Description: this function is called to clear the TX and RX descriptors 137871fedb01SJoao Pinto * in case of both basic and extended descriptors are used. 
137971fedb01SJoao Pinto */ 1380ba39b344SChristian Marangi static void stmmac_clear_descriptors(struct stmmac_priv *priv, 1381ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 138271fedb01SJoao Pinto { 138354139cf3SJoao Pinto u32 rx_queue_cnt = priv->plat->rx_queues_to_use; 1384ce736788SJoao Pinto u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 138554139cf3SJoao Pinto u32 queue; 138654139cf3SJoao Pinto 138771fedb01SJoao Pinto /* Clear the RX descriptors */ 138854139cf3SJoao Pinto for (queue = 0; queue < rx_queue_cnt; queue++) 1389ba39b344SChristian Marangi stmmac_clear_rx_descriptors(priv, dma_conf, queue); 139071fedb01SJoao Pinto 139171fedb01SJoao Pinto /* Clear the TX descriptors */ 1392ce736788SJoao Pinto for (queue = 0; queue < tx_queue_cnt; queue++) 1393ba39b344SChristian Marangi stmmac_clear_tx_descriptors(priv, dma_conf, queue); 139471fedb01SJoao Pinto } 139571fedb01SJoao Pinto 139671fedb01SJoao Pinto /** 1397732fdf0eSGiuseppe CAVALLARO * stmmac_init_rx_buffers - init the RX descriptor buffer. 1398732fdf0eSGiuseppe CAVALLARO * @priv: driver private structure 1399ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1400732fdf0eSGiuseppe CAVALLARO * @p: descriptor pointer 1401732fdf0eSGiuseppe CAVALLARO * @i: descriptor index 140254139cf3SJoao Pinto * @flags: gfp flag 140354139cf3SJoao Pinto * @queue: RX queue index 1404732fdf0eSGiuseppe CAVALLARO * Description: this function is called to allocate a receive buffer, perform 1405732fdf0eSGiuseppe CAVALLARO * the DMA mapping and init the descriptor. 
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* Restrict allocations to the 32-bit DMA zone when the MAC cannot
	 * address more than 32 bits.
	 */
	if (priv->dma_cap.addr64 <= 32)
		gfp |= GFP_DMA32;

	/* Buffer may already be populated (e.g. on re-init); only allocate
	 * when missing.
	 */
	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	/* Split Header: allocate a secondary page for the payload */
	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @rx_q: RX queue
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
				  struct stmmac_rx_queue *rx_q,
				  int i)
{
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	/* Return both the main and (optional) split-header pages to the
	 * page pool and clear the stale pointers.
	 */
	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	/* XDP_TX buffers come from the RX page pool and are recycled
	 * elsewhere, so only unmap buffers this queue mapped itself.
	 */
	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Release XDP frames (both XDP_TX and ndo_xdp_xmit paths) */
	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	/* AF_XDP TX frames are completed in bulk via xsk_tx_completed();
	 * just account them here.
	 */
	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, rx_q, i);
}

/* stmmac_alloc_rx_buffers - populate every RX descriptor of @queue with a
 * page-pool buffer via stmmac_init_rx_buffers().
 * Returns 0 on success or a negative errno on allocation failure (the
 * already-allocated buffers are left in place for the caller to unwind).
 */
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
				   struct stmmac_dma_conf *dma_conf,
				   u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}

/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
				struct stmmac_dma_conf *dma_conf,
				u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

		if (!buf->xdp)
			continue;

		xsk_buff_free(buf->xdp);
		buf->xdp = NULL;
	}
}

/* stmmac_alloc_rx_buffers_zc - fill @queue's RX ring from its AF_XDP
 * (zero-copy) buffer pool. Returns -ENOMEM if the pool runs dry; the
 * caller tolerates partial fills (e.g. TX-only xdpsock).
 */
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}

/* stmmac_get_xsk_pool - return the AF_XDP buffer pool bound to @queue,
 * or NULL when XDP is disabled or the queue has no zero-copy binding.
 */
static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(priv->dev, queue);
}

/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Drop any previously registered memory model before choosing the
	 * new one (XSK pool vs page pool) below.
	 */
	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 0);
	}

	return 0;
}

/* init_dma_rx_desc_rings - initialize the RX rings of every queue; on
 * failure, free the buffers of all queues initialized so far (including
 * the partially-initialized one). @queue must stay signed for the
 * `queue >= 0` unwind loop to terminate.
 */
static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		/* Free via the allocator actually used for this queue */
		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}

/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	/* Clear every descriptor and its bookkeeping state */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}

/* init_dma_tx_desc_rings - initialize the TX rings of every queue.
 * Always returns 0 (the per-queue init cannot fail).
 */
static int init_dma_tx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt;
	u32 queue;

	tx_queue_cnt = priv->plat->tx_queues_to_use;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		__init_dma_tx_desc_rings(priv, dma_conf, queue);

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
181071fedb01SJoao Pinto */ 1811ba39b344SChristian Marangi static int init_dma_desc_rings(struct net_device *dev, 1812ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1813ba39b344SChristian Marangi gfp_t flags) 181471fedb01SJoao Pinto { 181571fedb01SJoao Pinto struct stmmac_priv *priv = netdev_priv(dev); 181671fedb01SJoao Pinto int ret; 181771fedb01SJoao Pinto 1818ba39b344SChristian Marangi ret = init_dma_rx_desc_rings(dev, dma_conf, flags); 181971fedb01SJoao Pinto if (ret) 182071fedb01SJoao Pinto return ret; 182171fedb01SJoao Pinto 1822ba39b344SChristian Marangi ret = init_dma_tx_desc_rings(dev, dma_conf); 182371fedb01SJoao Pinto 1824ba39b344SChristian Marangi stmmac_clear_descriptors(priv, dma_conf); 18257ac6653aSJeff Kirsher 1826c24602efSGiuseppe CAVALLARO if (netif_msg_hw(priv)) 1827ba39b344SChristian Marangi stmmac_display_rings(priv, dma_conf); 182856329137SBartlomiej Zolnierkiewicz 182956329137SBartlomiej Zolnierkiewicz return ret; 18307ac6653aSJeff Kirsher } 18317ac6653aSJeff Kirsher 183271fedb01SJoao Pinto /** 183371fedb01SJoao Pinto * dma_free_tx_skbufs - free TX dma buffers 183471fedb01SJoao Pinto * @priv: private structure 1835ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1836ce736788SJoao Pinto * @queue: TX queue index 183771fedb01SJoao Pinto */ 1838ba39b344SChristian Marangi static void dma_free_tx_skbufs(struct stmmac_priv *priv, 1839ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1840ba39b344SChristian Marangi u32 queue) 18417ac6653aSJeff Kirsher { 1842ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 18437ac6653aSJeff Kirsher int i; 18447ac6653aSJeff Kirsher 1845132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 1846132c32eeSOng Boon Leong 1847ba39b344SChristian Marangi for (i = 0; i < dma_conf->dma_tx_size; i++) 1848ba39b344SChristian Marangi stmmac_free_tx_buffer(priv, dma_conf, queue, i); 1849132c32eeSOng Boon Leong 1850132c32eeSOng Boon Leong if (tx_q->xsk_pool 
&& tx_q->xsk_frames_done) { 1851132c32eeSOng Boon Leong xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 1852132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 1853132c32eeSOng Boon Leong tx_q->xsk_pool = NULL; 1854132c32eeSOng Boon Leong } 18557ac6653aSJeff Kirsher } 18567ac6653aSJeff Kirsher 1857732fdf0eSGiuseppe CAVALLARO /** 18584ec236c7SFugang Duan * stmmac_free_tx_skbufs - free TX skb buffers 18594ec236c7SFugang Duan * @priv: private structure 18604ec236c7SFugang Duan */ 18614ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 18624ec236c7SFugang Duan { 18634ec236c7SFugang Duan u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 18644ec236c7SFugang Duan u32 queue; 18654ec236c7SFugang Duan 18664ec236c7SFugang Duan for (queue = 0; queue < tx_queue_cnt; queue++) 1867ba39b344SChristian Marangi dma_free_tx_skbufs(priv, &priv->dma_conf, queue); 18684ec236c7SFugang Duan } 18694ec236c7SFugang Duan 18704ec236c7SFugang Duan /** 1871da5ec7f2SOng Boon Leong * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) 187254139cf3SJoao Pinto * @priv: private structure 1873ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1874da5ec7f2SOng Boon Leong * @queue: RX queue index 187554139cf3SJoao Pinto */ 1876ba39b344SChristian Marangi static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, 1877ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1878ba39b344SChristian Marangi u32 queue) 187954139cf3SJoao Pinto { 1880ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 188154139cf3SJoao Pinto 188254139cf3SJoao Pinto /* Release the DMA RX socket buffers */ 1883bba2556eSOng Boon Leong if (rx_q->xsk_pool) 1884ba39b344SChristian Marangi dma_free_rx_xskbufs(priv, dma_conf, queue); 1885bba2556eSOng Boon Leong else 1886ba39b344SChristian Marangi dma_free_rx_skbufs(priv, dma_conf, queue); 188754139cf3SJoao Pinto 1888bba2556eSOng Boon Leong rx_q->buf_alloc_num = 0; 
1889bba2556eSOng Boon Leong rx_q->xsk_pool = NULL; 1890bba2556eSOng Boon Leong 189154139cf3SJoao Pinto /* Free DMA regions of consistent memory previously allocated */ 189254139cf3SJoao Pinto if (!priv->extend_desc) 1893ba39b344SChristian Marangi dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1894aa042f60SSong, Yoong Siang sizeof(struct dma_desc), 189554139cf3SJoao Pinto rx_q->dma_rx, rx_q->dma_rx_phy); 189654139cf3SJoao Pinto else 1897ba39b344SChristian Marangi dma_free_coherent(priv->device, dma_conf->dma_rx_size * 189854139cf3SJoao Pinto sizeof(struct dma_extended_desc), 189954139cf3SJoao Pinto rx_q->dma_erx, rx_q->dma_rx_phy); 190054139cf3SJoao Pinto 1901be8b38a7SOng Boon Leong if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) 1902be8b38a7SOng Boon Leong xdp_rxq_info_unreg(&rx_q->xdp_rxq); 1903be8b38a7SOng Boon Leong 19042af6106aSJose Abreu kfree(rx_q->buf_pool); 1905c3f812ceSJonathan Lemon if (rx_q->page_pool) 19062af6106aSJose Abreu page_pool_destroy(rx_q->page_pool); 19072af6106aSJose Abreu } 1908da5ec7f2SOng Boon Leong 1909ba39b344SChristian Marangi static void free_dma_rx_desc_resources(struct stmmac_priv *priv, 1910ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 1911da5ec7f2SOng Boon Leong { 1912da5ec7f2SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 1913da5ec7f2SOng Boon Leong u32 queue; 1914da5ec7f2SOng Boon Leong 1915da5ec7f2SOng Boon Leong /* Free RX queue resources */ 1916da5ec7f2SOng Boon Leong for (queue = 0; queue < rx_count; queue++) 1917ba39b344SChristian Marangi __free_dma_rx_desc_resources(priv, dma_conf, queue); 191854139cf3SJoao Pinto } 191954139cf3SJoao Pinto 192054139cf3SJoao Pinto /** 1921da5ec7f2SOng Boon Leong * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) 1922ce736788SJoao Pinto * @priv: private structure 1923ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1924da5ec7f2SOng Boon Leong * @queue: TX queue index 1925ce736788SJoao Pinto */ 1926ba39b344SChristian 
Marangi static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, 1927ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1928ba39b344SChristian Marangi u32 queue) 1929ce736788SJoao Pinto { 1930ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1931579a25a8SJose Abreu size_t size; 1932579a25a8SJose Abreu void *addr; 1933ce736788SJoao Pinto 1934ce736788SJoao Pinto /* Release the DMA TX socket buffers */ 1935ba39b344SChristian Marangi dma_free_tx_skbufs(priv, dma_conf, queue); 1936ce736788SJoao Pinto 1937579a25a8SJose Abreu if (priv->extend_desc) { 1938579a25a8SJose Abreu size = sizeof(struct dma_extended_desc); 1939579a25a8SJose Abreu addr = tx_q->dma_etx; 1940579a25a8SJose Abreu } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1941579a25a8SJose Abreu size = sizeof(struct dma_edesc); 1942579a25a8SJose Abreu addr = tx_q->dma_entx; 1943579a25a8SJose Abreu } else { 1944579a25a8SJose Abreu size = sizeof(struct dma_desc); 1945579a25a8SJose Abreu addr = tx_q->dma_tx; 1946579a25a8SJose Abreu } 1947579a25a8SJose Abreu 1948ba39b344SChristian Marangi size *= dma_conf->dma_tx_size; 1949579a25a8SJose Abreu 1950579a25a8SJose Abreu dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); 1951ce736788SJoao Pinto 1952ce736788SJoao Pinto kfree(tx_q->tx_skbuff_dma); 1953ce736788SJoao Pinto kfree(tx_q->tx_skbuff); 1954ce736788SJoao Pinto } 1955da5ec7f2SOng Boon Leong 1956ba39b344SChristian Marangi static void free_dma_tx_desc_resources(struct stmmac_priv *priv, 1957ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 1958da5ec7f2SOng Boon Leong { 1959da5ec7f2SOng Boon Leong u32 tx_count = priv->plat->tx_queues_to_use; 1960da5ec7f2SOng Boon Leong u32 queue; 1961da5ec7f2SOng Boon Leong 1962da5ec7f2SOng Boon Leong /* Free TX queue resources */ 1963da5ec7f2SOng Boon Leong for (queue = 0; queue < tx_count; queue++) 1964ba39b344SChristian Marangi __free_dma_tx_desc_resources(priv, dma_conf, queue); 1965ce736788SJoao Pinto } 
1966ce736788SJoao Pinto 1967ce736788SJoao Pinto /** 1968da5ec7f2SOng Boon Leong * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). 1969732fdf0eSGiuseppe CAVALLARO * @priv: private structure 1970ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 1971da5ec7f2SOng Boon Leong * @queue: RX queue index 1972732fdf0eSGiuseppe CAVALLARO * Description: according to which descriptor can be used (extend or basic) 1973732fdf0eSGiuseppe CAVALLARO * this function allocates the resources for TX and RX paths. In case of 1974732fdf0eSGiuseppe CAVALLARO * reception, for example, it pre-allocated the RX socket buffer in order to 1975732fdf0eSGiuseppe CAVALLARO * allow zero-copy mechanism. 1976732fdf0eSGiuseppe CAVALLARO */ 1977ba39b344SChristian Marangi static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, 1978ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 1979ba39b344SChristian Marangi u32 queue) 198009f8d696SSrinivas Kandagatla { 1981ba39b344SChristian Marangi struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1982be8b38a7SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 1983da5ec7f2SOng Boon Leong bool xdp_prog = stmmac_xdp_is_enabled(priv); 19842af6106aSJose Abreu struct page_pool_params pp_params = { 0 }; 19854f28bd95SThierry Reding unsigned int num_pages; 1986132c32eeSOng Boon Leong unsigned int napi_id; 1987be8b38a7SOng Boon Leong int ret; 198854139cf3SJoao Pinto 198954139cf3SJoao Pinto rx_q->queue_index = queue; 199054139cf3SJoao Pinto rx_q->priv_data = priv; 199154139cf3SJoao Pinto 19925fabb012SOng Boon Leong pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 1993ba39b344SChristian Marangi pp_params.pool_size = dma_conf->dma_rx_size; 1994ba39b344SChristian Marangi num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); 19954f28bd95SThierry Reding pp_params.order = ilog2(num_pages); 19962af6106aSJose Abreu pp_params.nid = dev_to_node(priv->device); 19972af6106aSJose Abreu 
pp_params.dev = priv->device; 19985fabb012SOng Boon Leong pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 19995fabb012SOng Boon Leong pp_params.offset = stmmac_rx_offset(priv); 20005fabb012SOng Boon Leong pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); 20015bacd778SLABBE Corentin 20022af6106aSJose Abreu rx_q->page_pool = page_pool_create(&pp_params); 20032af6106aSJose Abreu if (IS_ERR(rx_q->page_pool)) { 20042af6106aSJose Abreu ret = PTR_ERR(rx_q->page_pool); 20052af6106aSJose Abreu rx_q->page_pool = NULL; 2006da5ec7f2SOng Boon Leong return ret; 20072af6106aSJose Abreu } 20082af6106aSJose Abreu 2009ba39b344SChristian Marangi rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, 2010aa042f60SSong, Yoong Siang sizeof(*rx_q->buf_pool), 20115bacd778SLABBE Corentin GFP_KERNEL); 20122af6106aSJose Abreu if (!rx_q->buf_pool) 2013da5ec7f2SOng Boon Leong return -ENOMEM; 20145bacd778SLABBE Corentin 20155bacd778SLABBE Corentin if (priv->extend_desc) { 2016750afb08SLuis Chamberlain rx_q->dma_erx = dma_alloc_coherent(priv->device, 2017ba39b344SChristian Marangi dma_conf->dma_rx_size * 2018aa042f60SSong, Yoong Siang sizeof(struct dma_extended_desc), 201954139cf3SJoao Pinto &rx_q->dma_rx_phy, 20205bacd778SLABBE Corentin GFP_KERNEL); 202154139cf3SJoao Pinto if (!rx_q->dma_erx) 2022da5ec7f2SOng Boon Leong return -ENOMEM; 20235bacd778SLABBE Corentin 202471fedb01SJoao Pinto } else { 2025750afb08SLuis Chamberlain rx_q->dma_rx = dma_alloc_coherent(priv->device, 2026ba39b344SChristian Marangi dma_conf->dma_rx_size * 2027aa042f60SSong, Yoong Siang sizeof(struct dma_desc), 202854139cf3SJoao Pinto &rx_q->dma_rx_phy, 202971fedb01SJoao Pinto GFP_KERNEL); 203054139cf3SJoao Pinto if (!rx_q->dma_rx) 2031da5ec7f2SOng Boon Leong return -ENOMEM; 203271fedb01SJoao Pinto } 2033be8b38a7SOng Boon Leong 2034132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 2035132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) 2036132c32eeSOng Boon Leong napi_id = 
ch->rxtx_napi.napi_id; 2037132c32eeSOng Boon Leong else 2038132c32eeSOng Boon Leong napi_id = ch->rx_napi.napi_id; 2039132c32eeSOng Boon Leong 2040be8b38a7SOng Boon Leong ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, 2041be8b38a7SOng Boon Leong rx_q->queue_index, 2042132c32eeSOng Boon Leong napi_id); 2043be8b38a7SOng Boon Leong if (ret) { 2044be8b38a7SOng Boon Leong netdev_err(priv->dev, "Failed to register xdp rxq info\n"); 2045da5ec7f2SOng Boon Leong return -EINVAL; 2046be8b38a7SOng Boon Leong } 2047da5ec7f2SOng Boon Leong 2048da5ec7f2SOng Boon Leong return 0; 2049da5ec7f2SOng Boon Leong } 2050da5ec7f2SOng Boon Leong 2051ba39b344SChristian Marangi static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, 2052ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 2053da5ec7f2SOng Boon Leong { 2054da5ec7f2SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 2055da5ec7f2SOng Boon Leong u32 queue; 2056da5ec7f2SOng Boon Leong int ret; 2057da5ec7f2SOng Boon Leong 2058da5ec7f2SOng Boon Leong /* RX queues buffers and DMA */ 2059da5ec7f2SOng Boon Leong for (queue = 0; queue < rx_count; queue++) { 2060ba39b344SChristian Marangi ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); 2061da5ec7f2SOng Boon Leong if (ret) 2062da5ec7f2SOng Boon Leong goto err_dma; 206354139cf3SJoao Pinto } 206471fedb01SJoao Pinto 206571fedb01SJoao Pinto return 0; 206671fedb01SJoao Pinto 206771fedb01SJoao Pinto err_dma: 2068ba39b344SChristian Marangi free_dma_rx_desc_resources(priv, dma_conf); 206954139cf3SJoao Pinto 207071fedb01SJoao Pinto return ret; 207171fedb01SJoao Pinto } 207271fedb01SJoao Pinto 207371fedb01SJoao Pinto /** 2074da5ec7f2SOng Boon Leong * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). 
207571fedb01SJoao Pinto * @priv: private structure 2076ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 2077da5ec7f2SOng Boon Leong * @queue: TX queue index 207871fedb01SJoao Pinto * Description: according to which descriptor can be used (extend or basic) 207971fedb01SJoao Pinto * this function allocates the resources for TX and RX paths. In case of 208071fedb01SJoao Pinto * reception, for example, it pre-allocated the RX socket buffer in order to 208171fedb01SJoao Pinto * allow zero-copy mechanism. 208271fedb01SJoao Pinto */ 2083ba39b344SChristian Marangi static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2084ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf, 2085ba39b344SChristian Marangi u32 queue) 208671fedb01SJoao Pinto { 2087ba39b344SChristian Marangi struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 2088579a25a8SJose Abreu size_t size; 2089579a25a8SJose Abreu void *addr; 2090ce736788SJoao Pinto 2091ce736788SJoao Pinto tx_q->queue_index = queue; 2092ce736788SJoao Pinto tx_q->priv_data = priv; 2093ce736788SJoao Pinto 2094ba39b344SChristian Marangi tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, 2095ce736788SJoao Pinto sizeof(*tx_q->tx_skbuff_dma), 209671fedb01SJoao Pinto GFP_KERNEL); 2097ce736788SJoao Pinto if (!tx_q->tx_skbuff_dma) 2098da5ec7f2SOng Boon Leong return -ENOMEM; 209971fedb01SJoao Pinto 2100ba39b344SChristian Marangi tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, 2101ce736788SJoao Pinto sizeof(struct sk_buff *), 210271fedb01SJoao Pinto GFP_KERNEL); 2103ce736788SJoao Pinto if (!tx_q->tx_skbuff) 2104da5ec7f2SOng Boon Leong return -ENOMEM; 210571fedb01SJoao Pinto 2106579a25a8SJose Abreu if (priv->extend_desc) 2107579a25a8SJose Abreu size = sizeof(struct dma_extended_desc); 2108579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2109579a25a8SJose Abreu size = sizeof(struct dma_edesc); 2110579a25a8SJose Abreu else 2111579a25a8SJose Abreu size = sizeof(struct dma_desc); 
2112579a25a8SJose Abreu 2113ba39b344SChristian Marangi size *= dma_conf->dma_tx_size; 2114579a25a8SJose Abreu 2115579a25a8SJose Abreu addr = dma_alloc_coherent(priv->device, size, 2116579a25a8SJose Abreu &tx_q->dma_tx_phy, GFP_KERNEL); 2117579a25a8SJose Abreu if (!addr) 2118da5ec7f2SOng Boon Leong return -ENOMEM; 2119579a25a8SJose Abreu 2120579a25a8SJose Abreu if (priv->extend_desc) 2121579a25a8SJose Abreu tx_q->dma_etx = addr; 2122579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2123579a25a8SJose Abreu tx_q->dma_entx = addr; 2124579a25a8SJose Abreu else 2125579a25a8SJose Abreu tx_q->dma_tx = addr; 2126da5ec7f2SOng Boon Leong 2127da5ec7f2SOng Boon Leong return 0; 2128da5ec7f2SOng Boon Leong } 2129da5ec7f2SOng Boon Leong 2130ba39b344SChristian Marangi static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2131ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 2132da5ec7f2SOng Boon Leong { 2133da5ec7f2SOng Boon Leong u32 tx_count = priv->plat->tx_queues_to_use; 2134da5ec7f2SOng Boon Leong u32 queue; 2135da5ec7f2SOng Boon Leong int ret; 2136da5ec7f2SOng Boon Leong 2137da5ec7f2SOng Boon Leong /* TX queues buffers and DMA */ 2138da5ec7f2SOng Boon Leong for (queue = 0; queue < tx_count; queue++) { 2139ba39b344SChristian Marangi ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); 2140da5ec7f2SOng Boon Leong if (ret) 2141da5ec7f2SOng Boon Leong goto err_dma; 21425bacd778SLABBE Corentin } 21435bacd778SLABBE Corentin 21445bacd778SLABBE Corentin return 0; 21455bacd778SLABBE Corentin 214662242260SChristophe Jaillet err_dma: 2147ba39b344SChristian Marangi free_dma_tx_desc_resources(priv, dma_conf); 214809f8d696SSrinivas Kandagatla return ret; 21495bacd778SLABBE Corentin } 215009f8d696SSrinivas Kandagatla 215171fedb01SJoao Pinto /** 215271fedb01SJoao Pinto * alloc_dma_desc_resources - alloc TX/RX resources. 
215371fedb01SJoao Pinto * @priv: private structure 2154ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 215571fedb01SJoao Pinto * Description: according to which descriptor can be used (extend or basic) 215671fedb01SJoao Pinto * this function allocates the resources for TX and RX paths. In case of 215771fedb01SJoao Pinto * reception, for example, it pre-allocated the RX socket buffer in order to 215871fedb01SJoao Pinto * allow zero-copy mechanism. 215971fedb01SJoao Pinto */ 2160ba39b344SChristian Marangi static int alloc_dma_desc_resources(struct stmmac_priv *priv, 2161ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 21625bacd778SLABBE Corentin { 216354139cf3SJoao Pinto /* RX Allocation */ 2164ba39b344SChristian Marangi int ret = alloc_dma_rx_desc_resources(priv, dma_conf); 216571fedb01SJoao Pinto 216671fedb01SJoao Pinto if (ret) 216771fedb01SJoao Pinto return ret; 216871fedb01SJoao Pinto 2169ba39b344SChristian Marangi ret = alloc_dma_tx_desc_resources(priv, dma_conf); 217071fedb01SJoao Pinto 217171fedb01SJoao Pinto return ret; 217271fedb01SJoao Pinto } 217371fedb01SJoao Pinto 217471fedb01SJoao Pinto /** 217571fedb01SJoao Pinto * free_dma_desc_resources - free dma desc resources 217671fedb01SJoao Pinto * @priv: private structure 2177ba39b344SChristian Marangi * @dma_conf: structure to take the dma data 217871fedb01SJoao Pinto */ 2179ba39b344SChristian Marangi static void free_dma_desc_resources(struct stmmac_priv *priv, 2180ba39b344SChristian Marangi struct stmmac_dma_conf *dma_conf) 218171fedb01SJoao Pinto { 218271fedb01SJoao Pinto /* Release the DMA TX socket buffers */ 2183ba39b344SChristian Marangi free_dma_tx_desc_resources(priv, dma_conf); 2184be8b38a7SOng Boon Leong 2185be8b38a7SOng Boon Leong /* Release the DMA RX socket buffers later 2186be8b38a7SOng Boon Leong * to ensure all pending XDP_TX buffers are returned. 
2187be8b38a7SOng Boon Leong */ 2188ba39b344SChristian Marangi free_dma_rx_desc_resources(priv, dma_conf); 218971fedb01SJoao Pinto } 219071fedb01SJoao Pinto 219171fedb01SJoao Pinto /** 21929eb12474Sjpinto * stmmac_mac_enable_rx_queues - Enable MAC rx queues 21939eb12474Sjpinto * @priv: driver private structure 21949eb12474Sjpinto * Description: It is used for enabling the rx queues in the MAC 21959eb12474Sjpinto */ 21969eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 21979eb12474Sjpinto { 21984f6046f5SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 21994f6046f5SJoao Pinto int queue; 22004f6046f5SJoao Pinto u8 mode; 22019eb12474Sjpinto 22024f6046f5SJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 22034f6046f5SJoao Pinto mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 2204c10d4c82SJose Abreu stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 22054f6046f5SJoao Pinto } 22069eb12474Sjpinto } 22079eb12474Sjpinto 22089eb12474Sjpinto /** 2209ae4f0d46SJoao Pinto * stmmac_start_rx_dma - start RX DMA channel 2210ae4f0d46SJoao Pinto * @priv: driver private structure 2211ae4f0d46SJoao Pinto * @chan: RX channel index 2212ae4f0d46SJoao Pinto * Description: 2213ae4f0d46SJoao Pinto * This starts a RX DMA channel 2214ae4f0d46SJoao Pinto */ 2215ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2216ae4f0d46SJoao Pinto { 2217ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2218a4e887faSJose Abreu stmmac_start_rx(priv, priv->ioaddr, chan); 2219ae4f0d46SJoao Pinto } 2220ae4f0d46SJoao Pinto 2221ae4f0d46SJoao Pinto /** 2222ae4f0d46SJoao Pinto * stmmac_start_tx_dma - start TX DMA channel 2223ae4f0d46SJoao Pinto * @priv: driver private structure 2224ae4f0d46SJoao Pinto * @chan: TX channel index 2225ae4f0d46SJoao Pinto * Description: 2226ae4f0d46SJoao Pinto * This starts a TX DMA channel 2227ae4f0d46SJoao Pinto */ 2228ae4f0d46SJoao Pinto static 
void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2229ae4f0d46SJoao Pinto { 2230ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2231a4e887faSJose Abreu stmmac_start_tx(priv, priv->ioaddr, chan); 2232ae4f0d46SJoao Pinto } 2233ae4f0d46SJoao Pinto 2234ae4f0d46SJoao Pinto /** 2235ae4f0d46SJoao Pinto * stmmac_stop_rx_dma - stop RX DMA channel 2236ae4f0d46SJoao Pinto * @priv: driver private structure 2237ae4f0d46SJoao Pinto * @chan: RX channel index 2238ae4f0d46SJoao Pinto * Description: 2239ae4f0d46SJoao Pinto * This stops a RX DMA channel 2240ae4f0d46SJoao Pinto */ 2241ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2242ae4f0d46SJoao Pinto { 2243ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2244a4e887faSJose Abreu stmmac_stop_rx(priv, priv->ioaddr, chan); 2245ae4f0d46SJoao Pinto } 2246ae4f0d46SJoao Pinto 2247ae4f0d46SJoao Pinto /** 2248ae4f0d46SJoao Pinto * stmmac_stop_tx_dma - stop TX DMA channel 2249ae4f0d46SJoao Pinto * @priv: driver private structure 2250ae4f0d46SJoao Pinto * @chan: TX channel index 2251ae4f0d46SJoao Pinto * Description: 2252ae4f0d46SJoao Pinto * This stops a TX DMA channel 2253ae4f0d46SJoao Pinto */ 2254ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2255ae4f0d46SJoao Pinto { 2256ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2257a4e887faSJose Abreu stmmac_stop_tx(priv, priv->ioaddr, chan); 2258ae4f0d46SJoao Pinto } 2259ae4f0d46SJoao Pinto 2260087a7b94SVincent Whitchurch static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) 2261087a7b94SVincent Whitchurch { 2262087a7b94SVincent Whitchurch u32 rx_channels_count = priv->plat->rx_queues_to_use; 2263087a7b94SVincent Whitchurch u32 tx_channels_count = priv->plat->tx_queues_to_use; 2264087a7b94SVincent Whitchurch u32 dma_csr_ch = max(rx_channels_count, 
tx_channels_count); 2265087a7b94SVincent Whitchurch u32 chan; 2266087a7b94SVincent Whitchurch 2267087a7b94SVincent Whitchurch for (chan = 0; chan < dma_csr_ch; chan++) { 2268087a7b94SVincent Whitchurch struct stmmac_channel *ch = &priv->channel[chan]; 2269087a7b94SVincent Whitchurch unsigned long flags; 2270087a7b94SVincent Whitchurch 2271087a7b94SVincent Whitchurch spin_lock_irqsave(&ch->lock, flags); 2272087a7b94SVincent Whitchurch stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2273087a7b94SVincent Whitchurch spin_unlock_irqrestore(&ch->lock, flags); 2274087a7b94SVincent Whitchurch } 2275087a7b94SVincent Whitchurch } 2276087a7b94SVincent Whitchurch 2277ae4f0d46SJoao Pinto /** 2278ae4f0d46SJoao Pinto * stmmac_start_all_dma - start all RX and TX DMA channels 2279ae4f0d46SJoao Pinto * @priv: driver private structure 2280ae4f0d46SJoao Pinto * Description: 2281ae4f0d46SJoao Pinto * This starts all the RX and TX DMA channels 2282ae4f0d46SJoao Pinto */ 2283ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv) 2284ae4f0d46SJoao Pinto { 2285ae4f0d46SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 2286ae4f0d46SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2287ae4f0d46SJoao Pinto u32 chan = 0; 2288ae4f0d46SJoao Pinto 2289ae4f0d46SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 2290ae4f0d46SJoao Pinto stmmac_start_rx_dma(priv, chan); 2291ae4f0d46SJoao Pinto 2292ae4f0d46SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 2293ae4f0d46SJoao Pinto stmmac_start_tx_dma(priv, chan); 2294ae4f0d46SJoao Pinto } 2295ae4f0d46SJoao Pinto 2296ae4f0d46SJoao Pinto /** 2297ae4f0d46SJoao Pinto * stmmac_stop_all_dma - stop all RX and TX DMA channels 2298ae4f0d46SJoao Pinto * @priv: driver private structure 2299ae4f0d46SJoao Pinto * Description: 2300ae4f0d46SJoao Pinto * This stops the RX and TX DMA channels 2301ae4f0d46SJoao Pinto */ 2302ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct 
stmmac_priv *priv) 2303ae4f0d46SJoao Pinto { 2304ae4f0d46SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 2305ae4f0d46SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2306ae4f0d46SJoao Pinto u32 chan = 0; 2307ae4f0d46SJoao Pinto 2308ae4f0d46SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 2309ae4f0d46SJoao Pinto stmmac_stop_rx_dma(priv, chan); 2310ae4f0d46SJoao Pinto 2311ae4f0d46SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 2312ae4f0d46SJoao Pinto stmmac_stop_tx_dma(priv, chan); 2313ae4f0d46SJoao Pinto } 2314ae4f0d46SJoao Pinto 2315ae4f0d46SJoao Pinto /** 23167ac6653aSJeff Kirsher * stmmac_dma_operation_mode - HW DMA operation mode 231732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 2318732fdf0eSGiuseppe CAVALLARO * Description: it is used for configuring the DMA operation mode register in 2319732fdf0eSGiuseppe CAVALLARO * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 23207ac6653aSJeff Kirsher */ 23217ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 23227ac6653aSJeff Kirsher { 23236deee222SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 23246deee222SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2325f88203a2SVince Bridgers int rxfifosz = priv->plat->rx_fifo_size; 232652a76235SJose Abreu int txfifosz = priv->plat->tx_fifo_size; 23276deee222SJoao Pinto u32 txmode = 0; 23286deee222SJoao Pinto u32 rxmode = 0; 23296deee222SJoao Pinto u32 chan = 0; 2330a0daae13SJose Abreu u8 qmode = 0; 2331f88203a2SVince Bridgers 233211fbf811SThierry Reding if (rxfifosz == 0) 233311fbf811SThierry Reding rxfifosz = priv->dma_cap.rx_fifo_size; 233452a76235SJose Abreu if (txfifosz == 0) 233552a76235SJose Abreu txfifosz = priv->dma_cap.tx_fifo_size; 233652a76235SJose Abreu 233752a76235SJose Abreu /* Adjust for real per queue fifo size */ 233852a76235SJose Abreu rxfifosz /= rx_channels_count; 233952a76235SJose Abreu txfifosz 
/= tx_channels_count; 234011fbf811SThierry Reding 23416deee222SJoao Pinto if (priv->plat->force_thresh_dma_mode) { 23426deee222SJoao Pinto txmode = tc; 23436deee222SJoao Pinto rxmode = tc; 23446deee222SJoao Pinto } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 23457ac6653aSJeff Kirsher /* 23467ac6653aSJeff Kirsher * In case of GMAC, SF mode can be enabled 23477ac6653aSJeff Kirsher * to perform the TX COE in HW. This depends on: 23487ac6653aSJeff Kirsher * 1) TX COE if actually supported 23497ac6653aSJeff Kirsher * 2) There is no bugged Jumbo frame support 23507ac6653aSJeff Kirsher * that needs to not insert csum in the TDES. 23517ac6653aSJeff Kirsher */ 23526deee222SJoao Pinto txmode = SF_DMA_MODE; 23536deee222SJoao Pinto rxmode = SF_DMA_MODE; 2354b2dec116SSonic Zhang priv->xstats.threshold = SF_DMA_MODE; 23556deee222SJoao Pinto } else { 23566deee222SJoao Pinto txmode = tc; 23576deee222SJoao Pinto rxmode = SF_DMA_MODE; 23586deee222SJoao Pinto } 23596deee222SJoao Pinto 23606deee222SJoao Pinto /* configure all channels */ 2361a0daae13SJose Abreu for (chan = 0; chan < rx_channels_count; chan++) { 23628531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2363bba2556eSOng Boon Leong u32 buf_size; 2364bba2556eSOng Boon Leong 2365a0daae13SJose Abreu qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 23666deee222SJoao Pinto 2367a4e887faSJose Abreu stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2368a0daae13SJose Abreu rxfifosz, qmode); 2369bba2556eSOng Boon Leong 2370bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 2371bba2556eSOng Boon Leong buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 2372bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 2373bba2556eSOng Boon Leong buf_size, 23744205c88eSJose Abreu chan); 2375bba2556eSOng Boon Leong } else { 2376bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 23778531c808SChristian Marangi priv->dma_conf.dma_buf_sz, 2378bba2556eSOng Boon 
Leong chan); 2379bba2556eSOng Boon Leong } 2380a0daae13SJose Abreu } 2381a0daae13SJose Abreu 2382a0daae13SJose Abreu for (chan = 0; chan < tx_channels_count; chan++) { 2383a0daae13SJose Abreu qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2384a0daae13SJose Abreu 2385a4e887faSJose Abreu stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2386a0daae13SJose Abreu txfifosz, qmode); 2387a0daae13SJose Abreu } 23887ac6653aSJeff Kirsher } 23897ac6653aSJeff Kirsher 2390132c32eeSOng Boon Leong static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 2391132c32eeSOng Boon Leong { 2392132c32eeSOng Boon Leong struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); 23938531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2394132c32eeSOng Boon Leong struct xsk_buff_pool *pool = tx_q->xsk_pool; 2395132c32eeSOng Boon Leong unsigned int entry = tx_q->cur_tx; 2396132c32eeSOng Boon Leong struct dma_desc *tx_desc = NULL; 2397132c32eeSOng Boon Leong struct xdp_desc xdp_desc; 2398132c32eeSOng Boon Leong bool work_done = true; 2399132c32eeSOng Boon Leong 2400132c32eeSOng Boon Leong /* Avoids TX time-out as we are sharing with slow path */ 2401e92af33eSAlexander Lobakin txq_trans_cond_update(nq); 2402132c32eeSOng Boon Leong 2403132c32eeSOng Boon Leong budget = min(budget, stmmac_tx_avail(priv, queue)); 2404132c32eeSOng Boon Leong 2405132c32eeSOng Boon Leong while (budget-- > 0) { 2406132c32eeSOng Boon Leong dma_addr_t dma_addr; 2407132c32eeSOng Boon Leong bool set_ic; 2408132c32eeSOng Boon Leong 2409132c32eeSOng Boon Leong /* We are sharing with slow path and stop XSK TX desc submission when 2410132c32eeSOng Boon Leong * available TX ring is less than threshold. 
2411132c32eeSOng Boon Leong */ 2412132c32eeSOng Boon Leong if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || 2413132c32eeSOng Boon Leong !netif_carrier_ok(priv->dev)) { 2414132c32eeSOng Boon Leong work_done = false; 2415132c32eeSOng Boon Leong break; 2416132c32eeSOng Boon Leong } 2417132c32eeSOng Boon Leong 2418132c32eeSOng Boon Leong if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2419132c32eeSOng Boon Leong break; 2420132c32eeSOng Boon Leong 2421132c32eeSOng Boon Leong if (likely(priv->extend_desc)) 2422132c32eeSOng Boon Leong tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2423132c32eeSOng Boon Leong else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2424132c32eeSOng Boon Leong tx_desc = &tx_q->dma_entx[entry].basic; 2425132c32eeSOng Boon Leong else 2426132c32eeSOng Boon Leong tx_desc = tx_q->dma_tx + entry; 2427132c32eeSOng Boon Leong 2428132c32eeSOng Boon Leong dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2429132c32eeSOng Boon Leong xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); 2430132c32eeSOng Boon Leong 2431132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; 2432132c32eeSOng Boon Leong 2433132c32eeSOng Boon Leong /* To return XDP buffer to XSK pool, we simple call 2434132c32eeSOng Boon Leong * xsk_tx_completed(), so we don't need to fill up 2435132c32eeSOng Boon Leong * 'buf' and 'xdpf'. 
2436132c32eeSOng Boon Leong */ 2437132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf = 0; 2438132c32eeSOng Boon Leong tx_q->xdpf[entry] = NULL; 2439132c32eeSOng Boon Leong 2440132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].map_as_page = false; 2441132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; 2442132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].last_segment = true; 2443132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2444132c32eeSOng Boon Leong 2445132c32eeSOng Boon Leong stmmac_set_desc_addr(priv, tx_desc, dma_addr); 2446132c32eeSOng Boon Leong 2447132c32eeSOng Boon Leong tx_q->tx_count_frames++; 2448132c32eeSOng Boon Leong 2449132c32eeSOng Boon Leong if (!priv->tx_coal_frames[queue]) 2450132c32eeSOng Boon Leong set_ic = false; 2451132c32eeSOng Boon Leong else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 2452132c32eeSOng Boon Leong set_ic = true; 2453132c32eeSOng Boon Leong else 2454132c32eeSOng Boon Leong set_ic = false; 2455132c32eeSOng Boon Leong 2456132c32eeSOng Boon Leong if (set_ic) { 2457132c32eeSOng Boon Leong tx_q->tx_count_frames = 0; 2458132c32eeSOng Boon Leong stmmac_set_tx_ic(priv, tx_desc); 2459132c32eeSOng Boon Leong priv->xstats.tx_set_ic_bit++; 2460132c32eeSOng Boon Leong } 2461132c32eeSOng Boon Leong 2462132c32eeSOng Boon Leong stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, 2463132c32eeSOng Boon Leong true, priv->mode, true, true, 2464132c32eeSOng Boon Leong xdp_desc.len); 2465132c32eeSOng Boon Leong 2466132c32eeSOng Boon Leong stmmac_enable_dma_transmission(priv, priv->ioaddr); 2467132c32eeSOng Boon Leong 24688531c808SChristian Marangi tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 2469132c32eeSOng Boon Leong entry = tx_q->cur_tx; 2470132c32eeSOng Boon Leong } 2471132c32eeSOng Boon Leong 2472132c32eeSOng Boon Leong if (tx_desc) { 2473132c32eeSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 2474132c32eeSOng Boon Leong 
xsk_tx_release(pool); 2475132c32eeSOng Boon Leong } 2476132c32eeSOng Boon Leong 2477132c32eeSOng Boon Leong /* Return true if all of the 3 conditions are met 2478132c32eeSOng Boon Leong * a) TX Budget is still available 2479132c32eeSOng Boon Leong * b) work_done = true when XSK TX desc peek is empty (no more 2480132c32eeSOng Boon Leong * pending XSK TX for transmission) 2481132c32eeSOng Boon Leong */ 2482132c32eeSOng Boon Leong return !!budget && work_done; 2483132c32eeSOng Boon Leong } 2484132c32eeSOng Boon Leong 24853a6c12a0SXiaoliang Yang static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) 24863a6c12a0SXiaoliang Yang { 24873a6c12a0SXiaoliang Yang if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { 24883a6c12a0SXiaoliang Yang tc += 64; 24893a6c12a0SXiaoliang Yang 24903a6c12a0SXiaoliang Yang if (priv->plat->force_thresh_dma_mode) 24913a6c12a0SXiaoliang Yang stmmac_set_dma_operation_mode(priv, tc, tc, chan); 24923a6c12a0SXiaoliang Yang else 24933a6c12a0SXiaoliang Yang stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, 24943a6c12a0SXiaoliang Yang chan); 24953a6c12a0SXiaoliang Yang 24963a6c12a0SXiaoliang Yang priv->xstats.threshold = tc; 24973a6c12a0SXiaoliang Yang } 24983a6c12a0SXiaoliang Yang } 24993a6c12a0SXiaoliang Yang 25007ac6653aSJeff Kirsher /** 2501732fdf0eSGiuseppe CAVALLARO * stmmac_tx_clean - to manage the transmission completion 250232ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 2503d0ea5cbdSJesse Brandeburg * @budget: napi budget limiting this functions packet handling 2504ce736788SJoao Pinto * @queue: TX queue index 2505732fdf0eSGiuseppe CAVALLARO * Description: it reclaims the transmit resources after transmission completes. 
25067ac6653aSJeff Kirsher */ 25078fce3331SJose Abreu static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 25087ac6653aSJeff Kirsher { 25098531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 251038979574SBeniamino Galvani unsigned int bytes_compl = 0, pkts_compl = 0; 2511132c32eeSOng Boon Leong unsigned int entry, xmits = 0, count = 0; 25127ac6653aSJeff Kirsher 25138fce3331SJose Abreu __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); 2514a9097a96SGiuseppe CAVALLARO 25159125cdd1SGiuseppe CAVALLARO priv->xstats.tx_clean++; 25169125cdd1SGiuseppe CAVALLARO 2517132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 2518132c32eeSOng Boon Leong 25198d5f4b07SBernd Edlinger entry = tx_q->dirty_tx; 2520132c32eeSOng Boon Leong 2521132c32eeSOng Boon Leong /* Try to clean all TX complete frame in 1 shot */ 25228531c808SChristian Marangi while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { 2523be8b38a7SOng Boon Leong struct xdp_frame *xdpf; 2524be8b38a7SOng Boon Leong struct sk_buff *skb; 2525c24602efSGiuseppe CAVALLARO struct dma_desc *p; 2526c363b658SFabrice Gasnier int status; 2527c24602efSGiuseppe CAVALLARO 25288b278a5bSOng Boon Leong if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || 25298b278a5bSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2530be8b38a7SOng Boon Leong xdpf = tx_q->xdpf[entry]; 2531be8b38a7SOng Boon Leong skb = NULL; 2532be8b38a7SOng Boon Leong } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2533be8b38a7SOng Boon Leong xdpf = NULL; 2534be8b38a7SOng Boon Leong skb = tx_q->tx_skbuff[entry]; 2535be8b38a7SOng Boon Leong } else { 2536be8b38a7SOng Boon Leong xdpf = NULL; 2537be8b38a7SOng Boon Leong skb = NULL; 2538be8b38a7SOng Boon Leong } 2539be8b38a7SOng Boon Leong 2540c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 2541ce736788SJoao Pinto p = (struct dma_desc *)(tx_q->dma_etx + entry); 
2542579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2543579a25a8SJose Abreu p = &tx_q->dma_entx[entry].basic; 2544c24602efSGiuseppe CAVALLARO else 2545ce736788SJoao Pinto p = tx_q->dma_tx + entry; 25467ac6653aSJeff Kirsher 254742de047dSJose Abreu status = stmmac_tx_status(priv, &priv->dev->stats, 254842de047dSJose Abreu &priv->xstats, p, priv->ioaddr); 2549c363b658SFabrice Gasnier /* Check if the descriptor is owned by the DMA */ 2550c363b658SFabrice Gasnier if (unlikely(status & tx_dma_own)) 2551c363b658SFabrice Gasnier break; 2552c363b658SFabrice Gasnier 25538fce3331SJose Abreu count++; 25548fce3331SJose Abreu 2555a6b25da5SNiklas Cassel /* Make sure descriptor fields are read after reading 2556a6b25da5SNiklas Cassel * the own bit. 2557a6b25da5SNiklas Cassel */ 2558a6b25da5SNiklas Cassel dma_rmb(); 2559a6b25da5SNiklas Cassel 2560c363b658SFabrice Gasnier /* Just consider the last segment and ...*/ 2561c363b658SFabrice Gasnier if (likely(!(status & tx_not_ls))) { 2562c363b658SFabrice Gasnier /* ... 
verify the status error condition */ 2563c363b658SFabrice Gasnier if (unlikely(status & tx_err)) { 2564c363b658SFabrice Gasnier priv->dev->stats.tx_errors++; 25653a6c12a0SXiaoliang Yang if (unlikely(status & tx_err_bump_tc)) 25663a6c12a0SXiaoliang Yang stmmac_bump_dma_threshold(priv, queue); 2567c363b658SFabrice Gasnier } else { 25687ac6653aSJeff Kirsher priv->dev->stats.tx_packets++; 25697ac6653aSJeff Kirsher priv->xstats.tx_pkt_n++; 257068e9c5deSVijayakannan Ayyathurai priv->xstats.txq_stats[queue].tx_pkt_n++; 2571c363b658SFabrice Gasnier } 2572be8b38a7SOng Boon Leong if (skb) 2573ba1ffd74SGiuseppe CAVALLARO stmmac_get_tx_hwtstamp(priv, p, skb); 25747ac6653aSJeff Kirsher } 25757ac6653aSJeff Kirsher 2576be8b38a7SOng Boon Leong if (likely(tx_q->tx_skbuff_dma[entry].buf && 2577be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { 2578ce736788SJoao Pinto if (tx_q->tx_skbuff_dma[entry].map_as_page) 2579362b37beSGiuseppe CAVALLARO dma_unmap_page(priv->device, 2580ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].buf, 2581ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].len, 25827ac6653aSJeff Kirsher DMA_TO_DEVICE); 2583362b37beSGiuseppe CAVALLARO else 2584362b37beSGiuseppe CAVALLARO dma_unmap_single(priv->device, 2585ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].buf, 2586ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].len, 2587362b37beSGiuseppe CAVALLARO DMA_TO_DEVICE); 2588ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].buf = 0; 2589ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].len = 0; 2590ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].map_as_page = false; 2591cf32deecSRayagond Kokatanur } 2592f748be53SAlexandre TORGUE 25932c520b1cSJose Abreu stmmac_clean_desc3(priv, tx_q, p); 2594f748be53SAlexandre TORGUE 2595ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].last_segment = false; 2596ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].is_jumbo = false; 25977ac6653aSJeff Kirsher 2598be8b38a7SOng Boon Leong if (xdpf && 2599be8b38a7SOng Boon Leong 
tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { 2600be8b38a7SOng Boon Leong xdp_return_frame_rx_napi(xdpf); 2601be8b38a7SOng Boon Leong tx_q->xdpf[entry] = NULL; 2602be8b38a7SOng Boon Leong } 2603be8b38a7SOng Boon Leong 26048b278a5bSOng Boon Leong if (xdpf && 26058b278a5bSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 26068b278a5bSOng Boon Leong xdp_return_frame(xdpf); 26078b278a5bSOng Boon Leong tx_q->xdpf[entry] = NULL; 26088b278a5bSOng Boon Leong } 26098b278a5bSOng Boon Leong 2610132c32eeSOng Boon Leong if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) 2611132c32eeSOng Boon Leong tx_q->xsk_frames_done++; 2612132c32eeSOng Boon Leong 2613be8b38a7SOng Boon Leong if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2614be8b38a7SOng Boon Leong if (likely(skb)) { 261538979574SBeniamino Galvani pkts_compl++; 261638979574SBeniamino Galvani bytes_compl += skb->len; 26177c565c33SEric W. Biederman dev_consume_skb_any(skb); 2618ce736788SJoao Pinto tx_q->tx_skbuff[entry] = NULL; 26197ac6653aSJeff Kirsher } 2620be8b38a7SOng Boon Leong } 26217ac6653aSJeff Kirsher 262242de047dSJose Abreu stmmac_release_tx_desc(priv, p, priv->mode); 26237ac6653aSJeff Kirsher 26248531c808SChristian Marangi entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 26257ac6653aSJeff Kirsher } 2626ce736788SJoao Pinto tx_q->dirty_tx = entry; 262738979574SBeniamino Galvani 2628c22a3f48SJoao Pinto netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 2629c22a3f48SJoao Pinto pkts_compl, bytes_compl); 263038979574SBeniamino Galvani 2631c22a3f48SJoao Pinto if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 2632c22a3f48SJoao Pinto queue))) && 2633aa042f60SSong, Yoong Siang stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { 2634c22a3f48SJoao Pinto 2635b3e51069SLABBE Corentin netif_dbg(priv, tx_done, priv->dev, 2636b3e51069SLABBE Corentin "%s: restart transmit\n", __func__); 2637c22a3f48SJoao 
Pinto netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 26387ac6653aSJeff Kirsher } 2639d765955dSGiuseppe CAVALLARO 2640132c32eeSOng Boon Leong if (tx_q->xsk_pool) { 2641132c32eeSOng Boon Leong bool work_done; 2642132c32eeSOng Boon Leong 2643132c32eeSOng Boon Leong if (tx_q->xsk_frames_done) 2644132c32eeSOng Boon Leong xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 2645132c32eeSOng Boon Leong 2646132c32eeSOng Boon Leong if (xsk_uses_need_wakeup(tx_q->xsk_pool)) 2647132c32eeSOng Boon Leong xsk_set_tx_need_wakeup(tx_q->xsk_pool); 2648132c32eeSOng Boon Leong 2649132c32eeSOng Boon Leong /* For XSK TX, we try to send as many as possible. 2650132c32eeSOng Boon Leong * If XSK work done (XSK TX desc empty and budget still 2651132c32eeSOng Boon Leong * available), return "budget - 1" to reenable TX IRQ. 2652132c32eeSOng Boon Leong * Else, return "budget" to make NAPI continue polling. 2653132c32eeSOng Boon Leong */ 2654132c32eeSOng Boon Leong work_done = stmmac_xdp_xmit_zc(priv, queue, 2655132c32eeSOng Boon Leong STMMAC_XSK_TX_BUDGET_MAX); 2656132c32eeSOng Boon Leong if (work_done) 2657132c32eeSOng Boon Leong xmits = budget - 1; 2658132c32eeSOng Boon Leong else 2659132c32eeSOng Boon Leong xmits = budget; 2660132c32eeSOng Boon Leong } 2661132c32eeSOng Boon Leong 2662be1c7eaeSVineetha G. Jaya Kumaran if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && 2663be1c7eaeSVineetha G. Jaya Kumaran priv->eee_sw_timer_en) { 2664c74ead22SJisheng Zhang if (stmmac_enable_eee_mode(priv)) 2665388e201dSVineetha G. 
Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 2666d765955dSGiuseppe CAVALLARO } 26678fce3331SJose Abreu 26684ccb4585SJose Abreu /* We still have pending packets, let's call for a new scheduling */ 26694ccb4585SJose Abreu if (tx_q->dirty_tx != tx_q->cur_tx) 2670db2f2842SOng Boon Leong hrtimer_start(&tx_q->txtimer, 2671db2f2842SOng Boon Leong STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2672d5a05e69SVincent Whitchurch HRTIMER_MODE_REL); 26734ccb4585SJose Abreu 26748fce3331SJose Abreu __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 26758fce3331SJose Abreu 2676132c32eeSOng Boon Leong /* Combine decisions from TX clean and XSK TX */ 2677132c32eeSOng Boon Leong return max(count, xmits); 26787ac6653aSJeff Kirsher } 26797ac6653aSJeff Kirsher 26807ac6653aSJeff Kirsher /** 2681732fdf0eSGiuseppe CAVALLARO * stmmac_tx_err - to manage the tx error 268232ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 26835bacd778SLABBE Corentin * @chan: channel index 26847ac6653aSJeff Kirsher * Description: it cleans the descriptors and restarts the transmission 2685732fdf0eSGiuseppe CAVALLARO * in case of transmission errors. 
26867ac6653aSJeff Kirsher */ 26875bacd778SLABBE Corentin static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 26887ac6653aSJeff Kirsher { 26898531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2690ce736788SJoao Pinto 2691c22a3f48SJoao Pinto netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 26927ac6653aSJeff Kirsher 2693ae4f0d46SJoao Pinto stmmac_stop_tx_dma(priv, chan); 2694ba39b344SChristian Marangi dma_free_tx_skbufs(priv, &priv->dma_conf, chan); 2695ba39b344SChristian Marangi stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); 2696f9ec5723SChristian Marangi stmmac_reset_tx_queue(priv, chan); 2697f421031eSJongsung Kim stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2698f421031eSJongsung Kim tx_q->dma_tx_phy, chan); 2699ae4f0d46SJoao Pinto stmmac_start_tx_dma(priv, chan); 27007ac6653aSJeff Kirsher 27017ac6653aSJeff Kirsher priv->dev->stats.tx_errors++; 2702c22a3f48SJoao Pinto netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 27037ac6653aSJeff Kirsher } 27047ac6653aSJeff Kirsher 270532ceabcaSGiuseppe CAVALLARO /** 27066deee222SJoao Pinto * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 27076deee222SJoao Pinto * @priv: driver private structure 27086deee222SJoao Pinto * @txmode: TX operating mode 27096deee222SJoao Pinto * @rxmode: RX operating mode 27106deee222SJoao Pinto * @chan: channel index 27116deee222SJoao Pinto * Description: it is used for configuring of the DMA operation mode in 27126deee222SJoao Pinto * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 27136deee222SJoao Pinto * mode. 
27146deee222SJoao Pinto */ 27156deee222SJoao Pinto static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 27166deee222SJoao Pinto u32 rxmode, u32 chan) 27176deee222SJoao Pinto { 2718a0daae13SJose Abreu u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2719a0daae13SJose Abreu u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 272052a76235SJose Abreu u32 rx_channels_count = priv->plat->rx_queues_to_use; 272152a76235SJose Abreu u32 tx_channels_count = priv->plat->tx_queues_to_use; 27226deee222SJoao Pinto int rxfifosz = priv->plat->rx_fifo_size; 272352a76235SJose Abreu int txfifosz = priv->plat->tx_fifo_size; 27246deee222SJoao Pinto 27256deee222SJoao Pinto if (rxfifosz == 0) 27266deee222SJoao Pinto rxfifosz = priv->dma_cap.rx_fifo_size; 272752a76235SJose Abreu if (txfifosz == 0) 272852a76235SJose Abreu txfifosz = priv->dma_cap.tx_fifo_size; 272952a76235SJose Abreu 273052a76235SJose Abreu /* Adjust for real per queue fifo size */ 273152a76235SJose Abreu rxfifosz /= rx_channels_count; 273252a76235SJose Abreu txfifosz /= tx_channels_count; 27336deee222SJoao Pinto 2734ab0204e3SJose Abreu stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2735ab0204e3SJose Abreu stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 27366deee222SJoao Pinto } 27376deee222SJoao Pinto 27388bf993a5SJose Abreu static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 27398bf993a5SJose Abreu { 274063a550fcSJose Abreu int ret; 27418bf993a5SJose Abreu 2742c10d4c82SJose Abreu ret = stmmac_safety_feat_irq_status(priv, priv->dev, 27438bf993a5SJose Abreu priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2744c10d4c82SJose Abreu if (ret && (ret != -EINVAL)) { 27458bf993a5SJose Abreu stmmac_global_err(priv); 2746c10d4c82SJose Abreu return true; 2747c10d4c82SJose Abreu } 2748c10d4c82SJose Abreu 2749c10d4c82SJose Abreu return false; 27508bf993a5SJose Abreu } 27518bf993a5SJose Abreu 27527e1c520cSOng Boon Leong static int 
stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) 27538fce3331SJose Abreu { 27548fce3331SJose Abreu int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 27557e1c520cSOng Boon Leong &priv->xstats, chan, dir); 27568531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 27578531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 27588fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[chan]; 2759132c32eeSOng Boon Leong struct napi_struct *rx_napi; 2760132c32eeSOng Boon Leong struct napi_struct *tx_napi; 2761021bd5e3SJose Abreu unsigned long flags; 27628fce3331SJose Abreu 2763132c32eeSOng Boon Leong rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; 2764132c32eeSOng Boon Leong tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 2765132c32eeSOng Boon Leong 27664ccb4585SJose Abreu if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2767132c32eeSOng Boon Leong if (napi_schedule_prep(rx_napi)) { 2768021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 2769021bd5e3SJose Abreu stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 2770021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 2771132c32eeSOng Boon Leong __napi_schedule(rx_napi); 27723ba07debSJose Abreu } 27734ccb4585SJose Abreu } 27744ccb4585SJose Abreu 2775021bd5e3SJose Abreu if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2776132c32eeSOng Boon Leong if (napi_schedule_prep(tx_napi)) { 2777021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 2778021bd5e3SJose Abreu stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 2779021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 2780132c32eeSOng Boon Leong __napi_schedule(tx_napi); 2781021bd5e3SJose Abreu } 2782021bd5e3SJose Abreu } 27838fce3331SJose Abreu 27848fce3331SJose Abreu return status; 27858fce3331SJose Abreu } 27868fce3331SJose Abreu 27876deee222SJoao Pinto /** 2788732fdf0eSGiuseppe 
CAVALLARO * stmmac_dma_interrupt - DMA ISR 278932ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 279032ceabcaSGiuseppe CAVALLARO * Description: this is the DMA ISR. It is called by the main ISR. 2791732fdf0eSGiuseppe CAVALLARO * It calls the dwmac dma routine and schedule poll method in case of some 2792732fdf0eSGiuseppe CAVALLARO * work can be done. 279332ceabcaSGiuseppe CAVALLARO */ 27947ac6653aSJeff Kirsher static void stmmac_dma_interrupt(struct stmmac_priv *priv) 27957ac6653aSJeff Kirsher { 2796d62a107aSJoao Pinto u32 tx_channel_count = priv->plat->tx_queues_to_use; 27975a6a0445SNiklas Cassel u32 rx_channel_count = priv->plat->rx_queues_to_use; 27985a6a0445SNiklas Cassel u32 channels_to_check = tx_channel_count > rx_channel_count ? 27995a6a0445SNiklas Cassel tx_channel_count : rx_channel_count; 2800d62a107aSJoao Pinto u32 chan; 28018ac60ffbSKees Cook int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 28028ac60ffbSKees Cook 28038ac60ffbSKees Cook /* Make sure we never check beyond our status buffer. 
*/ 28048ac60ffbSKees Cook if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 28058ac60ffbSKees Cook channels_to_check = ARRAY_SIZE(status); 280668e5cfafSJoao Pinto 28075a6a0445SNiklas Cassel for (chan = 0; chan < channels_to_check; chan++) 28087e1c520cSOng Boon Leong status[chan] = stmmac_napi_check(priv, chan, 28097e1c520cSOng Boon Leong DMA_DIR_RXTX); 2810d62a107aSJoao Pinto 28115a6a0445SNiklas Cassel for (chan = 0; chan < tx_channel_count; chan++) { 28125a6a0445SNiklas Cassel if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 28137ac6653aSJeff Kirsher /* Try to bump up the dma threshold on this failure */ 28143a6c12a0SXiaoliang Yang stmmac_bump_dma_threshold(priv, chan); 28155a6a0445SNiklas Cassel } else if (unlikely(status[chan] == tx_hard_error)) { 28164e593262SJoao Pinto stmmac_tx_err(priv, chan); 28177ac6653aSJeff Kirsher } 2818d62a107aSJoao Pinto } 2819d62a107aSJoao Pinto } 28207ac6653aSJeff Kirsher 282132ceabcaSGiuseppe CAVALLARO /** 282232ceabcaSGiuseppe CAVALLARO * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 282332ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 282432ceabcaSGiuseppe CAVALLARO * Description: this masks the MMC irq, in fact, the counters are managed in SW. 
282532ceabcaSGiuseppe CAVALLARO */ 28261c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv) 28271c901a46SGiuseppe CAVALLARO { 28281c901a46SGiuseppe CAVALLARO unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 28291c901a46SGiuseppe CAVALLARO MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 28301c901a46SGiuseppe CAVALLARO 28313b1dd2c5SJose Abreu stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 28324f795b25SGiuseppe CAVALLARO 28334f795b25SGiuseppe CAVALLARO if (priv->dma_cap.rmon) { 28343b1dd2c5SJose Abreu stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 28351c901a46SGiuseppe CAVALLARO memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 28364f795b25SGiuseppe CAVALLARO } else 283738ddc59dSLABBE Corentin netdev_info(priv->dev, "No MAC Management Counters available\n"); 28381c901a46SGiuseppe CAVALLARO } 28391c901a46SGiuseppe CAVALLARO 2840732fdf0eSGiuseppe CAVALLARO /** 2841732fdf0eSGiuseppe CAVALLARO * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 284232ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 284319e30c14SGiuseppe CAVALLARO * Description: 284419e30c14SGiuseppe CAVALLARO * new GMAC chip generations have a new register to indicate the 2845e7434821SGiuseppe CAVALLARO * presence of the optional feature/functions. 284619e30c14SGiuseppe CAVALLARO * This can be also used to override the value passed through the 284719e30c14SGiuseppe CAVALLARO * platform and necessary for old MAC10/100 and GMAC chips. 
2848e7434821SGiuseppe CAVALLARO */ 2849e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv) 2850e7434821SGiuseppe CAVALLARO { 2851a4e887faSJose Abreu return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2852e7434821SGiuseppe CAVALLARO } 2853e7434821SGiuseppe CAVALLARO 285432ceabcaSGiuseppe CAVALLARO /** 2855732fdf0eSGiuseppe CAVALLARO * stmmac_check_ether_addr - check if the MAC addr is valid 285632ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 285732ceabcaSGiuseppe CAVALLARO * Description: 285832ceabcaSGiuseppe CAVALLARO * it is to verify if the MAC address is valid, in case of failures it 285932ceabcaSGiuseppe CAVALLARO * generates a random MAC address 286032ceabcaSGiuseppe CAVALLARO */ 2861bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2862bfab27a1SGiuseppe CAVALLARO { 28637f9b8fe5SJakub Kicinski u8 addr[ETH_ALEN]; 28647f9b8fe5SJakub Kicinski 2865bfab27a1SGiuseppe CAVALLARO if (!is_valid_ether_addr(priv->dev->dev_addr)) { 28667f9b8fe5SJakub Kicinski stmmac_get_umac_addr(priv, priv->hw, addr, 0); 28677f9b8fe5SJakub Kicinski if (is_valid_ether_addr(addr)) 28687f9b8fe5SJakub Kicinski eth_hw_addr_set(priv->dev, addr); 28697f9b8fe5SJakub Kicinski else 2870f2cedb63SDanny Kukawka eth_hw_addr_random(priv->dev); 2871af649352SJisheng Zhang dev_info(priv->device, "device MAC address %pM\n", 2872bfab27a1SGiuseppe CAVALLARO priv->dev->dev_addr); 2873bfab27a1SGiuseppe CAVALLARO } 2874c88460b7SHans de Goede } 2875bfab27a1SGiuseppe CAVALLARO 287632ceabcaSGiuseppe CAVALLARO /** 2877732fdf0eSGiuseppe CAVALLARO * stmmac_init_dma_engine - DMA init. 287832ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 287932ceabcaSGiuseppe CAVALLARO * Description: 288032ceabcaSGiuseppe CAVALLARO * It inits the DMA invoking the specific MAC/GMAC callback. 
288132ceabcaSGiuseppe CAVALLARO * Some DMA parameters can be passed from the platform; 288232ceabcaSGiuseppe CAVALLARO * in case of these are not passed a default is kept for the MAC or GMAC. 288332ceabcaSGiuseppe CAVALLARO */ 28840f1f88a8SGiuseppe CAVALLARO static int stmmac_init_dma_engine(struct stmmac_priv *priv) 28850f1f88a8SGiuseppe CAVALLARO { 288647f2a9ceSJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 288747f2a9ceSJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 288824aaed0cSJose Abreu u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 288954139cf3SJoao Pinto struct stmmac_rx_queue *rx_q; 2890ce736788SJoao Pinto struct stmmac_tx_queue *tx_q; 289147f2a9ceSJoao Pinto u32 chan = 0; 2892c24602efSGiuseppe CAVALLARO int atds = 0; 2893495db273SGiuseppe Cavallaro int ret = 0; 28940f1f88a8SGiuseppe CAVALLARO 2895a332e2faSNiklas Cassel if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2896a332e2faSNiklas Cassel dev_err(priv->device, "Invalid DMA configuration\n"); 289789ab75bfSNiklas Cassel return -EINVAL; 28980f1f88a8SGiuseppe CAVALLARO } 28990f1f88a8SGiuseppe CAVALLARO 2900c24602efSGiuseppe CAVALLARO if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2901c24602efSGiuseppe CAVALLARO atds = 1; 2902c24602efSGiuseppe CAVALLARO 2903a4e887faSJose Abreu ret = stmmac_reset(priv, priv->ioaddr); 2904495db273SGiuseppe Cavallaro if (ret) { 2905495db273SGiuseppe Cavallaro dev_err(priv->device, "Failed to reset the dma\n"); 2906495db273SGiuseppe Cavallaro return ret; 2907495db273SGiuseppe Cavallaro } 2908495db273SGiuseppe Cavallaro 29097d9e6c5aSJose Abreu /* DMA Configuration */ 29107d9e6c5aSJose Abreu stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 29117d9e6c5aSJose Abreu 29127d9e6c5aSJose Abreu if (priv->plat->axi) 29137d9e6c5aSJose Abreu stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 29147d9e6c5aSJose Abreu 2915af8f3fb7SWeifeng Voon /* DMA CSR Channel configuration */ 2916087a7b94SVincent Whitchurch 
for (chan = 0; chan < dma_csr_ch; chan++) { 2917af8f3fb7SWeifeng Voon stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2918087a7b94SVincent Whitchurch stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2919087a7b94SVincent Whitchurch } 2920af8f3fb7SWeifeng Voon 292147f2a9ceSJoao Pinto /* DMA RX Channel Configuration */ 292247f2a9ceSJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) { 29238531c808SChristian Marangi rx_q = &priv->dma_conf.rx_queue[chan]; 292454139cf3SJoao Pinto 292524aaed0cSJose Abreu stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 292624aaed0cSJose Abreu rx_q->dma_rx_phy, chan); 292747f2a9ceSJoao Pinto 292854139cf3SJoao Pinto rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2929bba2556eSOng Boon Leong (rx_q->buf_alloc_num * 2930aa042f60SSong, Yoong Siang sizeof(struct dma_desc)); 2931a4e887faSJose Abreu stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2932a4e887faSJose Abreu rx_q->rx_tail_addr, chan); 293347f2a9ceSJoao Pinto } 293447f2a9ceSJoao Pinto 293547f2a9ceSJoao Pinto /* DMA TX Channel Configuration */ 293647f2a9ceSJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) { 29378531c808SChristian Marangi tx_q = &priv->dma_conf.tx_queue[chan]; 2938ce736788SJoao Pinto 293924aaed0cSJose Abreu stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 294024aaed0cSJose Abreu tx_q->dma_tx_phy, chan); 2941f748be53SAlexandre TORGUE 29420431100bSJose Abreu tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2943a4e887faSJose Abreu stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2944a4e887faSJose Abreu tx_q->tx_tail_addr, chan); 294547f2a9ceSJoao Pinto } 294624aaed0cSJose Abreu 2947495db273SGiuseppe Cavallaro return ret; 29480f1f88a8SGiuseppe CAVALLARO } 29490f1f88a8SGiuseppe CAVALLARO 29508fce3331SJose Abreu static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 29518fce3331SJose Abreu { 29528531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 29538fce3331SJose Abreu 2954db2f2842SOng 
Boon Leong hrtimer_start(&tx_q->txtimer, 2955db2f2842SOng Boon Leong STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2956d5a05e69SVincent Whitchurch HRTIMER_MODE_REL); 29578fce3331SJose Abreu } 29588fce3331SJose Abreu 2959bfab27a1SGiuseppe CAVALLARO /** 2960732fdf0eSGiuseppe CAVALLARO * stmmac_tx_timer - mitigation sw timer for tx. 2961d0ea5cbdSJesse Brandeburg * @t: data pointer 29629125cdd1SGiuseppe CAVALLARO * Description: 29639125cdd1SGiuseppe CAVALLARO * This is the timer handler to directly invoke the stmmac_tx_clean. 29649125cdd1SGiuseppe CAVALLARO */ 2965d5a05e69SVincent Whitchurch static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 29669125cdd1SGiuseppe CAVALLARO { 2967d5a05e69SVincent Whitchurch struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 29688fce3331SJose Abreu struct stmmac_priv *priv = tx_q->priv_data; 29698fce3331SJose Abreu struct stmmac_channel *ch; 2970132c32eeSOng Boon Leong struct napi_struct *napi; 29719125cdd1SGiuseppe CAVALLARO 29728fce3331SJose Abreu ch = &priv->channel[tx_q->queue_index]; 2973132c32eeSOng Boon Leong napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 29748fce3331SJose Abreu 2975132c32eeSOng Boon Leong if (likely(napi_schedule_prep(napi))) { 2976021bd5e3SJose Abreu unsigned long flags; 2977021bd5e3SJose Abreu 2978021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 2979021bd5e3SJose Abreu stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 2980021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 2981132c32eeSOng Boon Leong __napi_schedule(napi); 2982021bd5e3SJose Abreu } 2983d5a05e69SVincent Whitchurch 2984d5a05e69SVincent Whitchurch return HRTIMER_NORESTART; 29859125cdd1SGiuseppe CAVALLARO } 29869125cdd1SGiuseppe CAVALLARO 29879125cdd1SGiuseppe CAVALLARO /** 2988d429b66eSJose Abreu * stmmac_init_coalesce - init mitigation options. 
298932ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 29909125cdd1SGiuseppe CAVALLARO * Description: 2991d429b66eSJose Abreu * This inits the coalesce parameters: i.e. timer rate, 29929125cdd1SGiuseppe CAVALLARO * timer handler and default threshold used for enabling the 29939125cdd1SGiuseppe CAVALLARO * interrupt on completion bit. 29949125cdd1SGiuseppe CAVALLARO */ 2995d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv) 29969125cdd1SGiuseppe CAVALLARO { 29978fce3331SJose Abreu u32 tx_channel_count = priv->plat->tx_queues_to_use; 2998db2f2842SOng Boon Leong u32 rx_channel_count = priv->plat->rx_queues_to_use; 29998fce3331SJose Abreu u32 chan; 30008fce3331SJose Abreu 30018fce3331SJose Abreu for (chan = 0; chan < tx_channel_count; chan++) { 30028531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 30038fce3331SJose Abreu 3004db2f2842SOng Boon Leong priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; 3005db2f2842SOng Boon Leong priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; 3006db2f2842SOng Boon Leong 3007d5a05e69SVincent Whitchurch hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3008d5a05e69SVincent Whitchurch tx_q->txtimer.function = stmmac_tx_timer; 30098fce3331SJose Abreu } 3010db2f2842SOng Boon Leong 3011db2f2842SOng Boon Leong for (chan = 0; chan < rx_channel_count; chan++) 3012db2f2842SOng Boon Leong priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; 30139125cdd1SGiuseppe CAVALLARO } 30149125cdd1SGiuseppe CAVALLARO 30154854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv) 30164854ab99SJoao Pinto { 30174854ab99SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 30184854ab99SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 30194854ab99SJoao Pinto u32 chan; 30204854ab99SJoao Pinto 30214854ab99SJoao Pinto /* set TX ring length */ 30224854ab99SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 3023a4e887faSJose 
Abreu stmmac_set_tx_ring_len(priv, priv->ioaddr, 30248531c808SChristian Marangi (priv->dma_conf.dma_tx_size - 1), chan); 30254854ab99SJoao Pinto 30264854ab99SJoao Pinto /* set RX ring length */ 30274854ab99SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 3028a4e887faSJose Abreu stmmac_set_rx_ring_len(priv, priv->ioaddr, 30298531c808SChristian Marangi (priv->dma_conf.dma_rx_size - 1), chan); 30304854ab99SJoao Pinto } 30314854ab99SJoao Pinto 30329125cdd1SGiuseppe CAVALLARO /** 30336a3a7193SJoao Pinto * stmmac_set_tx_queue_weight - Set TX queue weight 30346a3a7193SJoao Pinto * @priv: driver private structure 30356a3a7193SJoao Pinto * Description: It is used for setting TX queues weight 30366a3a7193SJoao Pinto */ 30376a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 30386a3a7193SJoao Pinto { 30396a3a7193SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 30406a3a7193SJoao Pinto u32 weight; 30416a3a7193SJoao Pinto u32 queue; 30426a3a7193SJoao Pinto 30436a3a7193SJoao Pinto for (queue = 0; queue < tx_queues_count; queue++) { 30446a3a7193SJoao Pinto weight = priv->plat->tx_queues_cfg[queue].weight; 3045c10d4c82SJose Abreu stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 30466a3a7193SJoao Pinto } 30476a3a7193SJoao Pinto } 30486a3a7193SJoao Pinto 30496a3a7193SJoao Pinto /** 305019d91873SJoao Pinto * stmmac_configure_cbs - Configure CBS in TX queue 305119d91873SJoao Pinto * @priv: driver private structure 305219d91873SJoao Pinto * Description: It is used for configuring CBS in AVB TX queues 305319d91873SJoao Pinto */ 305419d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv) 305519d91873SJoao Pinto { 305619d91873SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 305719d91873SJoao Pinto u32 mode_to_use; 305819d91873SJoao Pinto u32 queue; 305919d91873SJoao Pinto 306044781fefSJoao Pinto /* queue 0 is reserved for legacy traffic */ 306144781fefSJoao Pinto for (queue = 1; 
queue < tx_queues_count; queue++) { 306219d91873SJoao Pinto mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 306319d91873SJoao Pinto if (mode_to_use == MTL_QUEUE_DCB) 306419d91873SJoao Pinto continue; 306519d91873SJoao Pinto 3066c10d4c82SJose Abreu stmmac_config_cbs(priv, priv->hw, 306719d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].send_slope, 306819d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].idle_slope, 306919d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].high_credit, 307019d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].low_credit, 307119d91873SJoao Pinto queue); 307219d91873SJoao Pinto } 307319d91873SJoao Pinto } 307419d91873SJoao Pinto 307519d91873SJoao Pinto /** 3076d43042f4SJoao Pinto * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 3077d43042f4SJoao Pinto * @priv: driver private structure 3078d43042f4SJoao Pinto * Description: It is used for mapping RX queues to RX dma channels 3079d43042f4SJoao Pinto */ 3080d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 3081d43042f4SJoao Pinto { 3082d43042f4SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3083d43042f4SJoao Pinto u32 queue; 3084d43042f4SJoao Pinto u32 chan; 3085d43042f4SJoao Pinto 3086d43042f4SJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 3087d43042f4SJoao Pinto chan = priv->plat->rx_queues_cfg[queue].chan; 3088c10d4c82SJose Abreu stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 3089d43042f4SJoao Pinto } 3090d43042f4SJoao Pinto } 3091d43042f4SJoao Pinto 3092d43042f4SJoao Pinto /** 3093a8f5102aSJoao Pinto * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 3094a8f5102aSJoao Pinto * @priv: driver private structure 3095a8f5102aSJoao Pinto * Description: It is used for configuring the RX Queue Priority 3096a8f5102aSJoao Pinto */ 3097a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 3098a8f5102aSJoao Pinto { 3099a8f5102aSJoao Pinto u32 
rx_queues_count = priv->plat->rx_queues_to_use; 3100a8f5102aSJoao Pinto u32 queue; 3101a8f5102aSJoao Pinto u32 prio; 3102a8f5102aSJoao Pinto 3103a8f5102aSJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 3104a8f5102aSJoao Pinto if (!priv->plat->rx_queues_cfg[queue].use_prio) 3105a8f5102aSJoao Pinto continue; 3106a8f5102aSJoao Pinto 3107a8f5102aSJoao Pinto prio = priv->plat->rx_queues_cfg[queue].prio; 3108c10d4c82SJose Abreu stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 3109a8f5102aSJoao Pinto } 3110a8f5102aSJoao Pinto } 3111a8f5102aSJoao Pinto 3112a8f5102aSJoao Pinto /** 3113a8f5102aSJoao Pinto * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 3114a8f5102aSJoao Pinto * @priv: driver private structure 3115a8f5102aSJoao Pinto * Description: It is used for configuring the TX Queue Priority 3116a8f5102aSJoao Pinto */ 3117a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 3118a8f5102aSJoao Pinto { 3119a8f5102aSJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 3120a8f5102aSJoao Pinto u32 queue; 3121a8f5102aSJoao Pinto u32 prio; 3122a8f5102aSJoao Pinto 3123a8f5102aSJoao Pinto for (queue = 0; queue < tx_queues_count; queue++) { 3124a8f5102aSJoao Pinto if (!priv->plat->tx_queues_cfg[queue].use_prio) 3125a8f5102aSJoao Pinto continue; 3126a8f5102aSJoao Pinto 3127a8f5102aSJoao Pinto prio = priv->plat->tx_queues_cfg[queue].prio; 3128c10d4c82SJose Abreu stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 3129a8f5102aSJoao Pinto } 3130a8f5102aSJoao Pinto } 3131a8f5102aSJoao Pinto 3132a8f5102aSJoao Pinto /** 3133abe80fdcSJoao Pinto * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 3134abe80fdcSJoao Pinto * @priv: driver private structure 3135abe80fdcSJoao Pinto * Description: It is used for configuring the RX queue routing 3136abe80fdcSJoao Pinto */ 3137abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 3138abe80fdcSJoao Pinto { 
3139abe80fdcSJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3140abe80fdcSJoao Pinto u32 queue; 3141abe80fdcSJoao Pinto u8 packet; 3142abe80fdcSJoao Pinto 3143abe80fdcSJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 3144abe80fdcSJoao Pinto /* no specific packet type routing specified for the queue */ 3145abe80fdcSJoao Pinto if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 3146abe80fdcSJoao Pinto continue; 3147abe80fdcSJoao Pinto 3148abe80fdcSJoao Pinto packet = priv->plat->rx_queues_cfg[queue].pkt_route; 3149c10d4c82SJose Abreu stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 3150abe80fdcSJoao Pinto } 3151abe80fdcSJoao Pinto } 3152abe80fdcSJoao Pinto 315376067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv) 315476067459SJose Abreu { 315576067459SJose Abreu if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 315676067459SJose Abreu priv->rss.enable = false; 315776067459SJose Abreu return; 315876067459SJose Abreu } 315976067459SJose Abreu 316076067459SJose Abreu if (priv->dev->features & NETIF_F_RXHASH) 316176067459SJose Abreu priv->rss.enable = true; 316276067459SJose Abreu else 316376067459SJose Abreu priv->rss.enable = false; 316476067459SJose Abreu 316576067459SJose Abreu stmmac_rss_configure(priv, priv->hw, &priv->rss, 316676067459SJose Abreu priv->plat->rx_queues_to_use); 316776067459SJose Abreu } 316876067459SJose Abreu 3169abe80fdcSJoao Pinto /** 3170d0a9c9f9SJoao Pinto * stmmac_mtl_configuration - Configure MTL 3171d0a9c9f9SJoao Pinto * @priv: driver private structure 3172d0a9c9f9SJoao Pinto * Description: It is used for configurring MTL 3173d0a9c9f9SJoao Pinto */ 3174d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv) 3175d0a9c9f9SJoao Pinto { 3176d0a9c9f9SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3177d0a9c9f9SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 3178d0a9c9f9SJoao Pinto 3179c10d4c82SJose Abreu if (tx_queues_count 
> 1) 31806a3a7193SJoao Pinto stmmac_set_tx_queue_weight(priv); 31816a3a7193SJoao Pinto 3182d0a9c9f9SJoao Pinto /* Configure MTL RX algorithms */ 3183c10d4c82SJose Abreu if (rx_queues_count > 1) 3184c10d4c82SJose Abreu stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 3185d0a9c9f9SJoao Pinto priv->plat->rx_sched_algorithm); 3186d0a9c9f9SJoao Pinto 3187d0a9c9f9SJoao Pinto /* Configure MTL TX algorithms */ 3188c10d4c82SJose Abreu if (tx_queues_count > 1) 3189c10d4c82SJose Abreu stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 3190d0a9c9f9SJoao Pinto priv->plat->tx_sched_algorithm); 3191d0a9c9f9SJoao Pinto 319219d91873SJoao Pinto /* Configure CBS in AVB TX queues */ 3193c10d4c82SJose Abreu if (tx_queues_count > 1) 319419d91873SJoao Pinto stmmac_configure_cbs(priv); 319519d91873SJoao Pinto 3196d43042f4SJoao Pinto /* Map RX MTL to DMA channels */ 3197d43042f4SJoao Pinto stmmac_rx_queue_dma_chan_map(priv); 3198d43042f4SJoao Pinto 3199d0a9c9f9SJoao Pinto /* Enable MAC RX Queues */ 3200d0a9c9f9SJoao Pinto stmmac_mac_enable_rx_queues(priv); 32016deee222SJoao Pinto 3202a8f5102aSJoao Pinto /* Set RX priorities */ 3203c10d4c82SJose Abreu if (rx_queues_count > 1) 3204a8f5102aSJoao Pinto stmmac_mac_config_rx_queues_prio(priv); 3205a8f5102aSJoao Pinto 3206a8f5102aSJoao Pinto /* Set TX priorities */ 3207c10d4c82SJose Abreu if (tx_queues_count > 1) 3208a8f5102aSJoao Pinto stmmac_mac_config_tx_queues_prio(priv); 3209abe80fdcSJoao Pinto 3210abe80fdcSJoao Pinto /* Set RX routing */ 3211c10d4c82SJose Abreu if (rx_queues_count > 1) 3212abe80fdcSJoao Pinto stmmac_mac_config_rx_queues_routing(priv); 321376067459SJose Abreu 321476067459SJose Abreu /* Receive Side Scaling */ 321576067459SJose Abreu if (rx_queues_count > 1) 321676067459SJose Abreu stmmac_mac_config_rss(priv); 3217d0a9c9f9SJoao Pinto } 3218d0a9c9f9SJoao Pinto 32198bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 32208bf993a5SJose Abreu { 3221c10d4c82SJose Abreu if (priv->dma_cap.asp) { 
netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
					  priv->plat->safety_feat_cfg);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
	}
}

/* Create the single-threaded workqueue used for the FPE handshake task.
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
{
	char *name;

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
	clear_bit(__FPE_REMOVING, &priv->fpe_task_state);

	name = priv->wq_name;
	sprintf(name, "%s-fpe", priv->dev->name);

	priv->fpe_wq = create_singlethread_workqueue(name);
	if (!priv->fpe_wq) {
		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);

		return -ENOMEM;
	}
	netdev_info(priv->dev, "FPE workqueue start");

	return 0;
}

/**
 * stmmac_hw_setup - setup mac in a usable state.
 * @dev : pointer to the device structure.
 * @ptp_register: register PTP if set
 * Description:
 * this is the main function to setup the HW in a usable state because the
 * dma engine is reset, the core registers are configured (e.g. AXI,
 * Checksum features, timers). The DMA is ready to start receiving and
 * transmitting.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	bool sph_en;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* Fall back to no RX checksum offload if the HW refuses IPC */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (ptp_register) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev,
				    "failed to enable PTP reference clock: %pe\n",
				    ERR_PTR(ret));
	}

	/* PTP init failure is not fatal; only warn */
	ret = stmmac_init_ptp(priv);
	if (ret == -EOPNOTSUPP)
		netdev_info(priv->dev, "PTP not supported by HW\n");
	else if (ret)
		netdev_warn(priv->dev, "PTP init failed\n");
	else if (ptp_register)
		stmmac_ptp_register(priv);

	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;

	/* Convert the timer from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	/* Program the RX interrupt watchdog (coalescing) per queue */
	if (priv->use_riwt) {
		u32 queue;

		for (queue = 0; queue < rx_cnt; queue++) {
			if (!priv->rx_riwt[queue])
				priv->rx_riwt[queue] = DEF_DMA_RIWT;

			stmmac_rx_watchdog(priv, priv->ioaddr,
					   priv->rx_riwt[queue], queue);
		}
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++) {
			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

			/* TSO and TBS cannot co-exist */
			if (tx_q->tbs & STMMAC_TBS_AVAIL)
				continue;

			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
		}
	}

	/* Enable Split Header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	if (priv->dma_cap.fpesel) {
		stmmac_fpe_start_wq(priv);

		if (priv->plat->fpe_cfg->enable)
			stmmac_fpe_handshake(priv, true);
	}

	return 0;
}

/* Undo stmmac_hw_setup(): currently only the PTP reference clock needs
 * to be released.
 */
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

/* Release IRQ lines requested so far.  @irq_err encodes how far the
 * request sequence got before failing; each case falls through to free
 * the lines requested earlier.  @irq_idx bounds the per-queue loops.
 */
static void stmmac_free_irq(struct net_device *dev,
			    enum request_irq_err irq_err, int irq_idx)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int j;

	switch (irq_err) {
	case REQ_IRQ_ERR_ALL:
		irq_idx = priv->plat->tx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_TX:
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->tx_irq[j] > 0) {
				irq_set_affinity_hint(priv->tx_irq[j], NULL);
				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
			}
		}
		irq_idx = priv->plat->rx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_RX:
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->rx_irq[j] > 0) {
				irq_set_affinity_hint(priv->rx_irq[j], NULL);
				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
			}
		}

		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
			free_irq(priv->sfty_ue_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_UE:
		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
			free_irq(priv->sfty_ce_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_CE:
		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
			free_irq(priv->lpi_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_LPI:
		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
			free_irq(priv->wol_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_WOL:
		free_irq(dev->irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_MAC:
	case REQ_IRQ_ERR_NO:
		/* If MAC IRQ request error, no more IRQ to free */
		break;
	}
}

/* Request one IRQ line per MSI vector (MAC, WoL, LPI, safety, plus one
 * per RX/TX queue).  On failure, all previously requested lines are
 * released through stmmac_free_irq().
 */
static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	enum request_irq_err irq_err;
	cpumask_t cpu_mask;
	int irq_idx = 0;
char *int_name;
	int ret;
	int i;

	/* For common interrupt */
	int_name = priv->int_name_mac;
	sprintf(int_name, "%s:%s", dev->name, "mac");
	ret = request_irq(dev->irq, stmmac_mac_interrupt,
			  0, int_name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: alloc mac MSI %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		int_name = priv->int_name_wol;
		sprintf(int_name, "%s:%s", dev->name, "wol");
		ret = request_irq(priv->wol_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc wol MSI %d (error: %d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the LPI IRQ in case of another line
	 * is used for LPI
	 */
	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
		int_name = priv->int_name_lpi;
		sprintf(int_name, "%s:%s", dev->name, "lpi");
		ret = request_irq(priv->lpi_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc lpi MSI %d (error: %d)\n",
				   __func__, priv->lpi_irq, ret);
			irq_err = REQ_IRQ_ERR_LPI;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Correctible Error line in
	 * case of another line is used
	 */
	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
		int_name = priv->int_name_sfty_ce;
		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
		ret = request_irq(priv->sfty_ce_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ce MSI %d (error: %d)\n",
				   __func__, priv->sfty_ce_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_CE;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Uncorrectible Error line in
	 * case of another line is used
	 */
	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
		int_name = priv->int_name_sfty_ue;
		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
		ret = request_irq(priv->sfty_ue_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ue MSI %d (error: %d)\n",
				   __func__, priv->sfty_ue_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_UE;
			goto irq_error;
		}
	}

	/* Request Rx MSI irq */
	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
		if (i >= MTL_MAX_RX_QUEUES)
			break;
		if (priv->rx_irq[i] == 0)
			continue;

		int_name = priv->int_name_rx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
		ret = request_irq(priv->rx_irq[i],
				  stmmac_msi_intr_rx,
				  0, int_name, &priv->dma_conf.rx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc rx-%d MSI %d (error: %d)\n",
				   __func__, i, priv->rx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_RX;
			irq_idx = i;
			goto irq_error;
		}
		/* Spread RX vectors round-robin across the online CPUs */
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
	}

	/* Request Tx MSI irq */
	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
		if (i >= MTL_MAX_TX_QUEUES)
			break;
		if (priv->tx_irq[i] == 0)
			continue;

		int_name = priv->int_name_tx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
		ret = request_irq(priv->tx_irq[i],
				  stmmac_msi_intr_tx,
				  0, int_name, &priv->dma_conf.tx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc tx-%d MSI %d (error: %d)\n",
				   __func__, i, priv->tx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_TX;
			irq_idx = i;
			goto irq_error;
		}
		/* Spread TX vectors round-robin across the online CPUs */
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, irq_idx);
	return ret;
}

/* Legacy/shared-line variant: MAC, WoL and LPI may share dev->irq or
 * use dedicated lines, all serviced by stmmac_interrupt().
 */
static int stmmac_request_irq_single(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	enum request_irq_err irq_err;
	int ret;
ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the IRQ lines */
	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			irq_err = REQ_IRQ_ERR_LPI;
			goto irq_error;
		}
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, 0);
	return ret;
}

/* Dispatch to the per-vector MSI path or the single/shared-line path
 * depending on platform capabilities.
 */
static int stmmac_request_irq(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* Request the IRQ lines */
	if (priv->plat->multi_msi_en)
		ret = stmmac_request_irq_multi_msi(dev);
	else
		ret = stmmac_request_irq_single(dev);

	return ret;
}

/**
 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
 * @priv: driver private structure
 * @mtu: MTU to setup the dma queue and buf with
 * Description: Allocate and generate a dma_conf based on the provided MTU.
 * Allocate the Tx/Rx DMA queue and init them.
 * Return value:
 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
 */
static struct stmmac_dma_conf *
stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
{
	struct stmmac_dma_conf *dma_conf;
	int chan, bfsize, ret;

	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
	if (!dma_conf) {
		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
			   __func__);
		return ERR_PTR(-ENOMEM);
	}

	/* Pick a buffer size large enough for the MTU; the 16KiB helper may
	 * decline (negative), in which case fall back to the generic sizing.
	 */
	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(mtu, 0);

	dma_conf->dma_buf_sz = bfsize;
	/* Choose the tx/rx size from the already defined one in the
	 * priv struct. (if defined)
	 */
	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;

	if (!dma_conf->dma_tx_size)
		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
	if (!dma_conf->dma_rx_size)
		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;

	/* Earlier check for TBS */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;

		/* Setup per-TXQ tbs flag before TX descriptor alloc */
		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
	}

	ret = alloc_dma_desc_resources(priv, dma_conf);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto alloc_error;
	}

	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	return dma_conf;

	/* Unwind in reverse order of acquisition */
init_error:
	free_dma_desc_resources(priv, dma_conf);
alloc_error:
	kfree(dma_conf);
	return ERR_PTR(ret);
}

/**
 * __stmmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * @dma_conf :  structure to take the dma data
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int __stmmac_open(struct net_device *dev,
			 struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int mode = priv->plat->phy_interface;
	u32 chan;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* Attach the PHY unless the port uses a (R)TBI PCS, or the XPCS is
	 * running clause-73 autoneg (in which case phylink/PHY attach is not
	 * used for this mode).
	 */
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI &&
	    (!priv->hw->xpcs ||
	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			goto init_phy_error;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	/* Adopt the caller-provided DMA configuration; buf_sz is a file-scope
	 * mirror of the active buffer size (defined outside this chunk).
	 */
	buf_sz = dma_conf->dma_buf_sz;
	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));

	stmmac_reset_queues_param(priv);

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);
	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

	/* Error unwind: reverse order of the bring-up above */
irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
	phylink_disconnect_phy(priv->phylink);
init_phy_error:
	pm_runtime_put(priv->device);
	return ret;
}

/* ndo_open: build a DMA configuration sized for the current MTU, then
 * delegate the actual bring-up to __stmmac_open(). The dma_conf is copied
 * into priv by __stmmac_open(), so the temporary is freed here either way.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;
	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	ret = __stmmac_open(dev, dma_conf);
	/* __stmmac_open() copied dma_conf into priv; the temporary can go */
	kfree(dma_conf);
	return ret;
}

/* Stop and tear down the frame-preemption (FPE) workqueue, if created */
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
	/* Flag removal first so the worker can observe it before destroy */
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq)
		destroy_workqueue(priv->fpe_wq);

	netdev_info(priv->dev, "FPE workqueue stop");
}

/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	netif_tx_disable(dev);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	pm_runtime_put(priv->device);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_stop_wq(priv);

	return 0;
}

/* Program hardware VLAN tag insertion into the current Tx descriptor when
 * the core supports it (dma_cap.vlins) and the skb carries a VLAN tag.
 * On success the descriptor ownership is handed to the DMA and cur_tx is
 * advanced; returns true iff a VLAN descriptor was consumed.
 */
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	/* 802.1AD (QinQ): the skb tag becomes the inner tag */
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
	return true;
}

/**
 * stmmac_tso_allocator - fill Tx descriptors with a TSO payload chunk
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * @last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description:
 * This function fills descriptor and request new descriptors according to
 * buffer length to fill
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	/* Split the payload into TSO_MAX_BUFF_SIZE chunks, one descriptor
	 * per chunk, advancing cur_tx for each.
	 */
	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		/* Cores with <= 32-bit DMA addressing take the address in
		 * des0 directly; wider cores go through the hwif helper.
		 */
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

/* Publish the prepared descriptors to the DMA engine: a write barrier
 * guarantees descriptor contents (including the own bit) are visible
 * before the tail pointer update kicks the hardware.
 */
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	int desc_size;

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
}

/**
 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 * @skb : the socket buffer
 * @dev : device pointer
 * Description: this is the transmit function that is called on TSO frames
 * (support available on GMAC4 and newer chips).
 * Diagram below show the ring programming in case of TSO frames:
 *
 * First Descriptor
 *  --------
 * | DES0 |---> buffer1 = L2/L3/L4 header
 * | DES1 |---> TCP Payload (can continue on next descr...)
 * | DES2 |---> buffer 1 and 2 len
 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *  --------
 *	|
 *     ...
 *	|
 *  --------
 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
 * | DES1 | --|
 * | DES2 | --> buffer 1 and 2 len
 * | DES3 |
 *  --------
 *
 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, tx_packets;
	int tmp_pay_len = 0, first_tx;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	u8 proto_hdr_len, hdr;
	u32 pay_len, mss;
	dma_addr_t des;
	int i;

	tx_q = &priv->dma_conf.tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths: UDP segmentation (USO) vs TCP (TSO) */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
		hdr = sizeof(struct udphdr);
	} else {
		proto_hdr_len = skb_tcp_all_headers(skb);
		hdr = tcp_hdrlen(skb);
	}

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed: program a context descriptor ahead
	 * of the first data descriptor (its own bit is set last, see below).
	 */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;

	if (priv->dma_cap.addr64 <= 32) {
		first->des0 = cpu_to_le32(des);

		/* Fill start of payload in buff2 of first descriptor */
		if (pay_len)
			first->des1 = cpu_to_le32(des + proto_hdr_len);

		/* If needed take extra descriptors to fill the remaining payload */
		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
	} else {
		/* Wide-address cores: the whole linear payload goes through
		 * stmmac_tso_allocator() starting just past the headers.
		 */
		stmmac_set_desc_addr(priv, first, des);
		tmp_pay_len = pay_len;
		des += proto_hdr_len;
		pay_len = 0;
	}

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;

	/* Manage tx mitigation: decide whether this frame should raise a
	 * Tx-complete interrupt based on the per-queue coalescing budget.
	 */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
				   proto_hdr_len,
				   pay_len,
				   1, tx_q->tx_skbuff_dma[first_entry].last_segment,
				   hdr / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	/* NOTE(review): descriptors/mappings consumed before the failure are
	 * left for stmmac_tx_clean to reclaim — confirm against tx_clean path.
	 */
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 * stmmac_xmit - Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
428632ceabcaSGiuseppe CAVALLARO * It programs the chain or the ring and supports oversized frames 428732ceabcaSGiuseppe CAVALLARO * and SG feature. 42887ac6653aSJeff Kirsher */ 42897ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 42907ac6653aSJeff Kirsher { 4291c2837423SJose Abreu unsigned int first_entry, tx_packets, enh_desc; 42927ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 42930e80bdc9SGiuseppe Cavallaro unsigned int nopaged_len = skb_headlen(skb); 42944a7d666aSGiuseppe CAVALLARO int i, csum_insertion = 0, is_jumbo = 0; 4295ce736788SJoao Pinto u32 queue = skb_get_queue_mapping(skb); 42967ac6653aSJeff Kirsher int nfrags = skb_shinfo(skb)->nr_frags; 4297b7766206SJose Abreu int gso = skb_shinfo(skb)->gso_type; 4298579a25a8SJose Abreu struct dma_edesc *tbs_desc = NULL; 42997ac6653aSJeff Kirsher struct dma_desc *desc, *first; 4300ce736788SJoao Pinto struct stmmac_tx_queue *tx_q; 4301c2837423SJose Abreu bool has_vlan, set_ic; 4302d96febedSOng Boon Leong int entry, first_tx; 4303a993db88SJose Abreu dma_addr_t des; 4304f748be53SAlexandre TORGUE 43058531c808SChristian Marangi tx_q = &priv->dma_conf.tx_queue[queue]; 4306c2837423SJose Abreu first_tx = tx_q->cur_tx; 4307ce736788SJoao Pinto 4308be1c7eaeSVineetha G. 
Jaya Kumaran if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 4309e2cd682dSJose Abreu stmmac_disable_eee_mode(priv); 4310e2cd682dSJose Abreu 4311f748be53SAlexandre TORGUE /* Manage oversized TCP frames for GMAC4 device */ 4312f748be53SAlexandre TORGUE if (skb_is_gso(skb) && priv->tso) { 4313b7766206SJose Abreu if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 4314b7766206SJose Abreu return stmmac_tso_xmit(skb, dev); 4315b7766206SJose Abreu if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 4316f748be53SAlexandre TORGUE return stmmac_tso_xmit(skb, dev); 4317f748be53SAlexandre TORGUE } 43187ac6653aSJeff Kirsher 4319ce736788SJoao Pinto if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 4320c22a3f48SJoao Pinto if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4321c22a3f48SJoao Pinto netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4322c22a3f48SJoao Pinto queue)); 43237ac6653aSJeff Kirsher /* This is a hard error, log it. */ 432438ddc59dSLABBE Corentin netdev_err(priv->dev, 432538ddc59dSLABBE Corentin "%s: Tx Ring full when queue awake\n", 432638ddc59dSLABBE Corentin __func__); 43277ac6653aSJeff Kirsher } 43287ac6653aSJeff Kirsher return NETDEV_TX_BUSY; 43297ac6653aSJeff Kirsher } 43307ac6653aSJeff Kirsher 433130d93227SJose Abreu /* Check if VLAN can be inserted by HW */ 433230d93227SJose Abreu has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 433330d93227SJose Abreu 4334ce736788SJoao Pinto entry = tx_q->cur_tx; 43350e80bdc9SGiuseppe Cavallaro first_entry = entry; 4336b4c9784cSNiklas Cassel WARN_ON(tx_q->tx_skbuff[first_entry]); 43377ac6653aSJeff Kirsher 43387ac6653aSJeff Kirsher csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 43397ac6653aSJeff Kirsher 43400e80bdc9SGiuseppe Cavallaro if (likely(priv->extend_desc)) 4341ce736788SJoao Pinto desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4342579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4343579a25a8SJose Abreu desc = &tx_q->dma_entx[entry].basic; 4344c24602efSGiuseppe 
CAVALLARO else 4345ce736788SJoao Pinto desc = tx_q->dma_tx + entry; 4346c24602efSGiuseppe CAVALLARO 43477ac6653aSJeff Kirsher first = desc; 43487ac6653aSJeff Kirsher 434930d93227SJose Abreu if (has_vlan) 435030d93227SJose Abreu stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 435130d93227SJose Abreu 43520e80bdc9SGiuseppe Cavallaro enh_desc = priv->plat->enh_desc; 43534a7d666aSGiuseppe CAVALLARO /* To program the descriptors according to the size of the frame */ 435429896a67SGiuseppe CAVALLARO if (enh_desc) 43552c520b1cSJose Abreu is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 435629896a67SGiuseppe CAVALLARO 435763a550fcSJose Abreu if (unlikely(is_jumbo)) { 43582c520b1cSJose Abreu entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 435963a550fcSJose Abreu if (unlikely(entry < 0) && (entry != -EINVAL)) 4360362b37beSGiuseppe CAVALLARO goto dma_map_err; 436129896a67SGiuseppe CAVALLARO } 43627ac6653aSJeff Kirsher 43637ac6653aSJeff Kirsher for (i = 0; i < nfrags; i++) { 43649e903e08SEric Dumazet const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 43659e903e08SEric Dumazet int len = skb_frag_size(frag); 4366be434d50SGiuseppe Cavallaro bool last_segment = (i == (nfrags - 1)); 43677ac6653aSJeff Kirsher 43688531c808SChristian Marangi entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4369b4c9784cSNiklas Cassel WARN_ON(tx_q->tx_skbuff[entry]); 4370e3ad57c9SGiuseppe Cavallaro 43710e80bdc9SGiuseppe Cavallaro if (likely(priv->extend_desc)) 4372ce736788SJoao Pinto desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4373579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4374579a25a8SJose Abreu desc = &tx_q->dma_entx[entry].basic; 4375c24602efSGiuseppe CAVALLARO else 4376ce736788SJoao Pinto desc = tx_q->dma_tx + entry; 43777ac6653aSJeff Kirsher 4378f748be53SAlexandre TORGUE des = skb_frag_dma_map(priv->device, frag, 0, len, 4379f722380dSIan Campbell DMA_TO_DEVICE); 4380f748be53SAlexandre TORGUE if (dma_mapping_error(priv->device, des)) 
4381362b37beSGiuseppe CAVALLARO goto dma_map_err; /* should reuse desc w/o issues */ 4382362b37beSGiuseppe CAVALLARO 4383ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].buf = des; 43846844171dSJose Abreu 43856844171dSJose Abreu stmmac_set_desc_addr(priv, desc, des); 4386f748be53SAlexandre TORGUE 4387ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].map_as_page = true; 4388ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].len = len; 4389ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 4390be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 43910e80bdc9SGiuseppe Cavallaro 43920e80bdc9SGiuseppe Cavallaro /* Prepare the descriptor and set the own bit too */ 439342de047dSJose Abreu stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 439442de047dSJose Abreu priv->mode, 1, last_segment, skb->len); 43957ac6653aSJeff Kirsher } 43967ac6653aSJeff Kirsher 439705cf0d1bSNiklas Cassel /* Only the last descriptor gets to point to the skb. */ 439805cf0d1bSNiklas Cassel tx_q->tx_skbuff[entry] = skb; 4399be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4400e3ad57c9SGiuseppe Cavallaro 44017df4a3a7SJose Abreu /* According to the coalesce parameter the IC bit for the latest 44027df4a3a7SJose Abreu * segment is reset and the timer re-started to clean the tx status. 44037df4a3a7SJose Abreu * This approach takes care about the fragments: desc is the first 44047df4a3a7SJose Abreu * element in case of no SG. 
44057df4a3a7SJose Abreu */ 4406c2837423SJose Abreu tx_packets = (entry + 1) - first_tx; 4407c2837423SJose Abreu tx_q->tx_count_frames += tx_packets; 4408c2837423SJose Abreu 4409c2837423SJose Abreu if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4410c2837423SJose Abreu set_ic = true; 4411db2f2842SOng Boon Leong else if (!priv->tx_coal_frames[queue]) 4412c2837423SJose Abreu set_ic = false; 4413db2f2842SOng Boon Leong else if (tx_packets > priv->tx_coal_frames[queue]) 4414c2837423SJose Abreu set_ic = true; 4415db2f2842SOng Boon Leong else if ((tx_q->tx_count_frames % 4416db2f2842SOng Boon Leong priv->tx_coal_frames[queue]) < tx_packets) 4417c2837423SJose Abreu set_ic = true; 4418c2837423SJose Abreu else 4419c2837423SJose Abreu set_ic = false; 4420c2837423SJose Abreu 4421c2837423SJose Abreu if (set_ic) { 44227df4a3a7SJose Abreu if (likely(priv->extend_desc)) 44237df4a3a7SJose Abreu desc = &tx_q->dma_etx[entry].basic; 4424579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4425579a25a8SJose Abreu desc = &tx_q->dma_entx[entry].basic; 44267df4a3a7SJose Abreu else 44277df4a3a7SJose Abreu desc = &tx_q->dma_tx[entry]; 44287df4a3a7SJose Abreu 44297df4a3a7SJose Abreu tx_q->tx_count_frames = 0; 44307df4a3a7SJose Abreu stmmac_set_tx_ic(priv, desc); 44317df4a3a7SJose Abreu priv->xstats.tx_set_ic_bit++; 44327df4a3a7SJose Abreu } 44337df4a3a7SJose Abreu 443405cf0d1bSNiklas Cassel /* We've used all descriptors we need for this skb, however, 443505cf0d1bSNiklas Cassel * advance cur_tx so that it references a fresh descriptor. 443605cf0d1bSNiklas Cassel * ndo_start_xmit will fill this descriptor the next time it's 443705cf0d1bSNiklas Cassel * called and stmmac_tx_clean may clean up to this descriptor. 
443805cf0d1bSNiklas Cassel */ 44398531c808SChristian Marangi entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4440ce736788SJoao Pinto tx_q->cur_tx = entry; 44417ac6653aSJeff Kirsher 44427ac6653aSJeff Kirsher if (netif_msg_pktdata(priv)) { 444338ddc59dSLABBE Corentin netdev_dbg(priv->dev, 444438ddc59dSLABBE Corentin "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 4445ce736788SJoao Pinto __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 44460e80bdc9SGiuseppe Cavallaro entry, first, nfrags); 444783d7af64SGiuseppe CAVALLARO 444838ddc59dSLABBE Corentin netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 44497ac6653aSJeff Kirsher print_pkt(skb->data, skb->len); 44507ac6653aSJeff Kirsher } 44510e80bdc9SGiuseppe Cavallaro 4452ce736788SJoao Pinto if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4453b3e51069SLABBE Corentin netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4454b3e51069SLABBE Corentin __func__); 4455c22a3f48SJoao Pinto netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 44567ac6653aSJeff Kirsher } 44577ac6653aSJeff Kirsher 44587ac6653aSJeff Kirsher dev->stats.tx_bytes += skb->len; 44597ac6653aSJeff Kirsher 44608000ddc0SJose Abreu if (priv->sarc_type) 44618000ddc0SJose Abreu stmmac_set_desc_sarc(priv, first, priv->sarc_type); 44628000ddc0SJose Abreu 44630e80bdc9SGiuseppe Cavallaro skb_tx_timestamp(skb); 44640e80bdc9SGiuseppe Cavallaro 44650e80bdc9SGiuseppe Cavallaro /* Ready to fill the first descriptor and set the OWN bit w/o any 44660e80bdc9SGiuseppe Cavallaro * problems because all the descriptors are actually ready to be 44670e80bdc9SGiuseppe Cavallaro * passed to the DMA engine. 
44680e80bdc9SGiuseppe Cavallaro */ 44690e80bdc9SGiuseppe Cavallaro if (likely(!is_jumbo)) { 44700e80bdc9SGiuseppe Cavallaro bool last_segment = (nfrags == 0); 44710e80bdc9SGiuseppe Cavallaro 4472f748be53SAlexandre TORGUE des = dma_map_single(priv->device, skb->data, 44730e80bdc9SGiuseppe Cavallaro nopaged_len, DMA_TO_DEVICE); 4474f748be53SAlexandre TORGUE if (dma_mapping_error(priv->device, des)) 44750e80bdc9SGiuseppe Cavallaro goto dma_map_err; 44760e80bdc9SGiuseppe Cavallaro 4477ce736788SJoao Pinto tx_q->tx_skbuff_dma[first_entry].buf = des; 4478be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4479be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 44806844171dSJose Abreu 44816844171dSJose Abreu stmmac_set_desc_addr(priv, first, des); 4482f748be53SAlexandre TORGUE 4483ce736788SJoao Pinto tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 4484ce736788SJoao Pinto tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 44850e80bdc9SGiuseppe Cavallaro 4486891434b1SRayagond Kokatanur if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4487891434b1SRayagond Kokatanur priv->hwts_tx_en)) { 4488891434b1SRayagond Kokatanur /* declare that device is doing timestamping */ 4489891434b1SRayagond Kokatanur skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 449042de047dSJose Abreu stmmac_enable_tx_timestamp(priv, first); 4491891434b1SRayagond Kokatanur } 4492891434b1SRayagond Kokatanur 44930e80bdc9SGiuseppe Cavallaro /* Prepare the first descriptor setting the OWN bit too */ 449442de047dSJose Abreu stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 4495579a25a8SJose Abreu csum_insertion, priv->mode, 0, last_segment, 449642de047dSJose Abreu skb->len); 449780acbed9SAaro Koskinen } 44980e80bdc9SGiuseppe Cavallaro 4499579a25a8SJose Abreu if (tx_q->tbs & STMMAC_TBS_EN) { 4500579a25a8SJose Abreu struct timespec64 ts = ns_to_timespec64(skb->tstamp); 4501579a25a8SJose Abreu 4502579a25a8SJose Abreu tbs_desc = 
&tx_q->dma_entx[first_entry]; 4503579a25a8SJose Abreu stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 4504579a25a8SJose Abreu } 4505579a25a8SJose Abreu 4506579a25a8SJose Abreu stmmac_set_tx_owner(priv, first); 4507579a25a8SJose Abreu 4508c22a3f48SJoao Pinto netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4509f748be53SAlexandre TORGUE 4510a4e887faSJose Abreu stmmac_enable_dma_transmission(priv, priv->ioaddr); 45118fce3331SJose Abreu 4512d96febedSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 45134772f26dSJose Abreu stmmac_tx_timer_arm(priv, queue); 45147ac6653aSJeff Kirsher 4515362b37beSGiuseppe CAVALLARO return NETDEV_TX_OK; 4516a9097a96SGiuseppe CAVALLARO 4517362b37beSGiuseppe CAVALLARO dma_map_err: 451838ddc59dSLABBE Corentin netdev_err(priv->dev, "Tx DMA map failed\n"); 4519362b37beSGiuseppe CAVALLARO dev_kfree_skb(skb); 4520362b37beSGiuseppe CAVALLARO priv->dev->stats.tx_dropped++; 45217ac6653aSJeff Kirsher return NETDEV_TX_OK; 45227ac6653aSJeff Kirsher } 45237ac6653aSJeff Kirsher 4524b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 4525b9381985SVince Bridgers { 4526ab188e8fSElad Nachman struct vlan_ethhdr *veth; 4527ab188e8fSElad Nachman __be16 vlan_proto; 4528b9381985SVince Bridgers u16 vlanid; 4529b9381985SVince Bridgers 4530ab188e8fSElad Nachman veth = (struct vlan_ethhdr *)skb->data; 4531ab188e8fSElad Nachman vlan_proto = veth->h_vlan_proto; 4532ab188e8fSElad Nachman 4533ab188e8fSElad Nachman if ((vlan_proto == htons(ETH_P_8021Q) && 4534ab188e8fSElad Nachman dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 4535ab188e8fSElad Nachman (vlan_proto == htons(ETH_P_8021AD) && 4536ab188e8fSElad Nachman dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 4537b9381985SVince Bridgers /* pop the vlan tag */ 4538ab188e8fSElad Nachman vlanid = ntohs(veth->h_vlan_TCI); 4539ab188e8fSElad Nachman memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 4540b9381985SVince Bridgers skb_pull(skb, VLAN_HLEN); 
4541ab188e8fSElad Nachman __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 4542b9381985SVince Bridgers } 4543b9381985SVince Bridgers } 4544b9381985SVince Bridgers 454532ceabcaSGiuseppe CAVALLARO /** 4546732fdf0eSGiuseppe CAVALLARO * stmmac_rx_refill - refill used skb preallocated buffers 454732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 454854139cf3SJoao Pinto * @queue: RX queue index 454932ceabcaSGiuseppe CAVALLARO * Description : this is to reallocate the skb for the reception process 455032ceabcaSGiuseppe CAVALLARO * that is based on zero-copy. 455132ceabcaSGiuseppe CAVALLARO */ 455254139cf3SJoao Pinto static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 45537ac6653aSJeff Kirsher { 45548531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 45555fabb012SOng Boon Leong int dirty = stmmac_rx_dirty(priv, queue); 455654139cf3SJoao Pinto unsigned int entry = rx_q->dirty_rx; 4557884d2b84SDavid Wu gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 4558884d2b84SDavid Wu 4559884d2b84SDavid Wu if (priv->dma_cap.addr64 <= 32) 4560884d2b84SDavid Wu gfp |= GFP_DMA32; 456154139cf3SJoao Pinto 4562e3ad57c9SGiuseppe Cavallaro while (dirty-- > 0) { 45632af6106aSJose Abreu struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4564c24602efSGiuseppe CAVALLARO struct dma_desc *p; 4565d429b66eSJose Abreu bool use_rx_wd; 4566c24602efSGiuseppe CAVALLARO 4567c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 456854139cf3SJoao Pinto p = (struct dma_desc *)(rx_q->dma_erx + entry); 4569c24602efSGiuseppe CAVALLARO else 457054139cf3SJoao Pinto p = rx_q->dma_rx + entry; 4571c24602efSGiuseppe CAVALLARO 45722af6106aSJose Abreu if (!buf->page) { 4573884d2b84SDavid Wu buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); 45742af6106aSJose Abreu if (!buf->page) 45757ac6653aSJeff Kirsher break; 4576120e87f9SGiuseppe Cavallaro } 45777ac6653aSJeff Kirsher 457867afd6d1SJose Abreu if (priv->sph && !buf->sec_page) { 4579884d2b84SDavid Wu 
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); 458067afd6d1SJose Abreu if (!buf->sec_page) 458167afd6d1SJose Abreu break; 458267afd6d1SJose Abreu 458367afd6d1SJose Abreu buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 458467afd6d1SJose Abreu } 458567afd6d1SJose Abreu 45865fabb012SOng Boon Leong buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; 45873caa61c2SJose Abreu 45882af6106aSJose Abreu stmmac_set_desc_addr(priv, p, buf->addr); 4589396e13e1SJoakim Zhang if (priv->sph) 4590396e13e1SJoakim Zhang stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 4591396e13e1SJoakim Zhang else 4592396e13e1SJoakim Zhang stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 45932c520b1cSJose Abreu stmmac_refill_desc3(priv, rx_q, p); 4594286a8372SGiuseppe CAVALLARO 4595d429b66eSJose Abreu rx_q->rx_count_frames++; 4596db2f2842SOng Boon Leong rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 4597db2f2842SOng Boon Leong if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 45986fa9d691SJose Abreu rx_q->rx_count_frames = 0; 459909146abeSJose Abreu 4600db2f2842SOng Boon Leong use_rx_wd = !priv->rx_coal_frames[queue]; 460109146abeSJose Abreu use_rx_wd |= rx_q->rx_count_frames > 0; 460209146abeSJose Abreu if (!priv->use_riwt) 460309146abeSJose Abreu use_rx_wd = false; 4604d429b66eSJose Abreu 4605ad688cdbSPavel Machek dma_wmb(); 46062af6106aSJose Abreu stmmac_set_rx_owner(priv, p, use_rx_wd); 4607e3ad57c9SGiuseppe Cavallaro 46088531c808SChristian Marangi entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 46097ac6653aSJeff Kirsher } 461054139cf3SJoao Pinto rx_q->dirty_rx = entry; 4611858a31ffSJose Abreu rx_q->rx_tail_addr = rx_q->dma_rx_phy + 4612858a31ffSJose Abreu (rx_q->dirty_rx * sizeof(struct dma_desc)); 46134523a561SBiao Huang stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 46147ac6653aSJeff Kirsher } 46157ac6653aSJeff Kirsher 461688ebe2cfSJose Abreu static unsigned int stmmac_rx_buf1_len(struct 
stmmac_priv *priv, 461788ebe2cfSJose Abreu struct dma_desc *p, 461888ebe2cfSJose Abreu int status, unsigned int len) 461988ebe2cfSJose Abreu { 462088ebe2cfSJose Abreu unsigned int plen = 0, hlen = 0; 462131f2760eSLuo Jiaxing int coe = priv->hw->rx_csum; 462288ebe2cfSJose Abreu 462388ebe2cfSJose Abreu /* Not first descriptor, buffer is always zero */ 462488ebe2cfSJose Abreu if (priv->sph && len) 462588ebe2cfSJose Abreu return 0; 462688ebe2cfSJose Abreu 462788ebe2cfSJose Abreu /* First descriptor, get split header length */ 462831f2760eSLuo Jiaxing stmmac_get_rx_header_len(priv, p, &hlen); 462988ebe2cfSJose Abreu if (priv->sph && hlen) { 463088ebe2cfSJose Abreu priv->xstats.rx_split_hdr_pkt_n++; 463188ebe2cfSJose Abreu return hlen; 463288ebe2cfSJose Abreu } 463388ebe2cfSJose Abreu 463488ebe2cfSJose Abreu /* First descriptor, not last descriptor and not split header */ 463588ebe2cfSJose Abreu if (status & rx_not_ls) 46368531c808SChristian Marangi return priv->dma_conf.dma_buf_sz; 463788ebe2cfSJose Abreu 463888ebe2cfSJose Abreu plen = stmmac_get_rx_frame_len(priv, p, coe); 463988ebe2cfSJose Abreu 464088ebe2cfSJose Abreu /* First descriptor and last descriptor and not split header */ 46418531c808SChristian Marangi return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); 464288ebe2cfSJose Abreu } 464388ebe2cfSJose Abreu 464488ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, 464588ebe2cfSJose Abreu struct dma_desc *p, 464688ebe2cfSJose Abreu int status, unsigned int len) 464788ebe2cfSJose Abreu { 464888ebe2cfSJose Abreu int coe = priv->hw->rx_csum; 464988ebe2cfSJose Abreu unsigned int plen = 0; 465088ebe2cfSJose Abreu 465188ebe2cfSJose Abreu /* Not split header, buffer is not available */ 465288ebe2cfSJose Abreu if (!priv->sph) 465388ebe2cfSJose Abreu return 0; 465488ebe2cfSJose Abreu 465588ebe2cfSJose Abreu /* Not last descriptor */ 465688ebe2cfSJose Abreu if (status & rx_not_ls) 46578531c808SChristian Marangi return 
priv->dma_conf.dma_buf_sz; 465888ebe2cfSJose Abreu 465988ebe2cfSJose Abreu plen = stmmac_get_rx_frame_len(priv, p, coe); 466088ebe2cfSJose Abreu 466188ebe2cfSJose Abreu /* Last descriptor */ 466288ebe2cfSJose Abreu return plen - len; 466388ebe2cfSJose Abreu } 466488ebe2cfSJose Abreu 4665be8b38a7SOng Boon Leong static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, 46668b278a5bSOng Boon Leong struct xdp_frame *xdpf, bool dma_map) 4667be8b38a7SOng Boon Leong { 46688531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 4669be8b38a7SOng Boon Leong unsigned int entry = tx_q->cur_tx; 4670be8b38a7SOng Boon Leong struct dma_desc *tx_desc; 4671be8b38a7SOng Boon Leong dma_addr_t dma_addr; 4672be8b38a7SOng Boon Leong bool set_ic; 4673be8b38a7SOng Boon Leong 4674be8b38a7SOng Boon Leong if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) 4675be8b38a7SOng Boon Leong return STMMAC_XDP_CONSUMED; 4676be8b38a7SOng Boon Leong 4677be8b38a7SOng Boon Leong if (likely(priv->extend_desc)) 4678be8b38a7SOng Boon Leong tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4679be8b38a7SOng Boon Leong else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4680be8b38a7SOng Boon Leong tx_desc = &tx_q->dma_entx[entry].basic; 4681be8b38a7SOng Boon Leong else 4682be8b38a7SOng Boon Leong tx_desc = tx_q->dma_tx + entry; 4683be8b38a7SOng Boon Leong 46848b278a5bSOng Boon Leong if (dma_map) { 46858b278a5bSOng Boon Leong dma_addr = dma_map_single(priv->device, xdpf->data, 46868b278a5bSOng Boon Leong xdpf->len, DMA_TO_DEVICE); 46878b278a5bSOng Boon Leong if (dma_mapping_error(priv->device, dma_addr)) 46888b278a5bSOng Boon Leong return STMMAC_XDP_CONSUMED; 46898b278a5bSOng Boon Leong 46908b278a5bSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; 46918b278a5bSOng Boon Leong } else { 46928b278a5bSOng Boon Leong struct page *page = virt_to_page(xdpf->data); 46938b278a5bSOng Boon Leong 4694be8b38a7SOng Boon Leong dma_addr = 
page_pool_get_dma_addr(page) + sizeof(*xdpf) + 4695be8b38a7SOng Boon Leong xdpf->headroom; 4696be8b38a7SOng Boon Leong dma_sync_single_for_device(priv->device, dma_addr, 4697be8b38a7SOng Boon Leong xdpf->len, DMA_BIDIRECTIONAL); 4698be8b38a7SOng Boon Leong 4699be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; 47008b278a5bSOng Boon Leong } 4701be8b38a7SOng Boon Leong 4702be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf = dma_addr; 4703be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].map_as_page = false; 4704be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].len = xdpf->len; 4705be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].last_segment = true; 4706be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].is_jumbo = false; 4707be8b38a7SOng Boon Leong 4708be8b38a7SOng Boon Leong tx_q->xdpf[entry] = xdpf; 4709be8b38a7SOng Boon Leong 4710be8b38a7SOng Boon Leong stmmac_set_desc_addr(priv, tx_desc, dma_addr); 4711be8b38a7SOng Boon Leong 4712be8b38a7SOng Boon Leong stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, 4713be8b38a7SOng Boon Leong true, priv->mode, true, true, 4714be8b38a7SOng Boon Leong xdpf->len); 4715be8b38a7SOng Boon Leong 4716be8b38a7SOng Boon Leong tx_q->tx_count_frames++; 4717be8b38a7SOng Boon Leong 4718be8b38a7SOng Boon Leong if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 4719be8b38a7SOng Boon Leong set_ic = true; 4720be8b38a7SOng Boon Leong else 4721be8b38a7SOng Boon Leong set_ic = false; 4722be8b38a7SOng Boon Leong 4723be8b38a7SOng Boon Leong if (set_ic) { 4724be8b38a7SOng Boon Leong tx_q->tx_count_frames = 0; 4725be8b38a7SOng Boon Leong stmmac_set_tx_ic(priv, tx_desc); 4726be8b38a7SOng Boon Leong priv->xstats.tx_set_ic_bit++; 4727be8b38a7SOng Boon Leong } 4728be8b38a7SOng Boon Leong 4729be8b38a7SOng Boon Leong stmmac_enable_dma_transmission(priv, priv->ioaddr); 4730be8b38a7SOng Boon Leong 47318531c808SChristian Marangi entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 
4732be8b38a7SOng Boon Leong tx_q->cur_tx = entry; 4733be8b38a7SOng Boon Leong 4734be8b38a7SOng Boon Leong return STMMAC_XDP_TX; 4735be8b38a7SOng Boon Leong } 4736be8b38a7SOng Boon Leong 4737be8b38a7SOng Boon Leong static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, 4738be8b38a7SOng Boon Leong int cpu) 4739be8b38a7SOng Boon Leong { 4740be8b38a7SOng Boon Leong int index = cpu; 4741be8b38a7SOng Boon Leong 4742be8b38a7SOng Boon Leong if (unlikely(index < 0)) 4743be8b38a7SOng Boon Leong index = 0; 4744be8b38a7SOng Boon Leong 4745be8b38a7SOng Boon Leong while (index >= priv->plat->tx_queues_to_use) 4746be8b38a7SOng Boon Leong index -= priv->plat->tx_queues_to_use; 4747be8b38a7SOng Boon Leong 4748be8b38a7SOng Boon Leong return index; 4749be8b38a7SOng Boon Leong } 4750be8b38a7SOng Boon Leong 4751be8b38a7SOng Boon Leong static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, 4752be8b38a7SOng Boon Leong struct xdp_buff *xdp) 4753be8b38a7SOng Boon Leong { 4754be8b38a7SOng Boon Leong struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 4755be8b38a7SOng Boon Leong int cpu = smp_processor_id(); 4756be8b38a7SOng Boon Leong struct netdev_queue *nq; 4757be8b38a7SOng Boon Leong int queue; 4758be8b38a7SOng Boon Leong int res; 4759be8b38a7SOng Boon Leong 4760be8b38a7SOng Boon Leong if (unlikely(!xdpf)) 4761be8b38a7SOng Boon Leong return STMMAC_XDP_CONSUMED; 4762be8b38a7SOng Boon Leong 4763be8b38a7SOng Boon Leong queue = stmmac_xdp_get_tx_queue(priv, cpu); 4764be8b38a7SOng Boon Leong nq = netdev_get_tx_queue(priv->dev, queue); 4765be8b38a7SOng Boon Leong 4766be8b38a7SOng Boon Leong __netif_tx_lock(nq, cpu); 4767be8b38a7SOng Boon Leong /* Avoids TX time-out as we are sharing with slow path */ 4768e92af33eSAlexander Lobakin txq_trans_cond_update(nq); 4769be8b38a7SOng Boon Leong 47708b278a5bSOng Boon Leong res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); 4771be8b38a7SOng Boon Leong if (res == STMMAC_XDP_TX) 4772be8b38a7SOng Boon Leong stmmac_flush_tx_descriptors(priv, 
queue); 4773be8b38a7SOng Boon Leong 4774be8b38a7SOng Boon Leong __netif_tx_unlock(nq); 4775be8b38a7SOng Boon Leong 4776be8b38a7SOng Boon Leong return res; 4777be8b38a7SOng Boon Leong } 4778be8b38a7SOng Boon Leong 4779bba71cacSOng Boon Leong static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, 4780bba71cacSOng Boon Leong struct bpf_prog *prog, 47815fabb012SOng Boon Leong struct xdp_buff *xdp) 47825fabb012SOng Boon Leong { 47835fabb012SOng Boon Leong u32 act; 4784bba71cacSOng Boon Leong int res; 47855fabb012SOng Boon Leong 47865fabb012SOng Boon Leong act = bpf_prog_run_xdp(prog, xdp); 47875fabb012SOng Boon Leong switch (act) { 47885fabb012SOng Boon Leong case XDP_PASS: 47895fabb012SOng Boon Leong res = STMMAC_XDP_PASS; 47905fabb012SOng Boon Leong break; 4791be8b38a7SOng Boon Leong case XDP_TX: 4792be8b38a7SOng Boon Leong res = stmmac_xdp_xmit_back(priv, xdp); 4793be8b38a7SOng Boon Leong break; 47948b278a5bSOng Boon Leong case XDP_REDIRECT: 47958b278a5bSOng Boon Leong if (xdp_do_redirect(priv->dev, xdp, prog) < 0) 47968b278a5bSOng Boon Leong res = STMMAC_XDP_CONSUMED; 47978b278a5bSOng Boon Leong else 47988b278a5bSOng Boon Leong res = STMMAC_XDP_REDIRECT; 47998b278a5bSOng Boon Leong break; 48005fabb012SOng Boon Leong default: 4801c8064e5bSPaolo Abeni bpf_warn_invalid_xdp_action(priv->dev, prog, act); 48025fabb012SOng Boon Leong fallthrough; 48035fabb012SOng Boon Leong case XDP_ABORTED: 48045fabb012SOng Boon Leong trace_xdp_exception(priv->dev, prog, act); 48055fabb012SOng Boon Leong fallthrough; 48065fabb012SOng Boon Leong case XDP_DROP: 48075fabb012SOng Boon Leong res = STMMAC_XDP_CONSUMED; 48085fabb012SOng Boon Leong break; 48095fabb012SOng Boon Leong } 48105fabb012SOng Boon Leong 4811bba71cacSOng Boon Leong return res; 4812bba71cacSOng Boon Leong } 4813bba71cacSOng Boon Leong 4814bba71cacSOng Boon Leong static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, 4815bba71cacSOng Boon Leong struct xdp_buff *xdp) 4816bba71cacSOng Boon Leong { 
4817bba71cacSOng Boon Leong struct bpf_prog *prog; 4818bba71cacSOng Boon Leong int res; 4819bba71cacSOng Boon Leong 4820bba71cacSOng Boon Leong prog = READ_ONCE(priv->xdp_prog); 4821bba71cacSOng Boon Leong if (!prog) { 4822bba71cacSOng Boon Leong res = STMMAC_XDP_PASS; 48232f1e432dSToke Høiland-Jørgensen goto out; 4824bba71cacSOng Boon Leong } 4825bba71cacSOng Boon Leong 4826bba71cacSOng Boon Leong res = __stmmac_xdp_run_prog(priv, prog, xdp); 48272f1e432dSToke Høiland-Jørgensen out: 48285fabb012SOng Boon Leong return ERR_PTR(-res); 48295fabb012SOng Boon Leong } 48305fabb012SOng Boon Leong 4831be8b38a7SOng Boon Leong static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, 4832be8b38a7SOng Boon Leong int xdp_status) 4833be8b38a7SOng Boon Leong { 4834be8b38a7SOng Boon Leong int cpu = smp_processor_id(); 4835be8b38a7SOng Boon Leong int queue; 4836be8b38a7SOng Boon Leong 4837be8b38a7SOng Boon Leong queue = stmmac_xdp_get_tx_queue(priv, cpu); 4838be8b38a7SOng Boon Leong 4839be8b38a7SOng Boon Leong if (xdp_status & STMMAC_XDP_TX) 4840be8b38a7SOng Boon Leong stmmac_tx_timer_arm(priv, queue); 48418b278a5bSOng Boon Leong 48428b278a5bSOng Boon Leong if (xdp_status & STMMAC_XDP_REDIRECT) 48438b278a5bSOng Boon Leong xdp_do_flush(); 4844be8b38a7SOng Boon Leong } 4845be8b38a7SOng Boon Leong 4846bba2556eSOng Boon Leong static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, 4847bba2556eSOng Boon Leong struct xdp_buff *xdp) 4848bba2556eSOng Boon Leong { 4849bba2556eSOng Boon Leong unsigned int metasize = xdp->data - xdp->data_meta; 4850bba2556eSOng Boon Leong unsigned int datasize = xdp->data_end - xdp->data; 4851bba2556eSOng Boon Leong struct sk_buff *skb; 4852bba2556eSOng Boon Leong 4853132c32eeSOng Boon Leong skb = __napi_alloc_skb(&ch->rxtx_napi, 4854bba2556eSOng Boon Leong xdp->data_end - xdp->data_hard_start, 4855bba2556eSOng Boon Leong GFP_ATOMIC | __GFP_NOWARN); 4856bba2556eSOng Boon Leong if (unlikely(!skb)) 4857bba2556eSOng Boon Leong return NULL; 
4858bba2556eSOng Boon Leong 4859bba2556eSOng Boon Leong skb_reserve(skb, xdp->data - xdp->data_hard_start); 4860bba2556eSOng Boon Leong memcpy(__skb_put(skb, datasize), xdp->data, datasize); 4861bba2556eSOng Boon Leong if (metasize) 4862bba2556eSOng Boon Leong skb_metadata_set(skb, metasize); 4863bba2556eSOng Boon Leong 4864bba2556eSOng Boon Leong return skb; 4865bba2556eSOng Boon Leong } 4866bba2556eSOng Boon Leong 4867bba2556eSOng Boon Leong static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, 4868bba2556eSOng Boon Leong struct dma_desc *p, struct dma_desc *np, 4869bba2556eSOng Boon Leong struct xdp_buff *xdp) 4870bba2556eSOng Boon Leong { 4871bba2556eSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 4872bba2556eSOng Boon Leong unsigned int len = xdp->data_end - xdp->data; 4873bba2556eSOng Boon Leong enum pkt_hash_types hash_type; 4874bba2556eSOng Boon Leong int coe = priv->hw->rx_csum; 4875bba2556eSOng Boon Leong struct sk_buff *skb; 4876bba2556eSOng Boon Leong u32 hash; 4877bba2556eSOng Boon Leong 4878bba2556eSOng Boon Leong skb = stmmac_construct_skb_zc(ch, xdp); 4879bba2556eSOng Boon Leong if (!skb) { 4880bba2556eSOng Boon Leong priv->dev->stats.rx_dropped++; 4881bba2556eSOng Boon Leong return; 4882bba2556eSOng Boon Leong } 4883bba2556eSOng Boon Leong 4884bba2556eSOng Boon Leong stmmac_get_rx_hwtstamp(priv, p, np, skb); 4885bba2556eSOng Boon Leong stmmac_rx_vlan(priv->dev, skb); 4886bba2556eSOng Boon Leong skb->protocol = eth_type_trans(skb, priv->dev); 4887bba2556eSOng Boon Leong 4888bba2556eSOng Boon Leong if (unlikely(!coe)) 4889bba2556eSOng Boon Leong skb_checksum_none_assert(skb); 4890bba2556eSOng Boon Leong else 4891bba2556eSOng Boon Leong skb->ip_summed = CHECKSUM_UNNECESSARY; 4892bba2556eSOng Boon Leong 4893bba2556eSOng Boon Leong if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 4894bba2556eSOng Boon Leong skb_set_hash(skb, hash, hash_type); 4895bba2556eSOng Boon Leong 4896bba2556eSOng Boon Leong 
skb_record_rx_queue(skb, queue); 4897132c32eeSOng Boon Leong napi_gro_receive(&ch->rxtx_napi, skb); 4898bba2556eSOng Boon Leong 4899bba2556eSOng Boon Leong priv->dev->stats.rx_packets++; 4900bba2556eSOng Boon Leong priv->dev->stats.rx_bytes += len; 4901bba2556eSOng Boon Leong } 4902bba2556eSOng Boon Leong 4903bba2556eSOng Boon Leong static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 4904bba2556eSOng Boon Leong { 49058531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4906bba2556eSOng Boon Leong unsigned int entry = rx_q->dirty_rx; 4907bba2556eSOng Boon Leong struct dma_desc *rx_desc = NULL; 4908bba2556eSOng Boon Leong bool ret = true; 4909bba2556eSOng Boon Leong 4910bba2556eSOng Boon Leong budget = min(budget, stmmac_rx_dirty(priv, queue)); 4911bba2556eSOng Boon Leong 4912bba2556eSOng Boon Leong while (budget-- > 0 && entry != rx_q->cur_rx) { 4913bba2556eSOng Boon Leong struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4914bba2556eSOng Boon Leong dma_addr_t dma_addr; 4915bba2556eSOng Boon Leong bool use_rx_wd; 4916bba2556eSOng Boon Leong 4917bba2556eSOng Boon Leong if (!buf->xdp) { 4918bba2556eSOng Boon Leong buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 4919bba2556eSOng Boon Leong if (!buf->xdp) { 4920bba2556eSOng Boon Leong ret = false; 4921bba2556eSOng Boon Leong break; 4922bba2556eSOng Boon Leong } 4923bba2556eSOng Boon Leong } 4924bba2556eSOng Boon Leong 4925bba2556eSOng Boon Leong if (priv->extend_desc) 4926bba2556eSOng Boon Leong rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); 4927bba2556eSOng Boon Leong else 4928bba2556eSOng Boon Leong rx_desc = rx_q->dma_rx + entry; 4929bba2556eSOng Boon Leong 4930bba2556eSOng Boon Leong dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 4931bba2556eSOng Boon Leong stmmac_set_desc_addr(priv, rx_desc, dma_addr); 4932bba2556eSOng Boon Leong stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); 4933bba2556eSOng Boon Leong stmmac_refill_desc3(priv, rx_q, 
/* Refill up to @budget dirty RX descriptors of @queue with fresh XSK
 * buffers and hand them back to the DMA.
 *
 * Walks from dirty_rx towards cur_rx, allocating an XSK buffer for any
 * slot that lost its buffer, programming the descriptor address, and
 * releasing ownership to the hardware.  Finally advances the RX tail
 * pointer so the DMA sees the new descriptors.
 *
 * Returns false if the XSK pool ran out of buffers (caller should
 * retry refilling later), true otherwise.
 */
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	budget = min(budget, stmmac_rx_dirty(priv, queue));

	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				/* XSK pool exhausted: stop and report failure */
				ret = false;
				break;
			}
		}

		if (priv->extend_desc)
			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			rx_desc = rx_q->dma_rx + entry;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		/* Interrupt-coalescing bookkeeping: decide whether the RX
		 * watchdog (RIWT) should fire for this descriptor instead of
		 * a per-frame interrupt.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Descriptor fields must be visible before the OWN bit */
		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}

	if (rx_desc) {
		/* At least one descriptor refilled: publish new tail */
		rx_q->dirty_rx = entry;
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->dirty_rx * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
	}

	return ret;
}
/* stmmac_rx_zc - zero-copy (AF_XDP) RX processing for one queue.
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index
 *
 * Consumes up to @limit completed descriptors, running the XDP program
 * on each frame; XDP_PASS frames are copied into skbs and delivered,
 * XDP_TX/REDIRECT verdicts are accumulated into @xdp_status and
 * finalized at the end.  Refills the ring in batches from the XSK pool
 * and maintains the need_wakeup flag for user space.
 *
 * Returns the number of frames processed (or @limit if a refill
 * failure should force NAPI to repoll).
 */
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int count = 0, error = 0, len = 0;
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int next_entry = rx_q->cur_rx;
	unsigned int desc_size;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
	int status = 0;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		struct stmmac_rx_buffer *buf;
		unsigned int buf1_len = 0;
		struct dma_desc *np, *p;
		int entry;
		int res;

		/* Resume a frame that spilled over from the previous NAPI
		 * poll (multi-descriptor frame split across budgets).
		 */
		if (!count && rx_q->state_saved) {
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
		buf1_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		/* Batched refill to amortize tail-pointer updates */
		if (dirty >= STMMAC_RX_FILL_BATCH) {
			failure = failure ||
				  !stmmac_rx_refill_zc(priv, queue, dirty);
			dirty = 0;
		}

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
					  &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		/* Prefetch the next RX descriptor */
		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_conf.dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		/* Ensure a valid XSK buffer before proceed */
		if (!buf->xdp)
			break;

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
						  &priv->xstats,
						  rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		/* A bad multi-descriptor frame: drain its remaining
		 * descriptors before accounting the error.
		 */
		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			count++;
			continue;
		}

		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
		if (likely(status & rx_not_ls)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			count++;
			goto read_again;
		}

		/* XDP ZC Frame only support primary buffers for now */
		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;

		/* ACS is disabled; strip manually. */
		if (likely(!(status & rx_not_ls))) {
			buf1_len -= ETH_FCS_LEN;
			len -= ETH_FCS_LEN;
		}

		/* RX buffer is good and fit into a XSK pool buffer */
		buf->xdp->data_end = buf->xdp->data + buf1_len;
		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);

		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

		switch (res) {
		case STMMAC_XDP_PASS:
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
			priv->dev->stats.rx_dropped++;
			break;
		case STMMAC_XDP_TX:
		case STMMAC_XDP_REDIRECT:
			/* Buffer ownership moved to the XDP path; finalize
			 * (timer arm / redirect flush) after the loop.
			 */
			xdp_status |= res;
			break;
		}

		buf->xdp = NULL;
		dirty++;
		count++;
	}

	/* Frame continues past the budget: save progress for next poll */
	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	priv->xstats.rx_pkt_n += count;
	priv->xstats.rxq_stats[queue].rx_pkt_n += count;

	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		/* Tell user space whether it must kick the kernel to get
		 * more buffers posted.
		 */
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);

		return (int)count;
	}

	return failure ? limit : (int)count;
}
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.  Each frame may span up to
 * two buffers (and multiple descriptors); an XDP program, if attached,
 * runs on the first buffer before an skb is built.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int count = 0, error = 0, len = 0;
	int status = 0, coe = priv->hw->rx_csum;
	unsigned int next_entry = rx_q->cur_rx;
	enum dma_data_direction dma_dir;
	unsigned int desc_size;
	struct sk_buff *skb = NULL;
	struct xdp_buff xdp;
	int xdp_status = 0;
	int buf_sz;

	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		unsigned int buf1_len = 0, buf2_len = 0;
		enum pkt_hash_types hash_type;
		struct stmmac_rx_buffer *buf;
		struct dma_desc *np, *p;
		int entry;
		u32 hash;

		/* Resume a frame split across the previous NAPI budget */
		if (!count && rx_q->state_saved) {
			skb = rx_q->state.skb;
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			skb = NULL;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
		buf1_len = 0;
		buf2_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
					  &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_conf.dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
						  &priv->xstats, rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		/* Drain all descriptors of a bad multi-descriptor frame */
		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			dev_kfree_skb(skb);
			skb = NULL;
			count++;
			continue;
		}

		/* Buffer is good. Go on. */

		prefetch(page_address(buf->page) + buf->page_offset);
		if (buf->sec_page)
			prefetch(page_address(buf->sec_page));

		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;
		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
		len += buf2_len;

		/* ACS is disabled; strip manually. */
		if (likely(!(status & rx_not_ls))) {
			/* The FCS sits at the very end of the frame, i.e. in
			 * buffer 2 when present, else in buffer 1.
			 */
			if (buf2_len) {
				buf2_len -= ETH_FCS_LEN;
				len -= ETH_FCS_LEN;
			} else if (buf1_len) {
				buf1_len -= ETH_FCS_LEN;
				len -= ETH_FCS_LEN;
			}
		}

		if (!skb) {
			unsigned int pre_len, sync_len;

			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, dma_dir);

			xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
			xdp_prepare_buff(&xdp, page_address(buf->page),
					 buf->page_offset, buf1_len, false);

			pre_len = xdp.data_end - xdp.data_hard_start -
				  buf->page_offset;
			skb = stmmac_xdp_run_prog(priv, &xdp);
			/* Due xdp_adjust_tail: DMA sync for_device
			 * cover max len CPU touch
			 */
			sync_len = xdp.data_end - xdp.data_hard_start -
				   buf->page_offset;
			sync_len = max(sync_len, pre_len);

			/* For Not XDP_PASS verdict */
			if (IS_ERR(skb)) {
				unsigned int xdp_res = -PTR_ERR(skb);

				if (xdp_res & STMMAC_XDP_CONSUMED) {
					page_pool_put_page(rx_q->page_pool,
							   virt_to_head_page(xdp.data),
							   sync_len, true);
					buf->page = NULL;
					priv->dev->stats.rx_dropped++;

					/* Clear skb as it was set as
					 * status by XDP program.
					 */
					skb = NULL;

					if (unlikely((status & rx_not_ls)))
						goto read_again;

					count++;
					continue;
				} else if (xdp_res & (STMMAC_XDP_TX |
						      STMMAC_XDP_REDIRECT)) {
					/* Page now owned by the XDP TX /
					 * redirect path.
					 */
					xdp_status |= xdp_res;
					buf->page = NULL;
					skb = NULL;
					count++;
					continue;
				}
			}
		}

		if (!skb) {
			/* XDP program may expand or reduce tail */
			buf1_len = xdp.data_end - xdp.data;

			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
			if (!skb) {
				priv->dev->stats.rx_dropped++;
				count++;
				goto drain_data;
			}

			/* XDP program may adjust header */
			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
			skb_put(skb, buf1_len);

			/* Data payload copied into SKB, page ready for recycle */
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
		} else if (buf1_len) {
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, dma_dir);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, buf->page_offset, buf1_len,
					priv->dma_conf.dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->page);
			buf->page = NULL;
		}

		if (buf2_len) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
						buf2_len, dma_dir);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, buf2_len,
					priv->dma_conf.dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->sec_page);
			buf->sec_page = NULL;
		}

drain_data:
		if (likely(status & rx_not_ls))
			goto read_again;
		if (!skb)
			continue;

		/* Got entire packet into SKB. Finish it. */

		stmmac_get_rx_hwtstamp(priv, p, np, skb);
		stmmac_rx_vlan(priv->dev, skb);
		skb->protocol = eth_type_trans(skb, priv->dev);

		if (unlikely(!coe))
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
			skb_set_hash(skb, hash, hash_type);

		skb_record_rx_queue(skb, queue);
		napi_gro_receive(&ch->rx_napi, skb);
		skb = NULL;

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += len;
		count++;
	}

	/* Frame (or its skb) continues past this poll: save progress */
	if (status & rx_not_ls || skb) {
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;
	priv->xstats.rxq_stats[queue].rx_pkt_n += count;

	return count;
}
container_of(napi, struct stmmac_channel, rx_napi); 54128fce3331SJose Abreu struct stmmac_priv *priv = ch->priv_data; 54138fce3331SJose Abreu u32 chan = ch->index; 54144ccb4585SJose Abreu int work_done; 54157ac6653aSJeff Kirsher 54169125cdd1SGiuseppe CAVALLARO priv->xstats.napi_poll++; 5417ce736788SJoao Pinto 5418132c32eeSOng Boon Leong work_done = stmmac_rx(priv, budget, chan); 5419021bd5e3SJose Abreu if (work_done < budget && napi_complete_done(napi, work_done)) { 5420021bd5e3SJose Abreu unsigned long flags; 5421021bd5e3SJose Abreu 5422021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 5423021bd5e3SJose Abreu stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 5424021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 5425021bd5e3SJose Abreu } 5426021bd5e3SJose Abreu 54274ccb4585SJose Abreu return work_done; 54284ccb4585SJose Abreu } 5429ce736788SJoao Pinto 54304ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 54314ccb4585SJose Abreu { 54324ccb4585SJose Abreu struct stmmac_channel *ch = 54334ccb4585SJose Abreu container_of(napi, struct stmmac_channel, tx_napi); 54344ccb4585SJose Abreu struct stmmac_priv *priv = ch->priv_data; 54354ccb4585SJose Abreu u32 chan = ch->index; 54364ccb4585SJose Abreu int work_done; 54374ccb4585SJose Abreu 54384ccb4585SJose Abreu priv->xstats.napi_poll++; 54394ccb4585SJose Abreu 5440132c32eeSOng Boon Leong work_done = stmmac_tx_clean(priv, budget, chan); 5441fa0be0a4SJose Abreu work_done = min(work_done, budget); 54428fce3331SJose Abreu 5443021bd5e3SJose Abreu if (work_done < budget && napi_complete_done(napi, work_done)) { 5444021bd5e3SJose Abreu unsigned long flags; 54454ccb4585SJose Abreu 5446021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 5447021bd5e3SJose Abreu stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 5448021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 5449fa0be0a4SJose Abreu } 54508fce3331SJose Abreu 54517ac6653aSJeff Kirsher return 
/* NAPI poll handler for a combined RX/TX (zero-copy / AF_XDP) channel.
 * Cleans TX completions and processes zero-copy RX within @budget;
 * re-enables both DMA interrupts only when both directions are done.
 */
static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rxtx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	int rx_done, tx_done, rxtx_done;
	u32 chan = ch->index;

	priv->xstats.napi_poll++;

	tx_done = stmmac_tx_clean(priv, budget, chan);
	tx_done = min(tx_done, budget);

	rx_done = stmmac_rx_zc(priv, budget, chan);

	rxtx_done = max(tx_done, rx_done);

	/* If either TX or RX work is not complete, return budget
	 * and keep polling
	 */
	if (rxtx_done >= budget)
		return budget;

	/* all work done, exit the polling mode */
	if (napi_complete_done(napi, rxtx_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work done are complete,
		 * so enable both RX & TX IRQs.
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* NAPI contract: never return budget after napi_complete_done() */
	return min(rxtx_done, budget - 1);
}

/**
 * stmmac_tx_timeout
 * @dev : Pointer to net device structure
 * @txqueue: the index of the hanging transmit queue
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Schedule the driver's global error / reset handling */
	stmmac_global_err(priv);
}
/**
 * stmmac_set_rx_mode - entry point for multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled.
 * Return value:
 * void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Program the MAC filter registers from dev's address lists */
	stmmac_set_filter(priv, priv->hw, dev);
}
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	struct stmmac_dma_conf *dma_conf;
	/* Keep the caller's MTU: new_mtu itself gets rounded up below for
	 * the FIFO fit check, but the rounded value must not become dev->mtu.
	 */
	const int mtu = new_mtu;
	int ret;

	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* The TX FIFO is shared between queues; each queue only gets an
	 * equal slice, so the fit check below is against the per-queue size.
	 */
	txfifosz /= priv->plat->tx_queues_to_use;

	/* XDP buffers are limited to one page, so jumbo MTUs are rejected
	 * while an XDP program is attached.
	 */
	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
		return -EINVAL;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	if (netif_running(dev)) {
		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
		/* Try to allocate the new DMA conf with the new mtu */
		dma_conf = stmmac_setup_dma_desc(priv, mtu);
		if (IS_ERR(dma_conf)) {
			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
				   mtu);
			return PTR_ERR(dma_conf);
		}

		/* Allocate-before-release: the old rings are only torn down
		 * once the replacement descriptor config exists, so a failed
		 * allocation leaves the interface running with the old MTU.
		 */
		stmmac_release(dev);

		ret = __stmmac_open(dev, dma_conf);
		/* __stmmac_open() copies the conf into priv; the temporary
		 * container is freed here in both the success and error case.
		 */
		kfree(dma_conf);
		if (ret) {
			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
			return ret;
		}

		/* stmmac_release() cleared the HW filters; restore them */
		stmmac_set_rx_mode(dev);
	}

	dev->mtu = mtu;
	netdev_update_features(dev);

	return 0;
}

/* Mask out offload features the hardware/platform cannot honour for the
 * current configuration before the core commits them, and mirror the TSO
 * choice into priv->tso.
 */
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* No RX checksum engine -> cannot offer NETIF_F_RXCSUM */
	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	/* No TX checksum engine -> drop all TX csum offloads */
	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

/* Apply the committed feature set: program RX checksum offload into the MAC
 * and (re)configure Split Header on every RX channel accordingly.
 */
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	if (priv->sph_cap) {
		/* Split Header is only usable while HW RX checksumming is on */
		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
		u32 chan;

		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	return 0;
}

/* Advance the Frame Preemption (FPE) verify/response handshake state machine
 * from an IRQ-status bitmap and, if anything changed, kick the FPE workqueue
 * to complete the transition outside interrupt context.
 */
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	/* Nothing to do unless a handshake was requested and an event fired */
	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
		return;

	/* If LP has sent verify mPacket, LP is FPE capable */
	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
		if (*lp_state < FPE_STATE_CAPABLE)
			*lp_state = FPE_STATE_CAPABLE;

		/* If user has requested FPE enable, quickly response */
		if (*hs_enable)
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_RESPONSE);
	}

	/* If Local has sent verify mPacket, Local is FPE capable */
	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
		if (*lo_state < FPE_STATE_CAPABLE)
			*lo_state = FPE_STATE_CAPABLE;
	}

	/* If LP has sent response mPacket, LP is entering FPE ON */
	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
		*lp_state = FPE_STATE_ENTERING_ON;

	/* If Local has sent response mPacket, Local is entering FPE ON */
	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
		*lo_state = FPE_STATE_ENTERING_ON;

	/* Schedule the FPE worker unless teardown is in progress or a run
	 * is already pending (test_and_set_bit gives exactly-once scheduling).
	 */
	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
	    priv->fpe_wq) {
		queue_work(priv->fpe_wq, &priv->fpe_task);
	}
}

/* Handle the non-DMA interrupt sources shared by all interrupt modes:
 * wake-up, EST/FPE events, core (GMAC/XGMAC) status including LPI and MTL
 * per-queue status, PCS link state and timestamp events.
 */
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
				      &priv->xstats, tx_cnt);

	if (priv->dma_cap.fpesel) {
		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
						   priv->dev);

		stmmac_fpe_event_status(priv, status);
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		/* Reading the MTL status acks the per-queue interrupts; the
		 * returned value is intentionally unused beyond the ack.
		 */
		for (queue = 0; queue < queues_count; queue++) {
			status = stmmac_host_mtl_irq_status(priv, priv->hw,
							    queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(priv->dev);
			else
				netif_carrier_off(priv->dev);
		}

		stmmac_timestamp_interrupt(priv, priv);
	}
}

/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It can call:
 * o DMA service routine (to manage incoming frame reception and transmission
 *   status)
 * o Core interrupts to manage: remote wake-up, management counter, LPI
 *   interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
57717ac6653aSJeff Kirsher 57728532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 57738532f613SOng Boon Leong { 57748532f613SOng Boon Leong struct net_device *dev = (struct net_device *)dev_id; 57758532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 57768532f613SOng Boon Leong 57778532f613SOng Boon Leong if (unlikely(!dev)) { 57788532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 57798532f613SOng Boon Leong return IRQ_NONE; 57808532f613SOng Boon Leong } 57818532f613SOng Boon Leong 57828532f613SOng Boon Leong /* Check if adapter is up */ 57838532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 57848532f613SOng Boon Leong return IRQ_HANDLED; 57858532f613SOng Boon Leong 57868532f613SOng Boon Leong /* To handle Common interrupts */ 57878532f613SOng Boon Leong stmmac_common_interrupt(priv); 57888532f613SOng Boon Leong 57898532f613SOng Boon Leong return IRQ_HANDLED; 57908532f613SOng Boon Leong } 57918532f613SOng Boon Leong 57928532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 57938532f613SOng Boon Leong { 57948532f613SOng Boon Leong struct net_device *dev = (struct net_device *)dev_id; 57958532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 57968532f613SOng Boon Leong 57978532f613SOng Boon Leong if (unlikely(!dev)) { 57988532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 57998532f613SOng Boon Leong return IRQ_NONE; 58008532f613SOng Boon Leong } 58018532f613SOng Boon Leong 58028532f613SOng Boon Leong /* Check if adapter is up */ 58038532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 58048532f613SOng Boon Leong return IRQ_HANDLED; 58058532f613SOng Boon Leong 58068532f613SOng Boon Leong /* Check if a fatal error happened */ 58078532f613SOng Boon Leong stmmac_safety_feat_interrupt(priv); 58088532f613SOng Boon Leong 58098532f613SOng Boon Leong return IRQ_HANDLED; 
58108532f613SOng Boon Leong } 58118532f613SOng Boon Leong 58128532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 58138532f613SOng Boon Leong { 58148532f613SOng Boon Leong struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 58158531c808SChristian Marangi struct stmmac_dma_conf *dma_conf; 58168532f613SOng Boon Leong int chan = tx_q->queue_index; 58178532f613SOng Boon Leong struct stmmac_priv *priv; 58188532f613SOng Boon Leong int status; 58198532f613SOng Boon Leong 58208531c808SChristian Marangi dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); 58218531c808SChristian Marangi priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 58228532f613SOng Boon Leong 58238532f613SOng Boon Leong if (unlikely(!data)) { 58248532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 58258532f613SOng Boon Leong return IRQ_NONE; 58268532f613SOng Boon Leong } 58278532f613SOng Boon Leong 58288532f613SOng Boon Leong /* Check if adapter is up */ 58298532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 58308532f613SOng Boon Leong return IRQ_HANDLED; 58318532f613SOng Boon Leong 58328532f613SOng Boon Leong status = stmmac_napi_check(priv, chan, DMA_DIR_TX); 58338532f613SOng Boon Leong 58348532f613SOng Boon Leong if (unlikely(status & tx_hard_error_bump_tc)) { 58358532f613SOng Boon Leong /* Try to bump up the dma threshold on this failure */ 58363a6c12a0SXiaoliang Yang stmmac_bump_dma_threshold(priv, chan); 58378532f613SOng Boon Leong } else if (unlikely(status == tx_hard_error)) { 58388532f613SOng Boon Leong stmmac_tx_err(priv, chan); 58398532f613SOng Boon Leong } 58408532f613SOng Boon Leong 58418532f613SOng Boon Leong return IRQ_HANDLED; 58428532f613SOng Boon Leong } 58438532f613SOng Boon Leong 58448532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) 58458532f613SOng Boon Leong { 58468532f613SOng Boon Leong struct stmmac_rx_queue *rx_q = (struct 
stmmac_rx_queue *)data; 58478531c808SChristian Marangi struct stmmac_dma_conf *dma_conf; 58488532f613SOng Boon Leong int chan = rx_q->queue_index; 58498532f613SOng Boon Leong struct stmmac_priv *priv; 58508532f613SOng Boon Leong 58518531c808SChristian Marangi dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); 58528531c808SChristian Marangi priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 58538532f613SOng Boon Leong 58548532f613SOng Boon Leong if (unlikely(!data)) { 58558532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 58568532f613SOng Boon Leong return IRQ_NONE; 58578532f613SOng Boon Leong } 58588532f613SOng Boon Leong 58598532f613SOng Boon Leong /* Check if adapter is up */ 58608532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 58618532f613SOng Boon Leong return IRQ_HANDLED; 58628532f613SOng Boon Leong 58638532f613SOng Boon Leong stmmac_napi_check(priv, chan, DMA_DIR_RX); 58648532f613SOng Boon Leong 58658532f613SOng Boon Leong return IRQ_HANDLED; 58668532f613SOng Boon Leong } 58678532f613SOng Boon Leong 58687ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 58697ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools 5870ceb69499SGiuseppe CAVALLARO * to allow network I/O with interrupts disabled. 
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int i;

	/* If adapter is down, do nothing */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	if (priv->plat->multi_msi_en) {
		/* Multi-vector mode: invoke each per-queue MSI handler
		 * directly instead of the single shared ISR.
		 */
		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
			stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);

		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
			stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
	} else {
		disable_irq(dev->irq);
		stmmac_interrupt(dev->irq, dev);
		enable_irq(dev->irq);
	}
}
#endif

/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* MII register access is delegated to phylink/the PHY */
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

/* flow_block callback: dispatch a classifier offload request (cls_u32 or
 * flower) to the TC offload helpers. Queues are quiesced around the HW
 * reprogramming.
 */
static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	__stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);

/* ndo_setup_tc: route TC offload requests (flow blocks and the CBS/taprio/ETF
 * qdiscs) to the corresponding stmmac_tc_* implementation.
 */
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_select_queue: pin TSO/USO traffic to queue 0, otherwise defer to the
 * core's default pick.
 */
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's use always the Queue 0
		 * because if TSO/USO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

/* ndo_set_mac_address: update dev_addr via eth_mac_addr() and program the
 * new address into the MAC's perfect filter slot 0. The device is runtime-
 * resumed around the register write.
 */
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	/* Balance the resume above on both success and failure paths */
	pm_runtime_put(priv->device);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

/* Dump @size descriptors starting at @head into @seq, one per line with its
 * DMA address; @extend_desc selects extended vs. basic descriptor layout.
 */
static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	dma_addr_t dma_addr;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			dma_addr = dma_phy_addr + i * sizeof(*ep);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			dma_addr = dma_phy_addr + i * sizeof(*p);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

/* debugfs show: dump every RX and TX descriptor ring of the interface.
 * Only meaningful while the interface is up (rings exist); returns early
 * otherwise.
 */
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			/* TBS rings use a different descriptor format and are
			 * intentionally not dumped here.
			 */
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);

/* debugfs show: report the DMA HW capability register contents */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* dma_cap is only populated when the HW feature register exists */
	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* GMAC4+ reports a single RX COE bit; older cores split it in two */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);

/* Use network device events to rename debugfs file entries.
6201481a7d15SJiping Ma */ 6202481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused, 6203481a7d15SJiping Ma unsigned long event, void *ptr) 6204481a7d15SJiping Ma { 6205481a7d15SJiping Ma struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6206481a7d15SJiping Ma struct stmmac_priv *priv = netdev_priv(dev); 6207481a7d15SJiping Ma 6208481a7d15SJiping Ma if (dev->netdev_ops != &stmmac_netdev_ops) 6209481a7d15SJiping Ma goto done; 6210481a7d15SJiping Ma 6211481a7d15SJiping Ma switch (event) { 6212481a7d15SJiping Ma case NETDEV_CHANGENAME: 6213481a7d15SJiping Ma if (priv->dbgfs_dir) 6214481a7d15SJiping Ma priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 6215481a7d15SJiping Ma priv->dbgfs_dir, 6216481a7d15SJiping Ma stmmac_fs_dir, 6217481a7d15SJiping Ma dev->name); 6218481a7d15SJiping Ma break; 6219481a7d15SJiping Ma } 6220481a7d15SJiping Ma done: 6221481a7d15SJiping Ma return NOTIFY_DONE; 6222481a7d15SJiping Ma } 6223481a7d15SJiping Ma 6224481a7d15SJiping Ma static struct notifier_block stmmac_notifier = { 6225481a7d15SJiping Ma .notifier_call = stmmac_device_event, 6226481a7d15SJiping Ma }; 6227481a7d15SJiping Ma 62288d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev) 62297ac29055SGiuseppe CAVALLARO { 6230466c5ac8SMathieu Olivari struct stmmac_priv *priv = netdev_priv(dev); 62317ac29055SGiuseppe CAVALLARO 6232474a31e1SAaro Koskinen rtnl_lock(); 6233474a31e1SAaro Koskinen 6234466c5ac8SMathieu Olivari /* Create per netdev entries */ 6235466c5ac8SMathieu Olivari priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 6236466c5ac8SMathieu Olivari 62377ac29055SGiuseppe CAVALLARO /* Entry to report DMA RX/TX rings */ 62388d72ab11SGreg Kroah-Hartman debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, 62397ac29055SGiuseppe CAVALLARO &stmmac_rings_status_fops); 62407ac29055SGiuseppe CAVALLARO 6241e7434821SGiuseppe CAVALLARO /* Entry to report the DMA HW features */ 62428d72ab11SGreg 
Kroah-Hartman debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 62438d72ab11SGreg Kroah-Hartman &stmmac_dma_cap_fops); 6244481a7d15SJiping Ma 6245474a31e1SAaro Koskinen rtnl_unlock(); 62467ac29055SGiuseppe CAVALLARO } 62477ac29055SGiuseppe CAVALLARO 6248466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev) 62497ac29055SGiuseppe CAVALLARO { 6250466c5ac8SMathieu Olivari struct stmmac_priv *priv = netdev_priv(dev); 6251466c5ac8SMathieu Olivari 6252466c5ac8SMathieu Olivari debugfs_remove_recursive(priv->dbgfs_dir); 62537ac29055SGiuseppe CAVALLARO } 625450fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */ 62557ac29055SGiuseppe CAVALLARO 62563cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le) 62573cd1cfcbSJose Abreu { 62583cd1cfcbSJose Abreu unsigned char *data = (unsigned char *)&vid_le; 62593cd1cfcbSJose Abreu unsigned char data_byte = 0; 62603cd1cfcbSJose Abreu u32 crc = ~0x0; 62613cd1cfcbSJose Abreu u32 temp = 0; 62623cd1cfcbSJose Abreu int i, bits; 62633cd1cfcbSJose Abreu 62643cd1cfcbSJose Abreu bits = get_bitmask_order(VLAN_VID_MASK); 62653cd1cfcbSJose Abreu for (i = 0; i < bits; i++) { 62663cd1cfcbSJose Abreu if ((i % 8) == 0) 62673cd1cfcbSJose Abreu data_byte = data[i / 8]; 62683cd1cfcbSJose Abreu 62693cd1cfcbSJose Abreu temp = ((crc & 1) ^ data_byte) & 1; 62703cd1cfcbSJose Abreu crc >>= 1; 62713cd1cfcbSJose Abreu data_byte >>= 1; 62723cd1cfcbSJose Abreu 62733cd1cfcbSJose Abreu if (temp) 62743cd1cfcbSJose Abreu crc ^= 0xedb88320; 62753cd1cfcbSJose Abreu } 62763cd1cfcbSJose Abreu 62773cd1cfcbSJose Abreu return crc; 62783cd1cfcbSJose Abreu } 62793cd1cfcbSJose Abreu 62803cd1cfcbSJose Abreu static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) 62813cd1cfcbSJose Abreu { 62823cd1cfcbSJose Abreu u32 crc, hash = 0; 6283a24cae70SJose Abreu __le16 pmatch = 0; 6284c7ab0b80SJose Abreu int count = 0; 6285c7ab0b80SJose Abreu u16 vid = 0; 62863cd1cfcbSJose Abreu 62873cd1cfcbSJose Abreu for_each_set_bit(vid, 
priv->active_vlans, VLAN_N_VID) { 62883cd1cfcbSJose Abreu __le16 vid_le = cpu_to_le16(vid); 62893cd1cfcbSJose Abreu crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; 62903cd1cfcbSJose Abreu hash |= (1 << crc); 6291c7ab0b80SJose Abreu count++; 62923cd1cfcbSJose Abreu } 62933cd1cfcbSJose Abreu 6294c7ab0b80SJose Abreu if (!priv->dma_cap.vlhash) { 6295c7ab0b80SJose Abreu if (count > 2) /* VID = 0 always passes filter */ 6296c7ab0b80SJose Abreu return -EOPNOTSUPP; 6297c7ab0b80SJose Abreu 6298a24cae70SJose Abreu pmatch = cpu_to_le16(vid); 6299c7ab0b80SJose Abreu hash = 0; 6300c7ab0b80SJose Abreu } 6301c7ab0b80SJose Abreu 6302a24cae70SJose Abreu return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); 63033cd1cfcbSJose Abreu } 63043cd1cfcbSJose Abreu 63053cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 63063cd1cfcbSJose Abreu { 63073cd1cfcbSJose Abreu struct stmmac_priv *priv = netdev_priv(ndev); 63083cd1cfcbSJose Abreu bool is_double = false; 63093cd1cfcbSJose Abreu int ret; 63103cd1cfcbSJose Abreu 63113cd1cfcbSJose Abreu if (be16_to_cpu(proto) == ETH_P_8021AD) 63123cd1cfcbSJose Abreu is_double = true; 63133cd1cfcbSJose Abreu 63143cd1cfcbSJose Abreu set_bit(vid, priv->active_vlans); 63153cd1cfcbSJose Abreu ret = stmmac_vlan_update(priv, is_double); 63163cd1cfcbSJose Abreu if (ret) { 63173cd1cfcbSJose Abreu clear_bit(vid, priv->active_vlans); 63183cd1cfcbSJose Abreu return ret; 63193cd1cfcbSJose Abreu } 63203cd1cfcbSJose Abreu 6321dd6a4998SJose Abreu if (priv->hw->num_vlan) { 6322ed64639bSWong Vee Khee ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6323dd6a4998SJose Abreu if (ret) 63243cd1cfcbSJose Abreu return ret; 63253cd1cfcbSJose Abreu } 63263cd1cfcbSJose Abreu 6327dd6a4998SJose Abreu return 0; 6328dd6a4998SJose Abreu } 6329dd6a4998SJose Abreu 63303cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 63313cd1cfcbSJose Abreu { 
63323cd1cfcbSJose Abreu struct stmmac_priv *priv = netdev_priv(ndev); 63333cd1cfcbSJose Abreu bool is_double = false; 6334ed64639bSWong Vee Khee int ret; 63353cd1cfcbSJose Abreu 633685648865SMinghao Chi ret = pm_runtime_resume_and_get(priv->device); 633785648865SMinghao Chi if (ret < 0) 6338b3dcb312SJoakim Zhang return ret; 6339b3dcb312SJoakim Zhang 63403cd1cfcbSJose Abreu if (be16_to_cpu(proto) == ETH_P_8021AD) 63413cd1cfcbSJose Abreu is_double = true; 63423cd1cfcbSJose Abreu 63433cd1cfcbSJose Abreu clear_bit(vid, priv->active_vlans); 6344dd6a4998SJose Abreu 6345dd6a4998SJose Abreu if (priv->hw->num_vlan) { 6346ed64639bSWong Vee Khee ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6347ed64639bSWong Vee Khee if (ret) 63485ec55823SJoakim Zhang goto del_vlan_error; 6349dd6a4998SJose Abreu } 6350ed64639bSWong Vee Khee 63515ec55823SJoakim Zhang ret = stmmac_vlan_update(priv, is_double); 63525ec55823SJoakim Zhang 63535ec55823SJoakim Zhang del_vlan_error: 63545ec55823SJoakim Zhang pm_runtime_put(priv->device); 63555ec55823SJoakim Zhang 63565ec55823SJoakim Zhang return ret; 63573cd1cfcbSJose Abreu } 63583cd1cfcbSJose Abreu 63595fabb012SOng Boon Leong static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) 63605fabb012SOng Boon Leong { 63615fabb012SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 63625fabb012SOng Boon Leong 63635fabb012SOng Boon Leong switch (bpf->command) { 63645fabb012SOng Boon Leong case XDP_SETUP_PROG: 63655fabb012SOng Boon Leong return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); 6366bba2556eSOng Boon Leong case XDP_SETUP_XSK_POOL: 6367bba2556eSOng Boon Leong return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, 6368bba2556eSOng Boon Leong bpf->xsk.queue_id); 63695fabb012SOng Boon Leong default: 63705fabb012SOng Boon Leong return -EOPNOTSUPP; 63715fabb012SOng Boon Leong } 63725fabb012SOng Boon Leong } 63735fabb012SOng Boon Leong 63748b278a5bSOng Boon Leong static int stmmac_xdp_xmit(struct net_device 
*dev, int num_frames, 63758b278a5bSOng Boon Leong struct xdp_frame **frames, u32 flags) 63768b278a5bSOng Boon Leong { 63778b278a5bSOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 63788b278a5bSOng Boon Leong int cpu = smp_processor_id(); 63798b278a5bSOng Boon Leong struct netdev_queue *nq; 63808b278a5bSOng Boon Leong int i, nxmit = 0; 63818b278a5bSOng Boon Leong int queue; 63828b278a5bSOng Boon Leong 63838b278a5bSOng Boon Leong if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) 63848b278a5bSOng Boon Leong return -ENETDOWN; 63858b278a5bSOng Boon Leong 63868b278a5bSOng Boon Leong if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 63878b278a5bSOng Boon Leong return -EINVAL; 63888b278a5bSOng Boon Leong 63898b278a5bSOng Boon Leong queue = stmmac_xdp_get_tx_queue(priv, cpu); 63908b278a5bSOng Boon Leong nq = netdev_get_tx_queue(priv->dev, queue); 63918b278a5bSOng Boon Leong 63928b278a5bSOng Boon Leong __netif_tx_lock(nq, cpu); 63938b278a5bSOng Boon Leong /* Avoids TX time-out as we are sharing with slow path */ 63945337824fSEric Dumazet txq_trans_cond_update(nq); 63958b278a5bSOng Boon Leong 63968b278a5bSOng Boon Leong for (i = 0; i < num_frames; i++) { 63978b278a5bSOng Boon Leong int res; 63988b278a5bSOng Boon Leong 63998b278a5bSOng Boon Leong res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); 64008b278a5bSOng Boon Leong if (res == STMMAC_XDP_CONSUMED) 64018b278a5bSOng Boon Leong break; 64028b278a5bSOng Boon Leong 64038b278a5bSOng Boon Leong nxmit++; 64048b278a5bSOng Boon Leong } 64058b278a5bSOng Boon Leong 64068b278a5bSOng Boon Leong if (flags & XDP_XMIT_FLUSH) { 64078b278a5bSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 64088b278a5bSOng Boon Leong stmmac_tx_timer_arm(priv, queue); 64098b278a5bSOng Boon Leong } 64108b278a5bSOng Boon Leong 64118b278a5bSOng Boon Leong __netif_tx_unlock(nq); 64128b278a5bSOng Boon Leong 64138b278a5bSOng Boon Leong return nxmit; 64148b278a5bSOng Boon Leong } 64158b278a5bSOng Boon Leong 6416bba2556eSOng Boon Leong void 
stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) 6417bba2556eSOng Boon Leong { 6418bba2556eSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6419bba2556eSOng Boon Leong unsigned long flags; 6420bba2556eSOng Boon Leong 6421bba2556eSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6422bba2556eSOng Boon Leong stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6423bba2556eSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6424bba2556eSOng Boon Leong 6425bba2556eSOng Boon Leong stmmac_stop_rx_dma(priv, queue); 6426ba39b344SChristian Marangi __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6427bba2556eSOng Boon Leong } 6428bba2556eSOng Boon Leong 6429bba2556eSOng Boon Leong void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) 6430bba2556eSOng Boon Leong { 64318531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 6432bba2556eSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6433bba2556eSOng Boon Leong unsigned long flags; 6434bba2556eSOng Boon Leong u32 buf_size; 6435bba2556eSOng Boon Leong int ret; 6436bba2556eSOng Boon Leong 6437ba39b344SChristian Marangi ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6438bba2556eSOng Boon Leong if (ret) { 6439bba2556eSOng Boon Leong netdev_err(priv->dev, "Failed to alloc RX desc.\n"); 6440bba2556eSOng Boon Leong return; 6441bba2556eSOng Boon Leong } 6442bba2556eSOng Boon Leong 6443ba39b344SChristian Marangi ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); 6444bba2556eSOng Boon Leong if (ret) { 6445ba39b344SChristian Marangi __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6446bba2556eSOng Boon Leong netdev_err(priv->dev, "Failed to init RX desc.\n"); 6447bba2556eSOng Boon Leong return; 6448bba2556eSOng Boon Leong } 6449bba2556eSOng Boon Leong 6450f9ec5723SChristian Marangi stmmac_reset_rx_queue(priv, queue); 6451ba39b344SChristian Marangi 
stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); 6452bba2556eSOng Boon Leong 6453bba2556eSOng Boon Leong stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6454bba2556eSOng Boon Leong rx_q->dma_rx_phy, rx_q->queue_index); 6455bba2556eSOng Boon Leong 6456bba2556eSOng Boon Leong rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * 6457bba2556eSOng Boon Leong sizeof(struct dma_desc)); 6458bba2556eSOng Boon Leong stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6459bba2556eSOng Boon Leong rx_q->rx_tail_addr, rx_q->queue_index); 6460bba2556eSOng Boon Leong 6461bba2556eSOng Boon Leong if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6462bba2556eSOng Boon Leong buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6463bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 6464bba2556eSOng Boon Leong buf_size, 6465bba2556eSOng Boon Leong rx_q->queue_index); 6466bba2556eSOng Boon Leong } else { 6467bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 64688531c808SChristian Marangi priv->dma_conf.dma_buf_sz, 6469bba2556eSOng Boon Leong rx_q->queue_index); 6470bba2556eSOng Boon Leong } 6471bba2556eSOng Boon Leong 6472bba2556eSOng Boon Leong stmmac_start_rx_dma(priv, queue); 6473bba2556eSOng Boon Leong 6474bba2556eSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6475bba2556eSOng Boon Leong stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6476bba2556eSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6477bba2556eSOng Boon Leong } 6478bba2556eSOng Boon Leong 6479132c32eeSOng Boon Leong void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) 6480132c32eeSOng Boon Leong { 6481132c32eeSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6482132c32eeSOng Boon Leong unsigned long flags; 6483132c32eeSOng Boon Leong 6484132c32eeSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6485132c32eeSOng Boon Leong stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6486132c32eeSOng Boon Leong 
spin_unlock_irqrestore(&ch->lock, flags); 6487132c32eeSOng Boon Leong 6488132c32eeSOng Boon Leong stmmac_stop_tx_dma(priv, queue); 6489ba39b344SChristian Marangi __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6490132c32eeSOng Boon Leong } 6491132c32eeSOng Boon Leong 6492132c32eeSOng Boon Leong void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) 6493132c32eeSOng Boon Leong { 64948531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 6495132c32eeSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6496132c32eeSOng Boon Leong unsigned long flags; 6497132c32eeSOng Boon Leong int ret; 6498132c32eeSOng Boon Leong 6499ba39b344SChristian Marangi ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6500132c32eeSOng Boon Leong if (ret) { 6501132c32eeSOng Boon Leong netdev_err(priv->dev, "Failed to alloc TX desc.\n"); 6502132c32eeSOng Boon Leong return; 6503132c32eeSOng Boon Leong } 6504132c32eeSOng Boon Leong 6505ba39b344SChristian Marangi ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); 6506132c32eeSOng Boon Leong if (ret) { 6507ba39b344SChristian Marangi __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6508132c32eeSOng Boon Leong netdev_err(priv->dev, "Failed to init TX desc.\n"); 6509132c32eeSOng Boon Leong return; 6510132c32eeSOng Boon Leong } 6511132c32eeSOng Boon Leong 6512f9ec5723SChristian Marangi stmmac_reset_tx_queue(priv, queue); 6513ba39b344SChristian Marangi stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); 6514132c32eeSOng Boon Leong 6515132c32eeSOng Boon Leong stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6516132c32eeSOng Boon Leong tx_q->dma_tx_phy, tx_q->queue_index); 6517132c32eeSOng Boon Leong 6518132c32eeSOng Boon Leong if (tx_q->tbs & STMMAC_TBS_AVAIL) 6519132c32eeSOng Boon Leong stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); 6520132c32eeSOng Boon Leong 6521132c32eeSOng Boon Leong tx_q->tx_tail_addr = 
tx_q->dma_tx_phy; 6522132c32eeSOng Boon Leong stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6523132c32eeSOng Boon Leong tx_q->tx_tail_addr, tx_q->queue_index); 6524132c32eeSOng Boon Leong 6525132c32eeSOng Boon Leong stmmac_start_tx_dma(priv, queue); 6526132c32eeSOng Boon Leong 6527132c32eeSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6528132c32eeSOng Boon Leong stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6529132c32eeSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6530132c32eeSOng Boon Leong } 6531132c32eeSOng Boon Leong 6532ac746c85SOng Boon Leong void stmmac_xdp_release(struct net_device *dev) 6533ac746c85SOng Boon Leong { 6534ac746c85SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 6535ac746c85SOng Boon Leong u32 chan; 6536ac746c85SOng Boon Leong 6537ac746c85SOng Boon Leong /* Disable NAPI process */ 6538ac746c85SOng Boon Leong stmmac_disable_all_queues(priv); 6539ac746c85SOng Boon Leong 6540ac746c85SOng Boon Leong for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 65418531c808SChristian Marangi hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6542ac746c85SOng Boon Leong 6543ac746c85SOng Boon Leong /* Free the IRQ lines */ 6544ac746c85SOng Boon Leong stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); 6545ac746c85SOng Boon Leong 6546ac746c85SOng Boon Leong /* Stop TX/RX DMA channels */ 6547ac746c85SOng Boon Leong stmmac_stop_all_dma(priv); 6548ac746c85SOng Boon Leong 6549ac746c85SOng Boon Leong /* Release and free the Rx/Tx resources */ 6550ba39b344SChristian Marangi free_dma_desc_resources(priv, &priv->dma_conf); 6551ac746c85SOng Boon Leong 6552ac746c85SOng Boon Leong /* Disable the MAC Rx/Tx */ 6553ac746c85SOng Boon Leong stmmac_mac_set(priv, priv->ioaddr, false); 6554ac746c85SOng Boon Leong 6555ac746c85SOng Boon Leong /* set trans_start so we don't get spurious 6556ac746c85SOng Boon Leong * watchdogs during reset 6557ac746c85SOng Boon Leong */ 6558ac746c85SOng Boon Leong netif_trans_update(dev); 6559ac746c85SOng Boon 
Leong netif_carrier_off(dev); 6560ac746c85SOng Boon Leong } 6561ac746c85SOng Boon Leong 6562ac746c85SOng Boon Leong int stmmac_xdp_open(struct net_device *dev) 6563ac746c85SOng Boon Leong { 6564ac746c85SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 6565ac746c85SOng Boon Leong u32 rx_cnt = priv->plat->rx_queues_to_use; 6566ac746c85SOng Boon Leong u32 tx_cnt = priv->plat->tx_queues_to_use; 6567ac746c85SOng Boon Leong u32 dma_csr_ch = max(rx_cnt, tx_cnt); 6568ac746c85SOng Boon Leong struct stmmac_rx_queue *rx_q; 6569ac746c85SOng Boon Leong struct stmmac_tx_queue *tx_q; 6570ac746c85SOng Boon Leong u32 buf_size; 6571ac746c85SOng Boon Leong bool sph_en; 6572ac746c85SOng Boon Leong u32 chan; 6573ac746c85SOng Boon Leong int ret; 6574ac746c85SOng Boon Leong 6575ba39b344SChristian Marangi ret = alloc_dma_desc_resources(priv, &priv->dma_conf); 6576ac746c85SOng Boon Leong if (ret < 0) { 6577ac746c85SOng Boon Leong netdev_err(dev, "%s: DMA descriptors allocation failed\n", 6578ac746c85SOng Boon Leong __func__); 6579ac746c85SOng Boon Leong goto dma_desc_error; 6580ac746c85SOng Boon Leong } 6581ac746c85SOng Boon Leong 6582ba39b344SChristian Marangi ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); 6583ac746c85SOng Boon Leong if (ret < 0) { 6584ac746c85SOng Boon Leong netdev_err(dev, "%s: DMA descriptors initialization failed\n", 6585ac746c85SOng Boon Leong __func__); 6586ac746c85SOng Boon Leong goto init_error; 6587ac746c85SOng Boon Leong } 6588ac746c85SOng Boon Leong 6589ac746c85SOng Boon Leong /* DMA CSR Channel configuration */ 6590087a7b94SVincent Whitchurch for (chan = 0; chan < dma_csr_ch; chan++) { 6591ac746c85SOng Boon Leong stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 6592087a7b94SVincent Whitchurch stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 6593087a7b94SVincent Whitchurch } 6594ac746c85SOng Boon Leong 6595ac746c85SOng Boon Leong /* Adjust Split header */ 6596ac746c85SOng Boon Leong sph_en = (priv->hw->rx_csum > 
0) && priv->sph; 6597ac746c85SOng Boon Leong 6598ac746c85SOng Boon Leong /* DMA RX Channel Configuration */ 6599ac746c85SOng Boon Leong for (chan = 0; chan < rx_cnt; chan++) { 66008531c808SChristian Marangi rx_q = &priv->dma_conf.rx_queue[chan]; 6601ac746c85SOng Boon Leong 6602ac746c85SOng Boon Leong stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6603ac746c85SOng Boon Leong rx_q->dma_rx_phy, chan); 6604ac746c85SOng Boon Leong 6605ac746c85SOng Boon Leong rx_q->rx_tail_addr = rx_q->dma_rx_phy + 6606ac746c85SOng Boon Leong (rx_q->buf_alloc_num * 6607ac746c85SOng Boon Leong sizeof(struct dma_desc)); 6608ac746c85SOng Boon Leong stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6609ac746c85SOng Boon Leong rx_q->rx_tail_addr, chan); 6610ac746c85SOng Boon Leong 6611ac746c85SOng Boon Leong if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6612ac746c85SOng Boon Leong buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6613ac746c85SOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 6614ac746c85SOng Boon Leong buf_size, 6615ac746c85SOng Boon Leong rx_q->queue_index); 6616ac746c85SOng Boon Leong } else { 6617ac746c85SOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 66188531c808SChristian Marangi priv->dma_conf.dma_buf_sz, 6619ac746c85SOng Boon Leong rx_q->queue_index); 6620ac746c85SOng Boon Leong } 6621ac746c85SOng Boon Leong 6622ac746c85SOng Boon Leong stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 6623ac746c85SOng Boon Leong } 6624ac746c85SOng Boon Leong 6625ac746c85SOng Boon Leong /* DMA TX Channel Configuration */ 6626ac746c85SOng Boon Leong for (chan = 0; chan < tx_cnt; chan++) { 66278531c808SChristian Marangi tx_q = &priv->dma_conf.tx_queue[chan]; 6628ac746c85SOng Boon Leong 6629ac746c85SOng Boon Leong stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6630ac746c85SOng Boon Leong tx_q->dma_tx_phy, chan); 6631ac746c85SOng Boon Leong 6632ac746c85SOng Boon Leong tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6633ac746c85SOng Boon Leong 
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6634ac746c85SOng Boon Leong tx_q->tx_tail_addr, chan); 663561da6ac7SOng Boon Leong 663661da6ac7SOng Boon Leong hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 663761da6ac7SOng Boon Leong tx_q->txtimer.function = stmmac_tx_timer; 6638ac746c85SOng Boon Leong } 6639ac746c85SOng Boon Leong 6640ac746c85SOng Boon Leong /* Enable the MAC Rx/Tx */ 6641ac746c85SOng Boon Leong stmmac_mac_set(priv, priv->ioaddr, true); 6642ac746c85SOng Boon Leong 6643ac746c85SOng Boon Leong /* Start Rx & Tx DMA Channels */ 6644ac746c85SOng Boon Leong stmmac_start_all_dma(priv); 6645ac746c85SOng Boon Leong 6646ac746c85SOng Boon Leong ret = stmmac_request_irq(dev); 6647ac746c85SOng Boon Leong if (ret) 6648ac746c85SOng Boon Leong goto irq_error; 6649ac746c85SOng Boon Leong 6650ac746c85SOng Boon Leong /* Enable NAPI process*/ 6651ac746c85SOng Boon Leong stmmac_enable_all_queues(priv); 6652ac746c85SOng Boon Leong netif_carrier_on(dev); 6653ac746c85SOng Boon Leong netif_tx_start_all_queues(dev); 6654087a7b94SVincent Whitchurch stmmac_enable_all_dma_irq(priv); 6655ac746c85SOng Boon Leong 6656ac746c85SOng Boon Leong return 0; 6657ac746c85SOng Boon Leong 6658ac746c85SOng Boon Leong irq_error: 6659ac746c85SOng Boon Leong for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 66608531c808SChristian Marangi hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6661ac746c85SOng Boon Leong 6662ac746c85SOng Boon Leong stmmac_hw_teardown(dev); 6663ac746c85SOng Boon Leong init_error: 6664ba39b344SChristian Marangi free_dma_desc_resources(priv, &priv->dma_conf); 6665ac746c85SOng Boon Leong dma_desc_error: 6666ac746c85SOng Boon Leong return ret; 6667ac746c85SOng Boon Leong } 6668ac746c85SOng Boon Leong 6669bba2556eSOng Boon Leong int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) 6670bba2556eSOng Boon Leong { 6671bba2556eSOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 6672bba2556eSOng Boon Leong struct 
stmmac_rx_queue *rx_q; 6673132c32eeSOng Boon Leong struct stmmac_tx_queue *tx_q; 6674bba2556eSOng Boon Leong struct stmmac_channel *ch; 6675bba2556eSOng Boon Leong 6676bba2556eSOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state) || 6677bba2556eSOng Boon Leong !netif_carrier_ok(priv->dev)) 6678bba2556eSOng Boon Leong return -ENETDOWN; 6679bba2556eSOng Boon Leong 6680bba2556eSOng Boon Leong if (!stmmac_xdp_is_enabled(priv)) 6681a817ead4SMaciej Fijalkowski return -EINVAL; 6682bba2556eSOng Boon Leong 6683132c32eeSOng Boon Leong if (queue >= priv->plat->rx_queues_to_use || 6684132c32eeSOng Boon Leong queue >= priv->plat->tx_queues_to_use) 6685bba2556eSOng Boon Leong return -EINVAL; 6686bba2556eSOng Boon Leong 66878531c808SChristian Marangi rx_q = &priv->dma_conf.rx_queue[queue]; 66888531c808SChristian Marangi tx_q = &priv->dma_conf.tx_queue[queue]; 6689bba2556eSOng Boon Leong ch = &priv->channel[queue]; 6690bba2556eSOng Boon Leong 6691132c32eeSOng Boon Leong if (!rx_q->xsk_pool && !tx_q->xsk_pool) 6692a817ead4SMaciej Fijalkowski return -EINVAL; 6693bba2556eSOng Boon Leong 6694132c32eeSOng Boon Leong if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { 6695bba2556eSOng Boon Leong /* EQoS does not have per-DMA channel SW interrupt, 6696bba2556eSOng Boon Leong * so we schedule RX Napi straight-away. 
	 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}

/* net_device entry points the networking core invokes on this interface */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
};

/**
 * stmmac_reset_subtask - restart the interface after a requested reset
 * @priv: driver private structure
 * Description: if STMMAC_RESET_REQUESTED is set (and the interface is not
 * already going down), bounce the device with dev_close()/dev_open() under
 * the RTNL lock.  STMMAC_RESETING is used as a busy flag so concurrent
 * resets serialize; STMMAC_DOWN guards against re-entering the close path.
 */
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	/* Wait until any in-flight reset owner drops the busy flag */
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}

/**
 * stmmac_service_task - deferred work entry point
 * @work: the work_struct embedded in stmmac_priv (service_task)
 * Description: runs the reset subtask, then clears the scheduled flag so the
 * task can be queued again.
 */
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}

/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		/* PMT only if HW supports remote wake-up and the platform did
		 * not elect PHY-based WoL instead.
		 */
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!priv->plat->use_phy_wol;
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
				(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
				ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}

/**
 * stmmac_napi_add - register per-channel NAPI contexts
 * @dev: net device pointer
 * Description: for each DMA channel (max of RX/TX queue counts) initialize
 * the channel bookkeeping and register rx, tx and combined rxtx NAPI
 * instances depending on which queue ranges the channel belongs to.
 */
static void stmmac_napi_add(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;
		spin_lock_init(&ch->lock);

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_napi_add_tx(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx);
		}
		/* rxtx NAPI only exists where a channel has both queues */
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_add(dev, &ch->rxtx_napi,
				       stmmac_napi_poll_rxtx);
		}
	}
}

/**
 * stmmac_napi_del - unregister the per-channel NAPI contexts
 * @dev: net device pointer
 * Description: mirror of stmmac_napi_add(); deletes exactly the NAPI
 * instances that were added for each channel.
 */
static void stmmac_napi_del(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_del(&ch->rxtx_napi);
		}
	}
}

/**
 * stmmac_reinit_queues - change the number of active RX/TX queues
 * @dev: net device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: tears the interface down if running, rebuilds the NAPI
 * contexts for the new queue counts and brings the interface back up.
 * Return: 0 on success or the error from stmmac_open().
 */
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}

/**
 * stmmac_reinit_ringparam - change the DMA ring sizes
 * @dev: net device pointer
 * @rx_size: new RX descriptor ring size
 * @tx_size: new TX descriptor ring size
 * Description: restart the interface (if running) so the rings are
 * reallocated with the new sizes on the next stmmac_open().
 * Return: 0 on success or the error from stmmac_open().
 */
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_conf.dma_rx_size = rx_size;
	priv->dma_conf.dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}

/* NOTE(review): "MPAKCET" is a long-standing typo for "MPACKET" in this
 * macro name; the emitted log string itself is spelled correctly.
 */
#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
/**
 * stmmac_fpe_lp_task - Frame Preemption link-partner handshake worker
 * @work: the work_struct embedded in stmmac_priv (fpe_task)
 * Description: polls the local/link-partner FPE state for up to 20 * 500 ms.
 * When both sides report ENTERING_ON, FPE is programmed in hardware and both
 * states move to ON.  While only the local side is capable/entering, verify
 * mPackets are (re)sent.  Bails out early if the handshake is switched off.
 */
static void stmmac_fpe_lp_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						fpe_task);
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;
	bool *enable = &fpe_cfg->enable;
	int retries = 20;

	while (retries-- > 0) {
		/* Bail out immediately if FPE handshake is OFF */
		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
			break;

		if (*lo_state == FPE_STATE_ENTERING_ON &&
		    *lp_state == FPE_STATE_ENTERING_ON) {
			stmmac_fpe_configure(priv, priv->ioaddr,
					     priv->plat->tx_queues_to_use,
					     priv->plat->rx_queues_to_use,
					     *enable);

			netdev_info(priv->dev, "configured FPE\n");

			*lo_state = FPE_STATE_ON;
			*lp_state = FPE_STATE_ON;
			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
			break;
		}

		if ((*lo_state == FPE_STATE_CAPABLE ||
		     *lo_state == FPE_STATE_ENTERING_ON) &&
		     *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
				    *lo_state, *lp_state);
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		}
		/* Sleep then retry */
		msleep(500);
	}

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
}

/**
 * stmmac_fpe_handshake - enable/disable the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to abort it
 * Description: on enable, kicks off verification by sending an mPacket; on
 * disable, forces both local and link-partner states back to OFF.  No-op if
 * hs_enable already matches @enable.
 */
void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
{
	if (priv->plat->fpe_cfg->hs_enable != enable) {
		if (enable) {
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		} else {
			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
		}

		priv->plat->fpe_cfg->hs_enable = enable;
	}
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used
to 7018bfab27a1SGiuseppe CAVALLARO * call the alloc_etherdev, allocate the priv structure. 70199afec6efSAndy Shevchenko * Return: 702015ffac73SJoachim Eastwood * returns 0 on success, otherwise errno. 70217ac6653aSJeff Kirsher */ 702215ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device, 7023cf3f047bSGiuseppe CAVALLARO struct plat_stmmacenet_data *plat_dat, 7024e56788cfSJoachim Eastwood struct stmmac_resources *res) 70257ac6653aSJeff Kirsher { 7026bfab27a1SGiuseppe CAVALLARO struct net_device *ndev = NULL; 7027bfab27a1SGiuseppe CAVALLARO struct stmmac_priv *priv; 70280366f7e0SOng Boon Leong u32 rxq; 702976067459SJose Abreu int i, ret = 0; 70307ac6653aSJeff Kirsher 70319737070cSJisheng Zhang ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 70329737070cSJisheng Zhang MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 703341de8d4cSJoe Perches if (!ndev) 703415ffac73SJoachim Eastwood return -ENOMEM; 70357ac6653aSJeff Kirsher 7036bfab27a1SGiuseppe CAVALLARO SET_NETDEV_DEV(ndev, device); 70377ac6653aSJeff Kirsher 7038bfab27a1SGiuseppe CAVALLARO priv = netdev_priv(ndev); 7039bfab27a1SGiuseppe CAVALLARO priv->device = device; 7040bfab27a1SGiuseppe CAVALLARO priv->dev = ndev; 7041bfab27a1SGiuseppe CAVALLARO 7042bfab27a1SGiuseppe CAVALLARO stmmac_set_ethtool_ops(ndev); 7043cf3f047bSGiuseppe CAVALLARO priv->pause = pause; 7044cf3f047bSGiuseppe CAVALLARO priv->plat = plat_dat; 7045e56788cfSJoachim Eastwood priv->ioaddr = res->addr; 7046e56788cfSJoachim Eastwood priv->dev->base_addr = (unsigned long)res->addr; 70476ccf12aeSWong, Vee Khee priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; 7048e56788cfSJoachim Eastwood 7049e56788cfSJoachim Eastwood priv->dev->irq = res->irq; 7050e56788cfSJoachim Eastwood priv->wol_irq = res->wol_irq; 7051e56788cfSJoachim Eastwood priv->lpi_irq = res->lpi_irq; 70528532f613SOng Boon Leong priv->sfty_ce_irq = res->sfty_ce_irq; 70538532f613SOng Boon Leong priv->sfty_ue_irq = res->sfty_ue_irq; 70548532f613SOng Boon 
Leong for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 70558532f613SOng Boon Leong priv->rx_irq[i] = res->rx_irq[i]; 70568532f613SOng Boon Leong for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 70578532f613SOng Boon Leong priv->tx_irq[i] = res->tx_irq[i]; 7058e56788cfSJoachim Eastwood 705983216e39SMichael Walle if (!is_zero_ether_addr(res->mac)) 7060a96d317fSJakub Kicinski eth_hw_addr_set(priv->dev, res->mac); 7061bfab27a1SGiuseppe CAVALLARO 7062a7a62685SJoachim Eastwood dev_set_drvdata(device, priv->dev); 7063803f8fc4SJoachim Eastwood 7064cf3f047bSGiuseppe CAVALLARO /* Verify driver arguments */ 7065cf3f047bSGiuseppe CAVALLARO stmmac_verify_args(); 7066cf3f047bSGiuseppe CAVALLARO 7067bba2556eSOng Boon Leong priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); 7068bba2556eSOng Boon Leong if (!priv->af_xdp_zc_qps) 7069bba2556eSOng Boon Leong return -ENOMEM; 7070bba2556eSOng Boon Leong 707134877a15SJose Abreu /* Allocate workqueue */ 707234877a15SJose Abreu priv->wq = create_singlethread_workqueue("stmmac_wq"); 707334877a15SJose Abreu if (!priv->wq) { 707434877a15SJose Abreu dev_err(priv->device, "failed to create workqueue\n"); 70759737070cSJisheng Zhang return -ENOMEM; 707634877a15SJose Abreu } 707734877a15SJose Abreu 707834877a15SJose Abreu INIT_WORK(&priv->service_task, stmmac_service_task); 707934877a15SJose Abreu 70805a558611SOng Boon Leong /* Initialize Link Partner FPE workqueue */ 70815a558611SOng Boon Leong INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 70825a558611SOng Boon Leong 7083cf3f047bSGiuseppe CAVALLARO /* Override with kernel parameters if supplied XXX CRS XXX 7084ceb69499SGiuseppe CAVALLARO * this needs to have multiple instances 7085ceb69499SGiuseppe CAVALLARO */ 7086cf3f047bSGiuseppe CAVALLARO if ((phyaddr >= 0) && (phyaddr <= 31)) 7087cf3f047bSGiuseppe CAVALLARO priv->plat->phy_addr = phyaddr; 7088cf3f047bSGiuseppe CAVALLARO 708990f522a2SEugeniy Paltsev if (priv->plat->stmmac_rst) { 709090f522a2SEugeniy Paltsev ret = 
reset_control_assert(priv->plat->stmmac_rst); 7091f573c0b9Sjpinto reset_control_deassert(priv->plat->stmmac_rst); 709290f522a2SEugeniy Paltsev /* Some reset controllers have only reset callback instead of 709390f522a2SEugeniy Paltsev * assert + deassert callbacks pair. 709490f522a2SEugeniy Paltsev */ 709590f522a2SEugeniy Paltsev if (ret == -ENOTSUPP) 709690f522a2SEugeniy Paltsev reset_control_reset(priv->plat->stmmac_rst); 709790f522a2SEugeniy Paltsev } 7098c5e4ddbdSChen-Yu Tsai 7099e67f325eSMatthew Hagan ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); 7100e67f325eSMatthew Hagan if (ret == -ENOTSUPP) 7101e67f325eSMatthew Hagan dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 7102e67f325eSMatthew Hagan ERR_PTR(ret)); 7103e67f325eSMatthew Hagan 7104cf3f047bSGiuseppe CAVALLARO /* Init MAC and get the capabilities */ 7105c24602efSGiuseppe CAVALLARO ret = stmmac_hw_init(priv); 7106c24602efSGiuseppe CAVALLARO if (ret) 710762866e98SChen-Yu Tsai goto error_hw_init; 7108cf3f047bSGiuseppe CAVALLARO 710996874c61SMohammad Athari Bin Ismail /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. 
711096874c61SMohammad Athari Bin Ismail */ 711196874c61SMohammad Athari Bin Ismail if (priv->synopsys_id < DWMAC_CORE_5_20) 711296874c61SMohammad Athari Bin Ismail priv->plat->dma_cfg->dche = false; 711396874c61SMohammad Athari Bin Ismail 7114b561af36SVinod Koul stmmac_check_ether_addr(priv); 7115b561af36SVinod Koul 7116cf3f047bSGiuseppe CAVALLARO ndev->netdev_ops = &stmmac_netdev_ops; 7117cf3f047bSGiuseppe CAVALLARO 7118cf3f047bSGiuseppe CAVALLARO ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 7119cf3f047bSGiuseppe CAVALLARO NETIF_F_RXCSUM; 7120f748be53SAlexandre TORGUE 71214dbbe8ddSJose Abreu ret = stmmac_tc_init(priv, priv); 71224dbbe8ddSJose Abreu if (!ret) { 71234dbbe8ddSJose Abreu ndev->hw_features |= NETIF_F_HW_TC; 71244dbbe8ddSJose Abreu } 71254dbbe8ddSJose Abreu 7126f748be53SAlexandre TORGUE if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 71279edfa7daSNiklas Cassel ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 7128b7766206SJose Abreu if (priv->plat->has_gmac4) 7129b7766206SJose Abreu ndev->hw_features |= NETIF_F_GSO_UDP_L4; 7130f748be53SAlexandre TORGUE priv->tso = true; 713138ddc59dSLABBE Corentin dev_info(priv->device, "TSO feature enabled\n"); 7132f748be53SAlexandre TORGUE } 7133a993db88SJose Abreu 713447f753c1STan Tee Min if (priv->dma_cap.sphen && !priv->plat->sph_disable) { 713567afd6d1SJose Abreu ndev->hw_features |= NETIF_F_GRO; 7136d08d32d1SOng Boon Leong priv->sph_cap = true; 7137d08d32d1SOng Boon Leong priv->sph = priv->sph_cap; 713867afd6d1SJose Abreu dev_info(priv->device, "SPH feature enabled\n"); 713967afd6d1SJose Abreu } 714067afd6d1SJose Abreu 7141f119cc98SFugang Duan /* The current IP register MAC_HW_Feature1[ADDR64] only define 7142f119cc98SFugang Duan * 32/40/64 bit width, but some SOC support others like i.MX8MP 7143f119cc98SFugang Duan * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64]. 7144f119cc98SFugang Duan * So overwrite dma_cap.addr64 according to HW real design. 
7145f119cc98SFugang Duan */ 7146f119cc98SFugang Duan if (priv->plat->addr64) 7147f119cc98SFugang Duan priv->dma_cap.addr64 = priv->plat->addr64; 7148f119cc98SFugang Duan 7149a993db88SJose Abreu if (priv->dma_cap.addr64) { 7150a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, 7151a993db88SJose Abreu DMA_BIT_MASK(priv->dma_cap.addr64)); 7152a993db88SJose Abreu if (!ret) { 7153a993db88SJose Abreu dev_info(priv->device, "Using %d bits DMA width\n", 7154a993db88SJose Abreu priv->dma_cap.addr64); 7155968a2978SThierry Reding 7156968a2978SThierry Reding /* 7157968a2978SThierry Reding * If more than 32 bits can be addressed, make sure to 7158968a2978SThierry Reding * enable enhanced addressing mode. 7159968a2978SThierry Reding */ 7160968a2978SThierry Reding if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 7161968a2978SThierry Reding priv->plat->dma_cfg->eame = true; 7162a993db88SJose Abreu } else { 7163a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 7164a993db88SJose Abreu if (ret) { 7165a993db88SJose Abreu dev_err(priv->device, "Failed to set DMA Mask\n"); 7166a993db88SJose Abreu goto error_hw_init; 7167a993db88SJose Abreu } 7168a993db88SJose Abreu 7169a993db88SJose Abreu priv->dma_cap.addr64 = 32; 7170a993db88SJose Abreu } 7171a993db88SJose Abreu } 7172a993db88SJose Abreu 7173bfab27a1SGiuseppe CAVALLARO ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 7174bfab27a1SGiuseppe CAVALLARO ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 71757ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED 71767ac6653aSJeff Kirsher /* Both mac100 and gmac support receive VLAN tag detection */ 7177ab188e8fSElad Nachman ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 71783cd1cfcbSJose Abreu if (priv->dma_cap.vlhash) { 71793cd1cfcbSJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 71803cd1cfcbSJose Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 71813cd1cfcbSJose Abreu } 718230d93227SJose Abreu if (priv->dma_cap.vlins) 
{ 718330d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 718430d93227SJose Abreu if (priv->dma_cap.dvlan) 718530d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 718630d93227SJose Abreu } 71877ac6653aSJeff Kirsher #endif 71887ac6653aSJeff Kirsher priv->msg_enable = netif_msg_init(debug, default_msg_level); 71897ac6653aSJeff Kirsher 719076067459SJose Abreu /* Initialize RSS */ 719176067459SJose Abreu rxq = priv->plat->rx_queues_to_use; 719276067459SJose Abreu netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 719376067459SJose Abreu for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 719476067459SJose Abreu priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 719576067459SJose Abreu 719676067459SJose Abreu if (priv->dma_cap.rssen && priv->plat->rss_en) 719776067459SJose Abreu ndev->features |= NETIF_F_RXHASH; 719876067459SJose Abreu 719944770e11SJarod Wilson /* MTU range: 46 - hw-specific max */ 720044770e11SJarod Wilson ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 720156bcd591SJose Abreu if (priv->plat->has_xgmac) 72027d9e6c5aSJose Abreu ndev->max_mtu = XGMAC_JUMBO_LEN; 720356bcd591SJose Abreu else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 720456bcd591SJose Abreu ndev->max_mtu = JUMBO_LEN; 720544770e11SJarod Wilson else 720644770e11SJarod Wilson ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 7207a2cd64f3SKweh, Hock Leong /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 7208a2cd64f3SKweh, Hock Leong * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 
7209a2cd64f3SKweh, Hock Leong */ 7210a2cd64f3SKweh, Hock Leong if ((priv->plat->maxmtu < ndev->max_mtu) && 7211a2cd64f3SKweh, Hock Leong (priv->plat->maxmtu >= ndev->min_mtu)) 721244770e11SJarod Wilson ndev->max_mtu = priv->plat->maxmtu; 7213a2cd64f3SKweh, Hock Leong else if (priv->plat->maxmtu < ndev->min_mtu) 7214b618ab45SHeiner Kallweit dev_warn(priv->device, 7215a2cd64f3SKweh, Hock Leong "%s: warning: maxmtu having invalid value (%d)\n", 7216a2cd64f3SKweh, Hock Leong __func__, priv->plat->maxmtu); 721744770e11SJarod Wilson 72187ac6653aSJeff Kirsher if (flow_ctrl) 72197ac6653aSJeff Kirsher priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 72207ac6653aSJeff Kirsher 72218fce3331SJose Abreu /* Setup channels NAPI */ 72220366f7e0SOng Boon Leong stmmac_napi_add(ndev); 72237ac6653aSJeff Kirsher 722429555fa3SThierry Reding mutex_init(&priv->lock); 72257ac6653aSJeff Kirsher 7226cd7201f4SGiuseppe CAVALLARO /* If a specific clk_csr value is passed from the platform 7227cd7201f4SGiuseppe CAVALLARO * this means that the CSR Clock Range selection cannot be 7228cd7201f4SGiuseppe CAVALLARO * changed at run-time and it is fixed. Viceversa the driver'll try to 7229cd7201f4SGiuseppe CAVALLARO * set the MDC clock dynamically according to the csr actual 7230cd7201f4SGiuseppe CAVALLARO * clock input. 
7231cd7201f4SGiuseppe CAVALLARO */ 72325e7f7fc5SBiao Huang if (priv->plat->clk_csr >= 0) 7233cd7201f4SGiuseppe CAVALLARO priv->clk_csr = priv->plat->clk_csr; 72345e7f7fc5SBiao Huang else 72355e7f7fc5SBiao Huang stmmac_clk_csr_set(priv); 7236cd7201f4SGiuseppe CAVALLARO 7237e58bb43fSGiuseppe CAVALLARO stmmac_check_pcs_mode(priv); 7238e58bb43fSGiuseppe CAVALLARO 72395ec55823SJoakim Zhang pm_runtime_get_noresume(device); 72405ec55823SJoakim Zhang pm_runtime_set_active(device); 7241d90d0c17SKai-Heng Feng if (!pm_runtime_enabled(device)) 72425ec55823SJoakim Zhang pm_runtime_enable(device); 72435ec55823SJoakim Zhang 7244a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 72453fe5cadbSGiuseppe CAVALLARO priv->hw->pcs != STMMAC_PCS_RTBI) { 72464bfcbd7aSFrancesco Virlinzi /* MDIO bus Registration */ 72474bfcbd7aSFrancesco Virlinzi ret = stmmac_mdio_register(ndev); 72484bfcbd7aSFrancesco Virlinzi if (ret < 0) { 7249839612d2SRasmus Villemoes dev_err_probe(priv->device, ret, 7250839612d2SRasmus Villemoes "%s: MDIO bus (id: %d) registration failed\n", 72514bfcbd7aSFrancesco Virlinzi __func__, priv->plat->bus_id); 72526a81c26fSViresh Kumar goto error_mdio_register; 72534bfcbd7aSFrancesco Virlinzi } 7254e58bb43fSGiuseppe CAVALLARO } 72554bfcbd7aSFrancesco Virlinzi 725646682cb8SVoon Weifeng if (priv->plat->speed_mode_2500) 725746682cb8SVoon Weifeng priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); 725846682cb8SVoon Weifeng 72597413f9a6SVladimir Oltean if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { 7260597a68ceSVoon Weifeng ret = stmmac_xpcs_setup(priv->mii); 7261597a68ceSVoon Weifeng if (ret) 7262597a68ceSVoon Weifeng goto error_xpcs_setup; 7263597a68ceSVoon Weifeng } 7264597a68ceSVoon Weifeng 726574371272SJose Abreu ret = stmmac_phy_setup(priv); 726674371272SJose Abreu if (ret) { 726774371272SJose Abreu netdev_err(ndev, "failed to setup phy (%d)\n", ret); 726874371272SJose Abreu goto error_phy_setup; 726974371272SJose Abreu } 
727074371272SJose Abreu 727157016590SFlorian Fainelli ret = register_netdev(ndev); 7272b2eb09afSFlorian Fainelli if (ret) { 7273b618ab45SHeiner Kallweit dev_err(priv->device, "%s: ERROR %i registering the device\n", 727457016590SFlorian Fainelli __func__, ret); 7275b2eb09afSFlorian Fainelli goto error_netdev_register; 7276b2eb09afSFlorian Fainelli } 72777ac6653aSJeff Kirsher 7278b9663b7cSVoon Weifeng if (priv->plat->serdes_powerup) { 7279b9663b7cSVoon Weifeng ret = priv->plat->serdes_powerup(ndev, 7280b9663b7cSVoon Weifeng priv->plat->bsp_priv); 7281b9663b7cSVoon Weifeng 7282b9663b7cSVoon Weifeng if (ret < 0) 7283801eb050SAndy Shevchenko goto error_serdes_powerup; 7284b9663b7cSVoon Weifeng } 7285b9663b7cSVoon Weifeng 72865f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS 72878d72ab11SGreg Kroah-Hartman stmmac_init_fs(ndev); 72885f2b8b62SThierry Reding #endif 72895f2b8b62SThierry Reding 72904047b9dbSBhupesh Sharma if (priv->plat->dump_debug_regs) 72914047b9dbSBhupesh Sharma priv->plat->dump_debug_regs(priv->plat->bsp_priv); 72924047b9dbSBhupesh Sharma 72935ec55823SJoakim Zhang /* Let pm_runtime_put() disable the clocks. 72945ec55823SJoakim Zhang * If CONFIG_PM is not enabled, the clocks will stay powered. 
72955ec55823SJoakim Zhang */ 72965ec55823SJoakim Zhang pm_runtime_put(device); 72975ec55823SJoakim Zhang 729857016590SFlorian Fainelli return ret; 72997ac6653aSJeff Kirsher 7300801eb050SAndy Shevchenko error_serdes_powerup: 7301801eb050SAndy Shevchenko unregister_netdev(ndev); 73026a81c26fSViresh Kumar error_netdev_register: 730374371272SJose Abreu phylink_destroy(priv->phylink); 7304597a68ceSVoon Weifeng error_xpcs_setup: 730574371272SJose Abreu error_phy_setup: 7306a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 7307b2eb09afSFlorian Fainelli priv->hw->pcs != STMMAC_PCS_RTBI) 7308b2eb09afSFlorian Fainelli stmmac_mdio_unregister(ndev); 73097ac6653aSJeff Kirsher error_mdio_register: 73100366f7e0SOng Boon Leong stmmac_napi_del(ndev); 731162866e98SChen-Yu Tsai error_hw_init: 731234877a15SJose Abreu destroy_workqueue(priv->wq); 7313d7f576dcSWong Vee Khee bitmap_free(priv->af_xdp_zc_qps); 73147ac6653aSJeff Kirsher 731515ffac73SJoachim Eastwood return ret; 73167ac6653aSJeff Kirsher } 7317b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 73187ac6653aSJeff Kirsher 73197ac6653aSJeff Kirsher /** 73207ac6653aSJeff Kirsher * stmmac_dvr_remove 7321f4e7bd81SJoachim Eastwood * @dev: device pointer 73227ac6653aSJeff Kirsher * Description: this function resets the TX/RX processes, disables the MAC RX/TX 7323bfab27a1SGiuseppe CAVALLARO * changes the link status, releases the DMA descriptor rings. 
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	/* Make sure clocks/power are up for the teardown register accesses */
	pm_runtime_get_sync(dev);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

	/* Serdes power down needs to happen after VLAN filter
	 * is deleted that is triggered by unregister_netdev().
	 */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	/* Drop the reference taken above and disable runtime PM */
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	/* Stop pending TX coalescing timers before the DMA is stopped */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	/* phylink calls must be made under RTNL, outside priv->lock */
	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

static void
stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) 7439f9ec5723SChristian Marangi { 74408531c808SChristian Marangi struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 7441f9ec5723SChristian Marangi 7442f9ec5723SChristian Marangi rx_q->cur_rx = 0; 7443f9ec5723SChristian Marangi rx_q->dirty_rx = 0; 7444f9ec5723SChristian Marangi } 7445f9ec5723SChristian Marangi 7446f9ec5723SChristian Marangi static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) 7447f9ec5723SChristian Marangi { 74488531c808SChristian Marangi struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 7449f9ec5723SChristian Marangi 7450f9ec5723SChristian Marangi tx_q->cur_tx = 0; 7451f9ec5723SChristian Marangi tx_q->dirty_tx = 0; 7452f9ec5723SChristian Marangi tx_q->mss = 0; 7453f9ec5723SChristian Marangi 7454f9ec5723SChristian Marangi netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); 7455f9ec5723SChristian Marangi } 7456f9ec5723SChristian Marangi 7457732fdf0eSGiuseppe CAVALLARO /** 745854139cf3SJoao Pinto * stmmac_reset_queues_param - reset queue parameters 7459d0ea5cbdSJesse Brandeburg * @priv: device pointer 746054139cf3SJoao Pinto */ 746154139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv) 746254139cf3SJoao Pinto { 746354139cf3SJoao Pinto u32 rx_cnt = priv->plat->rx_queues_to_use; 7464ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 746554139cf3SJoao Pinto u32 queue; 746654139cf3SJoao Pinto 7467f9ec5723SChristian Marangi for (queue = 0; queue < rx_cnt; queue++) 7468f9ec5723SChristian Marangi stmmac_reset_rx_queue(priv, queue); 746954139cf3SJoao Pinto 7470f9ec5723SChristian Marangi for (queue = 0; queue < tx_cnt; queue++) 7471f9ec5723SChristian Marangi stmmac_reset_tx_queue(priv, queue); 747254139cf3SJoao Pinto } 747354139cf3SJoao Pinto 747454139cf3SJoao Pinto /** 7475732fdf0eSGiuseppe CAVALLARO * stmmac_resume - resume callback 7476f4e7bd81SJoachim Eastwood * @dev: device pointer 7477732fdf0eSGiuseppe 
CAVALLARO * Description: when resume this function is invoked to setup the DMA and CORE 7478732fdf0eSGiuseppe CAVALLARO * in a usable state. 7479732fdf0eSGiuseppe CAVALLARO */ 7480f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev) 74817ac6653aSJeff Kirsher { 7482f4e7bd81SJoachim Eastwood struct net_device *ndev = dev_get_drvdata(dev); 74837ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(ndev); 7484b9663b7cSVoon Weifeng int ret; 74857ac6653aSJeff Kirsher 74867ac6653aSJeff Kirsher if (!netif_running(ndev)) 74877ac6653aSJeff Kirsher return 0; 74887ac6653aSJeff Kirsher 74897ac6653aSJeff Kirsher /* Power Down bit, into the PM register, is cleared 74907ac6653aSJeff Kirsher * automatically as soon as a magic packet or a Wake-up frame 74917ac6653aSJeff Kirsher * is received. Anyway, it's better to manually clear 74927ac6653aSJeff Kirsher * this bit because it can generate problems while resuming 7493ceb69499SGiuseppe CAVALLARO * from another devices (e.g. serial console). 
7494ceb69499SGiuseppe CAVALLARO */ 7495e8377e7aSJisheng Zhang if (device_may_wakeup(priv->device) && priv->plat->pmt) { 749629555fa3SThierry Reding mutex_lock(&priv->lock); 7497c10d4c82SJose Abreu stmmac_pmt(priv, priv->hw, 0); 749829555fa3SThierry Reding mutex_unlock(&priv->lock); 749989f7f2cfSSrinivas Kandagatla priv->irq_wake = 0; 7500623997fbSSrinivas Kandagatla } else { 7501db88f10aSSrinivas Kandagatla pinctrl_pm_select_default_state(priv->device); 7502623997fbSSrinivas Kandagatla /* reset the phy so that it's ready */ 7503623997fbSSrinivas Kandagatla if (priv->mii) 7504623997fbSSrinivas Kandagatla stmmac_mdio_reset(priv->mii); 7505623997fbSSrinivas Kandagatla } 75067ac6653aSJeff Kirsher 7507b9663b7cSVoon Weifeng if (priv->plat->serdes_powerup) { 7508b9663b7cSVoon Weifeng ret = priv->plat->serdes_powerup(ndev, 7509b9663b7cSVoon Weifeng priv->plat->bsp_priv); 7510b9663b7cSVoon Weifeng 7511b9663b7cSVoon Weifeng if (ret < 0) 7512b9663b7cSVoon Weifeng return ret; 7513b9663b7cSVoon Weifeng } 7514b9663b7cSVoon Weifeng 751536d18b56SFugang Duan rtnl_lock(); 751690702dcdSJoakim Zhang if (device_may_wakeup(priv->device) && priv->plat->pmt) { 751790702dcdSJoakim Zhang phylink_resume(priv->phylink); 751890702dcdSJoakim Zhang } else { 751990702dcdSJoakim Zhang phylink_resume(priv->phylink); 752090702dcdSJoakim Zhang if (device_may_wakeup(priv->device)) 752136d18b56SFugang Duan phylink_speed_up(priv->phylink); 752236d18b56SFugang Duan } 752390702dcdSJoakim Zhang rtnl_unlock(); 752436d18b56SFugang Duan 75258e5debedSWong Vee Khee rtnl_lock(); 752629555fa3SThierry Reding mutex_lock(&priv->lock); 7527f55d84b0SVincent Palatin 752854139cf3SJoao Pinto stmmac_reset_queues_param(priv); 752900423969SThierry Reding 75304ec236c7SFugang Duan stmmac_free_tx_skbufs(priv); 7531ba39b344SChristian Marangi stmmac_clear_descriptors(priv, &priv->dma_conf); 7532ae79a639SGiuseppe CAVALLARO 7533fe131929SHuacai Chen stmmac_hw_setup(ndev, false); 7534d429b66eSJose Abreu stmmac_init_coalesce(priv); 
7535ac316c78SGiuseppe CAVALLARO stmmac_set_rx_mode(ndev); 75367ac6653aSJeff Kirsher 7537ed64639bSWong Vee Khee stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); 7538ed64639bSWong Vee Khee 7539c22a3f48SJoao Pinto stmmac_enable_all_queues(priv); 7540087a7b94SVincent Whitchurch stmmac_enable_all_dma_irq(priv); 75417ac6653aSJeff Kirsher 7542134cc4ceSThierry Reding mutex_unlock(&priv->lock); 75438e5debedSWong Vee Khee rtnl_unlock(); 7544134cc4ceSThierry Reding 754531096c3eSLeon Yu netif_device_attach(ndev); 754631096c3eSLeon Yu 75477ac6653aSJeff Kirsher return 0; 75487ac6653aSJeff Kirsher } 7549b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume); 7550ba27ec66SGiuseppe CAVALLARO 75517ac6653aSJeff Kirsher #ifndef MODULE 75527ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str) 75537ac6653aSJeff Kirsher { 75547ac6653aSJeff Kirsher char *opt; 75557ac6653aSJeff Kirsher 75567ac6653aSJeff Kirsher if (!str || !*str) 7557e01b042eSRandy Dunlap return 1; 75587ac6653aSJeff Kirsher while ((opt = strsep(&str, ",")) != NULL) { 75597ac6653aSJeff Kirsher if (!strncmp(opt, "debug:", 6)) { 7560ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 6, 0, &debug)) 75617ac6653aSJeff Kirsher goto err; 75627ac6653aSJeff Kirsher } else if (!strncmp(opt, "phyaddr:", 8)) { 7563ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 8, 0, &phyaddr)) 75647ac6653aSJeff Kirsher goto err; 75657ac6653aSJeff Kirsher } else if (!strncmp(opt, "buf_sz:", 7)) { 7566ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 7, 0, &buf_sz)) 75677ac6653aSJeff Kirsher goto err; 75687ac6653aSJeff Kirsher } else if (!strncmp(opt, "tc:", 3)) { 7569ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 3, 0, &tc)) 75707ac6653aSJeff Kirsher goto err; 75717ac6653aSJeff Kirsher } else if (!strncmp(opt, "watchdog:", 9)) { 7572ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 9, 0, &watchdog)) 75737ac6653aSJeff Kirsher goto err; 75747ac6653aSJeff Kirsher } else if (!strncmp(opt, "flow_ctrl:", 10)) { 7575ea2ab871SGiuseppe 
CAVALLARO if (kstrtoint(opt + 10, 0, &flow_ctrl)) 75767ac6653aSJeff Kirsher goto err; 75777ac6653aSJeff Kirsher } else if (!strncmp(opt, "pause:", 6)) { 7578ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 6, 0, &pause)) 75797ac6653aSJeff Kirsher goto err; 7580506f669cSGiuseppe CAVALLARO } else if (!strncmp(opt, "eee_timer:", 10)) { 7581d765955dSGiuseppe CAVALLARO if (kstrtoint(opt + 10, 0, &eee_timer)) 7582d765955dSGiuseppe CAVALLARO goto err; 75834a7d666aSGiuseppe CAVALLARO } else if (!strncmp(opt, "chain_mode:", 11)) { 75844a7d666aSGiuseppe CAVALLARO if (kstrtoint(opt + 11, 0, &chain_mode)) 75854a7d666aSGiuseppe CAVALLARO goto err; 75867ac6653aSJeff Kirsher } 75877ac6653aSJeff Kirsher } 7588e01b042eSRandy Dunlap return 1; 75897ac6653aSJeff Kirsher 75907ac6653aSJeff Kirsher err: 75917ac6653aSJeff Kirsher pr_err("%s: ERROR broken module parameter conversion", __func__); 7592e01b042eSRandy Dunlap return 1; 75937ac6653aSJeff Kirsher } 75947ac6653aSJeff Kirsher 75957ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt); 7596ceb69499SGiuseppe CAVALLARO #endif /* MODULE */ 75976fc0d0f2SGiuseppe Cavallaro 7598466c5ac8SMathieu Olivari static int __init stmmac_init(void) 7599466c5ac8SMathieu Olivari { 7600466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS 7601466c5ac8SMathieu Olivari /* Create debugfs main directory if it doesn't exist yet */ 76028d72ab11SGreg Kroah-Hartman if (!stmmac_fs_dir) 7603466c5ac8SMathieu Olivari stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 7604474a31e1SAaro Koskinen register_netdevice_notifier(&stmmac_notifier); 7605466c5ac8SMathieu Olivari #endif 7606466c5ac8SMathieu Olivari 7607466c5ac8SMathieu Olivari return 0; 7608466c5ac8SMathieu Olivari } 7609466c5ac8SMathieu Olivari 7610466c5ac8SMathieu Olivari static void __exit stmmac_exit(void) 7611466c5ac8SMathieu Olivari { 7612466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS 7613474a31e1SAaro Koskinen unregister_netdevice_notifier(&stmmac_notifier); 7614466c5ac8SMathieu Olivari 
debugfs_remove_recursive(stmmac_fs_dir); 7615466c5ac8SMathieu Olivari #endif 7616466c5ac8SMathieu Olivari } 7617466c5ac8SMathieu Olivari 7618466c5ac8SMathieu Olivari module_init(stmmac_init) 7619466c5ac8SMathieu Olivari module_exit(stmmac_exit) 7620466c5ac8SMathieu Olivari 76216fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 76226fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 76236fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL"); 7624