14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 27ac6653aSJeff Kirsher /******************************************************************************* 37ac6653aSJeff Kirsher This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. 47ac6653aSJeff Kirsher ST Ethernet IPs are built around a Synopsys IP Core. 57ac6653aSJeff Kirsher 6286a8372SGiuseppe CAVALLARO Copyright(C) 2007-2011 STMicroelectronics Ltd 77ac6653aSJeff Kirsher 87ac6653aSJeff Kirsher 97ac6653aSJeff Kirsher Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 107ac6653aSJeff Kirsher 117ac6653aSJeff Kirsher Documentation available at: 127ac6653aSJeff Kirsher http://www.stlinux.com 137ac6653aSJeff Kirsher Support available at: 147ac6653aSJeff Kirsher https://bugzilla.stlinux.com/ 157ac6653aSJeff Kirsher *******************************************************************************/ 167ac6653aSJeff Kirsher 176a81c26fSViresh Kumar #include <linux/clk.h> 187ac6653aSJeff Kirsher #include <linux/kernel.h> 197ac6653aSJeff Kirsher #include <linux/interrupt.h> 207ac6653aSJeff Kirsher #include <linux/ip.h> 217ac6653aSJeff Kirsher #include <linux/tcp.h> 227ac6653aSJeff Kirsher #include <linux/skbuff.h> 237ac6653aSJeff Kirsher #include <linux/ethtool.h> 247ac6653aSJeff Kirsher #include <linux/if_ether.h> 257ac6653aSJeff Kirsher #include <linux/crc32.h> 267ac6653aSJeff Kirsher #include <linux/mii.h> 2701789349SJiri Pirko #include <linux/if.h> 287ac6653aSJeff Kirsher #include <linux/if_vlan.h> 297ac6653aSJeff Kirsher #include <linux/dma-mapping.h> 307ac6653aSJeff Kirsher #include <linux/slab.h> 315ec55823SJoakim Zhang #include <linux/pm_runtime.h> 327ac6653aSJeff Kirsher #include <linux/prefetch.h> 33db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h> 3450fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 357ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h> 367ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h> 3750fb4f74SGiuseppe CAVALLARO #endif /* 
CONFIG_DEBUG_FS */ 38891434b1SRayagond Kokatanur #include <linux/net_tstamp.h> 39eeef2f6bSJose Abreu #include <linux/phylink.h> 40b7766206SJose Abreu #include <linux/udp.h> 415fabb012SOng Boon Leong #include <linux/bpf_trace.h> 424dbbe8ddSJose Abreu #include <net/pkt_cls.h> 43bba2556eSOng Boon Leong #include <net/xdp_sock_drv.h> 44891434b1SRayagond Kokatanur #include "stmmac_ptp.h" 45286a8372SGiuseppe CAVALLARO #include "stmmac.h" 465fabb012SOng Boon Leong #include "stmmac_xdp.h" 47c5e4ddbdSChen-Yu Tsai #include <linux/reset.h> 485790cf3cSMathieu Olivari #include <linux/of_mdio.h> 4919d857c9SPhil Reid #include "dwmac1000.h" 507d9e6c5aSJose Abreu #include "dwxgmac2.h" 5142de047dSJose Abreu #include "hwif.h" 527ac6653aSJeff Kirsher 538d558f02SJose Abreu #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) 54f748be53SAlexandre TORGUE #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) 557ac6653aSJeff Kirsher 567ac6653aSJeff Kirsher /* Module parameters */ 5732ceabcaSGiuseppe CAVALLARO #define TX_TIMEO 5000 587ac6653aSJeff Kirsher static int watchdog = TX_TIMEO; 59d3757ba4SJoe Perches module_param(watchdog, int, 0644); 6032ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); 617ac6653aSJeff Kirsher 6232ceabcaSGiuseppe CAVALLARO static int debug = -1; 63d3757ba4SJoe Perches module_param(debug, int, 0644); 6432ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); 657ac6653aSJeff Kirsher 6647d1f71fSstephen hemminger static int phyaddr = -1; 67d3757ba4SJoe Perches module_param(phyaddr, int, 0444); 687ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address"); 697ac6653aSJeff Kirsher 70aa042f60SSong, Yoong Siang #define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) 71aa042f60SSong, Yoong Siang #define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) 727ac6653aSJeff Kirsher 73132c32eeSOng Boon Leong /* Limit to make sure XDP TX and slow path can coexist */ 74132c32eeSOng 
Boon Leong #define STMMAC_XSK_TX_BUDGET_MAX 256 75132c32eeSOng Boon Leong #define STMMAC_TX_XSK_AVAIL 16 76bba2556eSOng Boon Leong #define STMMAC_RX_FILL_BATCH 16 77bba2556eSOng Boon Leong 785fabb012SOng Boon Leong #define STMMAC_XDP_PASS 0 795fabb012SOng Boon Leong #define STMMAC_XDP_CONSUMED BIT(0) 80be8b38a7SOng Boon Leong #define STMMAC_XDP_TX BIT(1) 818b278a5bSOng Boon Leong #define STMMAC_XDP_REDIRECT BIT(2) 825fabb012SOng Boon Leong 83e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO; 84d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644); 857ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); 867ac6653aSJeff Kirsher 877ac6653aSJeff Kirsher static int pause = PAUSE_TIME; 88d3757ba4SJoe Perches module_param(pause, int, 0644); 897ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time"); 907ac6653aSJeff Kirsher 917ac6653aSJeff Kirsher #define TC_DEFAULT 64 927ac6653aSJeff Kirsher static int tc = TC_DEFAULT; 93d3757ba4SJoe Perches module_param(tc, int, 0644); 947ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value"); 957ac6653aSJeff Kirsher 96d916701cSGiuseppe CAVALLARO #define DEFAULT_BUFSIZE 1536 97d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE; 98d3757ba4SJoe Perches module_param(buf_sz, int, 0644); 997ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size"); 1007ac6653aSJeff Kirsher 10122ad3838SGiuseppe Cavallaro #define STMMAC_RX_COPYBREAK 256 10222ad3838SGiuseppe Cavallaro 1037ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 1047ac6653aSJeff Kirsher NETIF_MSG_LINK | NETIF_MSG_IFUP | 1057ac6653aSJeff Kirsher NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 1067ac6653aSJeff Kirsher 107d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER 1000 108d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; 109d3757ba4SJoe Perches module_param(eee_timer, int, 0644); 110d765955dSGiuseppe CAVALLARO 
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 111388e201dSVineetha G. Jaya Kumaran #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) 112d765955dSGiuseppe CAVALLARO 11322d3efe5SPavel Machek /* By default the driver will use the ring mode to manage tx and rx descriptors, 11422d3efe5SPavel Machek * but allow user to force to use the chain instead of the ring 1154a7d666aSGiuseppe CAVALLARO */ 1164a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode; 117d3757ba4SJoe Perches module_param(chain_mode, int, 0444); 1184a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); 1194a7d666aSGiuseppe CAVALLARO 1207ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 1218532f613SOng Boon Leong /* For MSI interrupts handling */ 1228532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); 1238532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); 1248532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); 1258532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); 126132c32eeSOng Boon Leong static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); 127132c32eeSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); 1287ac6653aSJeff Kirsher 12950fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 130481a7d15SJiping Ma static const struct net_device_ops stmmac_netdev_ops; 1318d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev); 132466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev); 133bfab27a1SGiuseppe CAVALLARO #endif 134bfab27a1SGiuseppe CAVALLARO 135d5a05e69SVincent Whitchurch #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) 1369125cdd1SGiuseppe CAVALLARO 1375ec55823SJoakim Zhang int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) 1385ec55823SJoakim 
Zhang { 1395ec55823SJoakim Zhang int ret = 0; 1405ec55823SJoakim Zhang 1415ec55823SJoakim Zhang if (enabled) { 1425ec55823SJoakim Zhang ret = clk_prepare_enable(priv->plat->stmmac_clk); 1435ec55823SJoakim Zhang if (ret) 1445ec55823SJoakim Zhang return ret; 1455ec55823SJoakim Zhang ret = clk_prepare_enable(priv->plat->pclk); 1465ec55823SJoakim Zhang if (ret) { 1475ec55823SJoakim Zhang clk_disable_unprepare(priv->plat->stmmac_clk); 1485ec55823SJoakim Zhang return ret; 1495ec55823SJoakim Zhang } 150b4d45aeeSJoakim Zhang if (priv->plat->clks_config) { 151b4d45aeeSJoakim Zhang ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); 152b4d45aeeSJoakim Zhang if (ret) { 153b4d45aeeSJoakim Zhang clk_disable_unprepare(priv->plat->stmmac_clk); 154b4d45aeeSJoakim Zhang clk_disable_unprepare(priv->plat->pclk); 155b4d45aeeSJoakim Zhang return ret; 156b4d45aeeSJoakim Zhang } 157b4d45aeeSJoakim Zhang } 1585ec55823SJoakim Zhang } else { 1595ec55823SJoakim Zhang clk_disable_unprepare(priv->plat->stmmac_clk); 1605ec55823SJoakim Zhang clk_disable_unprepare(priv->plat->pclk); 161b4d45aeeSJoakim Zhang if (priv->plat->clks_config) 162b4d45aeeSJoakim Zhang priv->plat->clks_config(priv->plat->bsp_priv, enabled); 1635ec55823SJoakim Zhang } 1645ec55823SJoakim Zhang 1655ec55823SJoakim Zhang return ret; 1665ec55823SJoakim Zhang } 1675ec55823SJoakim Zhang EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); 1685ec55823SJoakim Zhang 1697ac6653aSJeff Kirsher /** 1707ac6653aSJeff Kirsher * stmmac_verify_args - verify the driver parameters. 171732fdf0eSGiuseppe CAVALLARO * Description: it checks the driver parameters and set a default in case of 172732fdf0eSGiuseppe CAVALLARO * errors. 
1737ac6653aSJeff Kirsher */ 1747ac6653aSJeff Kirsher static void stmmac_verify_args(void) 1757ac6653aSJeff Kirsher { 1767ac6653aSJeff Kirsher if (unlikely(watchdog < 0)) 1777ac6653aSJeff Kirsher watchdog = TX_TIMEO; 178d916701cSGiuseppe CAVALLARO if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) 179d916701cSGiuseppe CAVALLARO buf_sz = DEFAULT_BUFSIZE; 1807ac6653aSJeff Kirsher if (unlikely(flow_ctrl > 1)) 1817ac6653aSJeff Kirsher flow_ctrl = FLOW_AUTO; 1827ac6653aSJeff Kirsher else if (likely(flow_ctrl < 0)) 1837ac6653aSJeff Kirsher flow_ctrl = FLOW_OFF; 1847ac6653aSJeff Kirsher if (unlikely((pause < 0) || (pause > 0xffff))) 1857ac6653aSJeff Kirsher pause = PAUSE_TIME; 186d765955dSGiuseppe CAVALLARO if (eee_timer < 0) 187d765955dSGiuseppe CAVALLARO eee_timer = STMMAC_DEFAULT_LPI_TIMER; 1887ac6653aSJeff Kirsher } 1897ac6653aSJeff Kirsher 190bba2556eSOng Boon Leong static void __stmmac_disable_all_queues(struct stmmac_priv *priv) 191c22a3f48SJoao Pinto { 192c22a3f48SJoao Pinto u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 1938fce3331SJose Abreu u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 1948fce3331SJose Abreu u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 195c22a3f48SJoao Pinto u32 queue; 196c22a3f48SJoao Pinto 1978fce3331SJose Abreu for (queue = 0; queue < maxq; queue++) { 1988fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 199c22a3f48SJoao Pinto 200132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 201132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) { 202132c32eeSOng Boon Leong napi_disable(&ch->rxtx_napi); 203132c32eeSOng Boon Leong continue; 204132c32eeSOng Boon Leong } 205132c32eeSOng Boon Leong 2064ccb4585SJose Abreu if (queue < rx_queues_cnt) 2074ccb4585SJose Abreu napi_disable(&ch->rx_napi); 2084ccb4585SJose Abreu if (queue < tx_queues_cnt) 2094ccb4585SJose Abreu napi_disable(&ch->tx_napi); 210c22a3f48SJoao Pinto } 211c22a3f48SJoao Pinto } 212c22a3f48SJoao Pinto 213c22a3f48SJoao Pinto 
/** 214bba2556eSOng Boon Leong * stmmac_disable_all_queues - Disable all queues 215bba2556eSOng Boon Leong * @priv: driver private structure 216bba2556eSOng Boon Leong */ 217bba2556eSOng Boon Leong static void stmmac_disable_all_queues(struct stmmac_priv *priv) 218bba2556eSOng Boon Leong { 219bba2556eSOng Boon Leong u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 220bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q; 221bba2556eSOng Boon Leong u32 queue; 222bba2556eSOng Boon Leong 223bba2556eSOng Boon Leong /* synchronize_rcu() needed for pending XDP buffers to drain */ 224bba2556eSOng Boon Leong for (queue = 0; queue < rx_queues_cnt; queue++) { 225bba2556eSOng Boon Leong rx_q = &priv->rx_queue[queue]; 226bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 227bba2556eSOng Boon Leong synchronize_rcu(); 228bba2556eSOng Boon Leong break; 229bba2556eSOng Boon Leong } 230bba2556eSOng Boon Leong } 231bba2556eSOng Boon Leong 232bba2556eSOng Boon Leong __stmmac_disable_all_queues(priv); 233bba2556eSOng Boon Leong } 234bba2556eSOng Boon Leong 235bba2556eSOng Boon Leong /** 236c22a3f48SJoao Pinto * stmmac_enable_all_queues - Enable all queues 237c22a3f48SJoao Pinto * @priv: driver private structure 238c22a3f48SJoao Pinto */ 239c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv) 240c22a3f48SJoao Pinto { 241c22a3f48SJoao Pinto u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 2428fce3331SJose Abreu u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 2438fce3331SJose Abreu u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 244c22a3f48SJoao Pinto u32 queue; 245c22a3f48SJoao Pinto 2468fce3331SJose Abreu for (queue = 0; queue < maxq; queue++) { 2478fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 248c22a3f48SJoao Pinto 249132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 250132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) { 251132c32eeSOng Boon Leong napi_enable(&ch->rxtx_napi); 252132c32eeSOng Boon Leong continue; 
253132c32eeSOng Boon Leong } 254132c32eeSOng Boon Leong 2554ccb4585SJose Abreu if (queue < rx_queues_cnt) 2564ccb4585SJose Abreu napi_enable(&ch->rx_napi); 2574ccb4585SJose Abreu if (queue < tx_queues_cnt) 2584ccb4585SJose Abreu napi_enable(&ch->tx_napi); 259c22a3f48SJoao Pinto } 260c22a3f48SJoao Pinto } 261c22a3f48SJoao Pinto 26234877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv) 26334877a15SJose Abreu { 26434877a15SJose Abreu if (!test_bit(STMMAC_DOWN, &priv->state) && 26534877a15SJose Abreu !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) 26634877a15SJose Abreu queue_work(priv->wq, &priv->service_task); 26734877a15SJose Abreu } 26834877a15SJose Abreu 26934877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv) 27034877a15SJose Abreu { 27134877a15SJose Abreu netif_carrier_off(priv->dev); 27234877a15SJose Abreu set_bit(STMMAC_RESET_REQUESTED, &priv->state); 27334877a15SJose Abreu stmmac_service_event_schedule(priv); 27434877a15SJose Abreu } 27534877a15SJose Abreu 276c22a3f48SJoao Pinto /** 27732ceabcaSGiuseppe CAVALLARO * stmmac_clk_csr_set - dynamically set the MDC clock 27832ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 27932ceabcaSGiuseppe CAVALLARO * Description: this is to dynamically set the MDC clock according to the csr 28032ceabcaSGiuseppe CAVALLARO * clock input. 28132ceabcaSGiuseppe CAVALLARO * Note: 28232ceabcaSGiuseppe CAVALLARO * If a specific clk_csr value is passed from the platform 28332ceabcaSGiuseppe CAVALLARO * this means that the CSR Clock Range selection cannot be 28432ceabcaSGiuseppe CAVALLARO * changed at run-time and it is fixed (as reported in the driver 28532ceabcaSGiuseppe CAVALLARO * documentation). Viceversa the driver will try to set the MDC 28632ceabcaSGiuseppe CAVALLARO * clock dynamically according to the actual clock input. 
28732ceabcaSGiuseppe CAVALLARO */ 288cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv) 289cd7201f4SGiuseppe CAVALLARO { 290cd7201f4SGiuseppe CAVALLARO u32 clk_rate; 291cd7201f4SGiuseppe CAVALLARO 292f573c0b9Sjpinto clk_rate = clk_get_rate(priv->plat->stmmac_clk); 293cd7201f4SGiuseppe CAVALLARO 294cd7201f4SGiuseppe CAVALLARO /* Platform provided default clk_csr would be assumed valid 295ceb69499SGiuseppe CAVALLARO * for all other cases except for the below mentioned ones. 296ceb69499SGiuseppe CAVALLARO * For values higher than the IEEE 802.3 specified frequency 297ceb69499SGiuseppe CAVALLARO * we can not estimate the proper divider as it is not known 298ceb69499SGiuseppe CAVALLARO * the frequency of clk_csr_i. So we do not change the default 299ceb69499SGiuseppe CAVALLARO * divider. 300ceb69499SGiuseppe CAVALLARO */ 301cd7201f4SGiuseppe CAVALLARO if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { 302cd7201f4SGiuseppe CAVALLARO if (clk_rate < CSR_F_35M) 303cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_20_35M; 304cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) 305cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_35_60M; 306cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) 307cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_60_100M; 308cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) 309cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_100_150M; 310cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) 311cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_150_250M; 31219d857c9SPhil Reid else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) 313cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_250_300M; 314ceb69499SGiuseppe CAVALLARO } 3159f93ac8dSLABBE Corentin 3169f93ac8dSLABBE Corentin if (priv->plat->has_sun8i) { 3179f93ac8dSLABBE Corentin if 
(clk_rate > 160000000) 3189f93ac8dSLABBE Corentin priv->clk_csr = 0x03; 3199f93ac8dSLABBE Corentin else if (clk_rate > 80000000) 3209f93ac8dSLABBE Corentin priv->clk_csr = 0x02; 3219f93ac8dSLABBE Corentin else if (clk_rate > 40000000) 3229f93ac8dSLABBE Corentin priv->clk_csr = 0x01; 3239f93ac8dSLABBE Corentin else 3249f93ac8dSLABBE Corentin priv->clk_csr = 0; 3259f93ac8dSLABBE Corentin } 3267d9e6c5aSJose Abreu 3277d9e6c5aSJose Abreu if (priv->plat->has_xgmac) { 3287d9e6c5aSJose Abreu if (clk_rate > 400000000) 3297d9e6c5aSJose Abreu priv->clk_csr = 0x5; 3307d9e6c5aSJose Abreu else if (clk_rate > 350000000) 3317d9e6c5aSJose Abreu priv->clk_csr = 0x4; 3327d9e6c5aSJose Abreu else if (clk_rate > 300000000) 3337d9e6c5aSJose Abreu priv->clk_csr = 0x3; 3347d9e6c5aSJose Abreu else if (clk_rate > 250000000) 3357d9e6c5aSJose Abreu priv->clk_csr = 0x2; 3367d9e6c5aSJose Abreu else if (clk_rate > 150000000) 3377d9e6c5aSJose Abreu priv->clk_csr = 0x1; 3387d9e6c5aSJose Abreu else 3397d9e6c5aSJose Abreu priv->clk_csr = 0x0; 3407d9e6c5aSJose Abreu } 341cd7201f4SGiuseppe CAVALLARO } 342cd7201f4SGiuseppe CAVALLARO 3437ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len) 3447ac6653aSJeff Kirsher { 345424c4f78SAndy Shevchenko pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); 346424c4f78SAndy Shevchenko print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); 3477ac6653aSJeff Kirsher } 3487ac6653aSJeff Kirsher 349ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) 3507ac6653aSJeff Kirsher { 351ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 352a6a3e026SLABBE Corentin u32 avail; 353e3ad57c9SGiuseppe Cavallaro 354ce736788SJoao Pinto if (tx_q->dirty_tx > tx_q->cur_tx) 355ce736788SJoao Pinto avail = tx_q->dirty_tx - tx_q->cur_tx - 1; 356e3ad57c9SGiuseppe Cavallaro else 357aa042f60SSong, Yoong Siang avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; 358e3ad57c9SGiuseppe Cavallaro 
359e3ad57c9SGiuseppe Cavallaro return avail; 360e3ad57c9SGiuseppe Cavallaro } 361e3ad57c9SGiuseppe Cavallaro 36254139cf3SJoao Pinto /** 36354139cf3SJoao Pinto * stmmac_rx_dirty - Get RX queue dirty 36454139cf3SJoao Pinto * @priv: driver private structure 36554139cf3SJoao Pinto * @queue: RX queue index 36654139cf3SJoao Pinto */ 36754139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) 368e3ad57c9SGiuseppe Cavallaro { 36954139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 370a6a3e026SLABBE Corentin u32 dirty; 371e3ad57c9SGiuseppe Cavallaro 37254139cf3SJoao Pinto if (rx_q->dirty_rx <= rx_q->cur_rx) 37354139cf3SJoao Pinto dirty = rx_q->cur_rx - rx_q->dirty_rx; 374e3ad57c9SGiuseppe Cavallaro else 375aa042f60SSong, Yoong Siang dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; 376e3ad57c9SGiuseppe Cavallaro 377e3ad57c9SGiuseppe Cavallaro return dirty; 3787ac6653aSJeff Kirsher } 3797ac6653aSJeff Kirsher 380be1c7eaeSVineetha G. Jaya Kumaran static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) 381be1c7eaeSVineetha G. Jaya Kumaran { 382be1c7eaeSVineetha G. Jaya Kumaran int tx_lpi_timer; 383be1c7eaeSVineetha G. Jaya Kumaran 384be1c7eaeSVineetha G. Jaya Kumaran /* Clear/set the SW EEE timer flag based on LPI ET enablement */ 385be1c7eaeSVineetha G. Jaya Kumaran priv->eee_sw_timer_en = en ? 0 : 1; 386be1c7eaeSVineetha G. Jaya Kumaran tx_lpi_timer = en ? priv->tx_lpi_timer : 0; 387be1c7eaeSVineetha G. Jaya Kumaran stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); 388be1c7eaeSVineetha G. Jaya Kumaran } 389be1c7eaeSVineetha G. Jaya Kumaran 39032ceabcaSGiuseppe CAVALLARO /** 391732fdf0eSGiuseppe CAVALLARO * stmmac_enable_eee_mode - check and enter in LPI mode 39232ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 393732fdf0eSGiuseppe CAVALLARO * Description: this function is to verify and enter in LPI mode in case of 394732fdf0eSGiuseppe CAVALLARO * EEE. 
39532ceabcaSGiuseppe CAVALLARO */ 396d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv) 397d765955dSGiuseppe CAVALLARO { 398ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 399ce736788SJoao Pinto u32 queue; 400ce736788SJoao Pinto 401ce736788SJoao Pinto /* check if all TX queues have the work finished */ 402ce736788SJoao Pinto for (queue = 0; queue < tx_cnt; queue++) { 403ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 404ce736788SJoao Pinto 405ce736788SJoao Pinto if (tx_q->dirty_tx != tx_q->cur_tx) 406ce736788SJoao Pinto return; /* still unfinished work */ 407ce736788SJoao Pinto } 408ce736788SJoao Pinto 409d765955dSGiuseppe CAVALLARO /* Check and enter in LPI mode */ 410ce736788SJoao Pinto if (!priv->tx_path_in_lpi_mode) 411c10d4c82SJose Abreu stmmac_set_eee_mode(priv, priv->hw, 412b4b7b772Sjpinto priv->plat->en_tx_lpi_clockgating); 413d765955dSGiuseppe CAVALLARO } 414d765955dSGiuseppe CAVALLARO 41532ceabcaSGiuseppe CAVALLARO /** 416732fdf0eSGiuseppe CAVALLARO * stmmac_disable_eee_mode - disable and exit from LPI mode 41732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 41832ceabcaSGiuseppe CAVALLARO * Description: this function is to exit and disable EEE in case of 41932ceabcaSGiuseppe CAVALLARO * LPI state is true. This is called by the xmit. 42032ceabcaSGiuseppe CAVALLARO */ 421d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv) 422d765955dSGiuseppe CAVALLARO { 423be1c7eaeSVineetha G. Jaya Kumaran if (!priv->eee_sw_timer_en) { 424be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 425be1c7eaeSVineetha G. Jaya Kumaran return; 426be1c7eaeSVineetha G. Jaya Kumaran } 427be1c7eaeSVineetha G. 
Jaya Kumaran 428c10d4c82SJose Abreu stmmac_reset_eee_mode(priv, priv->hw); 429d765955dSGiuseppe CAVALLARO del_timer_sync(&priv->eee_ctrl_timer); 430d765955dSGiuseppe CAVALLARO priv->tx_path_in_lpi_mode = false; 431d765955dSGiuseppe CAVALLARO } 432d765955dSGiuseppe CAVALLARO 433d765955dSGiuseppe CAVALLARO /** 434732fdf0eSGiuseppe CAVALLARO * stmmac_eee_ctrl_timer - EEE TX SW timer. 435d0ea5cbdSJesse Brandeburg * @t: timer_list struct containing private info 436d765955dSGiuseppe CAVALLARO * Description: 43732ceabcaSGiuseppe CAVALLARO * if there is no data transfer and if we are not in LPI state, 438d765955dSGiuseppe CAVALLARO * then MAC Transmitter can be moved to LPI state. 439d765955dSGiuseppe CAVALLARO */ 440e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t) 441d765955dSGiuseppe CAVALLARO { 442e99e88a9SKees Cook struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); 443d765955dSGiuseppe CAVALLARO 444d765955dSGiuseppe CAVALLARO stmmac_enable_eee_mode(priv); 445388e201dSVineetha G. Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 446d765955dSGiuseppe CAVALLARO } 447d765955dSGiuseppe CAVALLARO 448d765955dSGiuseppe CAVALLARO /** 449732fdf0eSGiuseppe CAVALLARO * stmmac_eee_init - init EEE 45032ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 451d765955dSGiuseppe CAVALLARO * Description: 452732fdf0eSGiuseppe CAVALLARO * if the GMAC supports the EEE (from the HW cap reg) and the phy device 453732fdf0eSGiuseppe CAVALLARO * can also manage EEE, this function enable the LPI state and start related 454732fdf0eSGiuseppe CAVALLARO * timer. 455d765955dSGiuseppe CAVALLARO */ 456d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv) 457d765955dSGiuseppe CAVALLARO { 458388e201dSVineetha G. 
Jaya Kumaran int eee_tw_timer = priv->eee_tw_timer; 459879626e3SJerome Brunet 460f5351ef7SGiuseppe CAVALLARO /* Using PCS we cannot dial with the phy registers at this stage 461f5351ef7SGiuseppe CAVALLARO * so we do not support extra feature like EEE. 462f5351ef7SGiuseppe CAVALLARO */ 463a47b9e15SDejin Zheng if (priv->hw->pcs == STMMAC_PCS_TBI || 464a47b9e15SDejin Zheng priv->hw->pcs == STMMAC_PCS_RTBI) 46574371272SJose Abreu return false; 466f5351ef7SGiuseppe CAVALLARO 46774371272SJose Abreu /* Check if MAC core supports the EEE feature. */ 46874371272SJose Abreu if (!priv->dma_cap.eee) 46974371272SJose Abreu return false; 470d765955dSGiuseppe CAVALLARO 47129555fa3SThierry Reding mutex_lock(&priv->lock); 47274371272SJose Abreu 47374371272SJose Abreu /* Check if it needs to be deactivated */ 474177d935aSJon Hunter if (!priv->eee_active) { 475177d935aSJon Hunter if (priv->eee_enabled) { 47638ddc59dSLABBE Corentin netdev_dbg(priv->dev, "disable EEE\n"); 477be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 47883bf79b6SGiuseppe CAVALLARO del_timer_sync(&priv->eee_ctrl_timer); 479388e201dSVineetha G. Jaya Kumaran stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); 480177d935aSJon Hunter } 4810867bb97SJon Hunter mutex_unlock(&priv->lock); 48274371272SJose Abreu return false; 48374371272SJose Abreu } 48474371272SJose Abreu 48574371272SJose Abreu if (priv->eee_active && !priv->eee_enabled) { 48674371272SJose Abreu timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); 48774371272SJose Abreu stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, 488388e201dSVineetha G. Jaya Kumaran eee_tw_timer); 48983bf79b6SGiuseppe CAVALLARO } 49074371272SJose Abreu 491be1c7eaeSVineetha G. Jaya Kumaran if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { 492be1c7eaeSVineetha G. Jaya Kumaran del_timer_sync(&priv->eee_ctrl_timer); 493be1c7eaeSVineetha G. Jaya Kumaran priv->tx_path_in_lpi_mode = false; 494be1c7eaeSVineetha G. 
Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 1); 495be1c7eaeSVineetha G. Jaya Kumaran } else { 496be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 497be1c7eaeSVineetha G. Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, 498be1c7eaeSVineetha G. Jaya Kumaran STMMAC_LPI_T(priv->tx_lpi_timer)); 499be1c7eaeSVineetha G. Jaya Kumaran } 500388e201dSVineetha G. Jaya Kumaran 50129555fa3SThierry Reding mutex_unlock(&priv->lock); 50238ddc59dSLABBE Corentin netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); 50374371272SJose Abreu return true; 504d765955dSGiuseppe CAVALLARO } 505d765955dSGiuseppe CAVALLARO 506732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps 50732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 508ba1ffd74SGiuseppe CAVALLARO * @p : descriptor pointer 509891434b1SRayagond Kokatanur * @skb : the socket buffer 510891434b1SRayagond Kokatanur * Description : 511891434b1SRayagond Kokatanur * This function will read timestamp from the descriptor & pass it to stack. 512891434b1SRayagond Kokatanur * and also perform some sanity checks. 
513891434b1SRayagond Kokatanur */ 514891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, 515ba1ffd74SGiuseppe CAVALLARO struct dma_desc *p, struct sk_buff *skb) 516891434b1SRayagond Kokatanur { 517891434b1SRayagond Kokatanur struct skb_shared_hwtstamps shhwtstamp; 51825e80cd0SJose Abreu bool found = false; 5193600be5fSVoon Weifeng s64 adjust = 0; 520df103170SNathan Chancellor u64 ns = 0; 521891434b1SRayagond Kokatanur 522891434b1SRayagond Kokatanur if (!priv->hwts_tx_en) 523891434b1SRayagond Kokatanur return; 524891434b1SRayagond Kokatanur 525ceb69499SGiuseppe CAVALLARO /* exit if skb doesn't support hw tstamp */ 52675e4364fSdamuzi000 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) 527891434b1SRayagond Kokatanur return; 528891434b1SRayagond Kokatanur 529891434b1SRayagond Kokatanur /* check tx tstamp status */ 53042de047dSJose Abreu if (stmmac_get_tx_timestamp_status(priv, p)) { 53142de047dSJose Abreu stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); 53225e80cd0SJose Abreu found = true; 53325e80cd0SJose Abreu } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { 53425e80cd0SJose Abreu found = true; 53525e80cd0SJose Abreu } 536891434b1SRayagond Kokatanur 53725e80cd0SJose Abreu if (found) { 5383600be5fSVoon Weifeng /* Correct the clk domain crossing(CDC) error */ 5393600be5fSVoon Weifeng if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { 5403600be5fSVoon Weifeng adjust += -(2 * (NSEC_PER_SEC / 5413600be5fSVoon Weifeng priv->plat->clk_ptp_rate)); 5423600be5fSVoon Weifeng ns += adjust; 5433600be5fSVoon Weifeng } 5443600be5fSVoon Weifeng 545891434b1SRayagond Kokatanur memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 546891434b1SRayagond Kokatanur shhwtstamp.hwtstamp = ns_to_ktime(ns); 547ba1ffd74SGiuseppe CAVALLARO 54833d4c482SMario Molitor netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); 549891434b1SRayagond Kokatanur /* pass tstamp to stack */ 550891434b1SRayagond 
		/* Tail of stmmac_get_tx_hwtstamp() (head is outside this view):
		 * pass the captured TX hardware timestamp up to the stack.
		 */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to stack. It also perform some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 adjust = 0;
	u64 ns = 0;

	/* Nothing to do if RX timestamping was not enabled via SIOCSHWTSTAMP */
	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		/* Correct the clk domain crossing(CDC) error: the hardware
		 * snapshot lags by two PTP clock cycles, so subtract them.
		 */
		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
			adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
			ns -= adjust;
		}

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	/* GMAC4 and XGMAC share the "extended" timestamping programming model */
	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			/* DWMAC core 5.10 does not need TSEVNTENA here —
			 * quirk for that specific Synopsys core revision.
			 */
			if (priv->synopsys_id != DWMAC_CORE_5_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* Basic (non-advanced) timestamping: the hardware can only
		 * stamp everything or nothing, so report V1 L4 events as the
		 * closest supported filter for any non-NONE request.
		 */
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				    (u32)now.tv_sec, now.tv_nsec);
	}

	/* Cache the (possibly adjusted) config so stmmac_hwtstamp_get()
	 * can report it back later.
	 */
	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtain the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	/* Timestamping starts disabled; userspace enables it via ioctl */
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

/* Release PTP resources: disable the reference clock and unregister the
 * PTP clock device created by stmmac_ptp_register().
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

/* phylink .validate callback: restrict the advertised/supported link modes
 * to what this MAC variant (GMAC, GMAC4, XGMAC/XLGMAC) and the platform
 * max_speed limit can actually do, then let the XPCS trim further if present.
 */
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	/* Baseline 10/100/1000 modes supported by every variant */
	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	/* Keep only MAC-capable modes, then remove the masked-off ones */
	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	if (priv->hw->xpcs)
		xpcs_validate(priv->hw->xpcs, supported, state);
}

/* phylink .mac_config callback. */
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}

/* Track Frame Preemption (FPE) handshake state on link transitions:
 * on link-up (with handshake enabled) send a verify mPacket; on link-down
 * reset both local and link-partner FPE states.
 */
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}

/* phylink .mac_link_down callback: disable the MAC, tear down EEE and,
 * if the hardware supports it, reset the FPE handshake state.
 */
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

/* phylink .mac_link_up callback: program MAC_CTRL_REG speed/duplex bits for
 * the resolved link (per-interface bit layout for USXGMII/XLGMII/others),
 * apply flow control, enable the MAC and re-arm EEE/FPE where supported.
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			/* Unsupported speed: leave the MAC unconfigured */
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	/* Platform hook, e.g. for glue-layer clock reconfiguration */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}

/* phylink MAC operations implemented by this driver */
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	/* Without MAC-level PMT support, WoL capability comes from the PHY */
	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
	}

	return ret;
}

/* Create the phylink instance for this device and, if an XPCS is present,
 * attach it as the phylink PCS. Returns 0 or a negative errno.
 */
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	/* Fall back to the device's own fwnode when no phylink node is set */
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	if (priv->hw->xpcs)
		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);

	priv->phylink = phylink;
	return 0;
}

/* Head of stmmac_display_rx_rings() (continues past this view):
 * dump the RX descriptor rings of every RX queue for debugging.
 */
static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);
1261d0225e7dSAlexandre TORGUE 1262bfaf91caSJoakim Zhang if (priv->extend_desc) { 126354139cf3SJoao Pinto head_rx = (void *)rx_q->dma_erx; 1264bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_extended_desc); 1265bfaf91caSJoakim Zhang } else { 126654139cf3SJoao Pinto head_rx = (void *)rx_q->dma_rx; 1267bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_desc); 1268bfaf91caSJoakim Zhang } 126971fedb01SJoao Pinto 127071fedb01SJoao Pinto /* Display RX ring */ 1271bfaf91caSJoakim Zhang stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true, 1272bfaf91caSJoakim Zhang rx_q->dma_rx_phy, desc_size); 12735bacd778SLABBE Corentin } 127454139cf3SJoao Pinto } 1275d0225e7dSAlexandre TORGUE 127671fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv) 127771fedb01SJoao Pinto { 1278ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 1279bfaf91caSJoakim Zhang unsigned int desc_size; 128071fedb01SJoao Pinto void *head_tx; 1281ce736788SJoao Pinto u32 queue; 1282ce736788SJoao Pinto 1283ce736788SJoao Pinto /* Display TX rings */ 1284ce736788SJoao Pinto for (queue = 0; queue < tx_cnt; queue++) { 1285ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1286ce736788SJoao Pinto 1287ce736788SJoao Pinto pr_info("\tTX Queue %d rings\n", queue); 128871fedb01SJoao Pinto 1289bfaf91caSJoakim Zhang if (priv->extend_desc) { 1290ce736788SJoao Pinto head_tx = (void *)tx_q->dma_etx; 1291bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_extended_desc); 1292bfaf91caSJoakim Zhang } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1293579a25a8SJose Abreu head_tx = (void *)tx_q->dma_entx; 1294bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_edesc); 1295bfaf91caSJoakim Zhang } else { 1296ce736788SJoao Pinto head_tx = (void *)tx_q->dma_tx; 1297bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_desc); 1298bfaf91caSJoakim Zhang } 129971fedb01SJoao Pinto 1300bfaf91caSJoakim Zhang stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false, 
1301bfaf91caSJoakim Zhang tx_q->dma_tx_phy, desc_size); 1302c24602efSGiuseppe CAVALLARO } 1303ce736788SJoao Pinto } 1304c24602efSGiuseppe CAVALLARO 130571fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv) 130671fedb01SJoao Pinto { 130771fedb01SJoao Pinto /* Display RX ring */ 130871fedb01SJoao Pinto stmmac_display_rx_rings(priv); 130971fedb01SJoao Pinto 131071fedb01SJoao Pinto /* Display TX ring */ 131171fedb01SJoao Pinto stmmac_display_tx_rings(priv); 131271fedb01SJoao Pinto } 131371fedb01SJoao Pinto 1314286a8372SGiuseppe CAVALLARO static int stmmac_set_bfsize(int mtu, int bufsize) 1315286a8372SGiuseppe CAVALLARO { 1316286a8372SGiuseppe CAVALLARO int ret = bufsize; 1317286a8372SGiuseppe CAVALLARO 1318b2f3a481SJose Abreu if (mtu >= BUF_SIZE_8KiB) 1319b2f3a481SJose Abreu ret = BUF_SIZE_16KiB; 1320b2f3a481SJose Abreu else if (mtu >= BUF_SIZE_4KiB) 1321286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_8KiB; 1322286a8372SGiuseppe CAVALLARO else if (mtu >= BUF_SIZE_2KiB) 1323286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_4KiB; 1324d916701cSGiuseppe CAVALLARO else if (mtu > DEFAULT_BUFSIZE) 1325286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_2KiB; 1326286a8372SGiuseppe CAVALLARO else 1327d916701cSGiuseppe CAVALLARO ret = DEFAULT_BUFSIZE; 1328286a8372SGiuseppe CAVALLARO 1329286a8372SGiuseppe CAVALLARO return ret; 1330286a8372SGiuseppe CAVALLARO } 1331286a8372SGiuseppe CAVALLARO 133232ceabcaSGiuseppe CAVALLARO /** 133371fedb01SJoao Pinto * stmmac_clear_rx_descriptors - clear RX descriptors 133432ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 133554139cf3SJoao Pinto * @queue: RX queue index 133671fedb01SJoao Pinto * Description: this function is called to clear the RX descriptors 133732ceabcaSGiuseppe CAVALLARO * in case of both basic and extended descriptors are used. 
133832ceabcaSGiuseppe CAVALLARO */ 133954139cf3SJoao Pinto static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) 1340c24602efSGiuseppe CAVALLARO { 134154139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 13425bacd778SLABBE Corentin int i; 1343c24602efSGiuseppe CAVALLARO 134471fedb01SJoao Pinto /* Clear the RX descriptors */ 1345aa042f60SSong, Yoong Siang for (i = 0; i < priv->dma_rx_size; i++) 13465bacd778SLABBE Corentin if (priv->extend_desc) 134742de047dSJose Abreu stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, 13485bacd778SLABBE Corentin priv->use_riwt, priv->mode, 1349aa042f60SSong, Yoong Siang (i == priv->dma_rx_size - 1), 1350583e6361SAaro Koskinen priv->dma_buf_sz); 13515bacd778SLABBE Corentin else 135242de047dSJose Abreu stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], 13535bacd778SLABBE Corentin priv->use_riwt, priv->mode, 1354aa042f60SSong, Yoong Siang (i == priv->dma_rx_size - 1), 1355583e6361SAaro Koskinen priv->dma_buf_sz); 135671fedb01SJoao Pinto } 135771fedb01SJoao Pinto 135871fedb01SJoao Pinto /** 135971fedb01SJoao Pinto * stmmac_clear_tx_descriptors - clear tx descriptors 136071fedb01SJoao Pinto * @priv: driver private structure 1361ce736788SJoao Pinto * @queue: TX queue index. 136271fedb01SJoao Pinto * Description: this function is called to clear the TX descriptors 136371fedb01SJoao Pinto * in case of both basic and extended descriptors are used. 
136471fedb01SJoao Pinto */ 1365ce736788SJoao Pinto static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) 136671fedb01SJoao Pinto { 1367ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 136871fedb01SJoao Pinto int i; 136971fedb01SJoao Pinto 137071fedb01SJoao Pinto /* Clear the TX descriptors */ 1371aa042f60SSong, Yoong Siang for (i = 0; i < priv->dma_tx_size; i++) { 1372aa042f60SSong, Yoong Siang int last = (i == (priv->dma_tx_size - 1)); 1373579a25a8SJose Abreu struct dma_desc *p; 1374579a25a8SJose Abreu 13755bacd778SLABBE Corentin if (priv->extend_desc) 1376579a25a8SJose Abreu p = &tx_q->dma_etx[i].basic; 1377579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1378579a25a8SJose Abreu p = &tx_q->dma_entx[i].basic; 13795bacd778SLABBE Corentin else 1380579a25a8SJose Abreu p = &tx_q->dma_tx[i]; 1381579a25a8SJose Abreu 1382579a25a8SJose Abreu stmmac_init_tx_desc(priv, p, priv->mode, last); 1383579a25a8SJose Abreu } 1384c24602efSGiuseppe CAVALLARO } 1385c24602efSGiuseppe CAVALLARO 1386732fdf0eSGiuseppe CAVALLARO /** 138771fedb01SJoao Pinto * stmmac_clear_descriptors - clear descriptors 138871fedb01SJoao Pinto * @priv: driver private structure 138971fedb01SJoao Pinto * Description: this function is called to clear the TX and RX descriptors 139071fedb01SJoao Pinto * in case of both basic and extended descriptors are used. 
139171fedb01SJoao Pinto */ 139271fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv) 139371fedb01SJoao Pinto { 139454139cf3SJoao Pinto u32 rx_queue_cnt = priv->plat->rx_queues_to_use; 1395ce736788SJoao Pinto u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 139654139cf3SJoao Pinto u32 queue; 139754139cf3SJoao Pinto 139871fedb01SJoao Pinto /* Clear the RX descriptors */ 139954139cf3SJoao Pinto for (queue = 0; queue < rx_queue_cnt; queue++) 140054139cf3SJoao Pinto stmmac_clear_rx_descriptors(priv, queue); 140171fedb01SJoao Pinto 140271fedb01SJoao Pinto /* Clear the TX descriptors */ 1403ce736788SJoao Pinto for (queue = 0; queue < tx_queue_cnt; queue++) 1404ce736788SJoao Pinto stmmac_clear_tx_descriptors(priv, queue); 140571fedb01SJoao Pinto } 140671fedb01SJoao Pinto 140771fedb01SJoao Pinto /** 1408732fdf0eSGiuseppe CAVALLARO * stmmac_init_rx_buffers - init the RX descriptor buffer. 1409732fdf0eSGiuseppe CAVALLARO * @priv: driver private structure 1410732fdf0eSGiuseppe CAVALLARO * @p: descriptor pointer 1411732fdf0eSGiuseppe CAVALLARO * @i: descriptor index 141254139cf3SJoao Pinto * @flags: gfp flag 141354139cf3SJoao Pinto * @queue: RX queue index 1414732fdf0eSGiuseppe CAVALLARO * Description: this function is called to allocate a receive buffer, perform 1415732fdf0eSGiuseppe CAVALLARO * the DMA mapping and init the descriptor. 
1416732fdf0eSGiuseppe CAVALLARO */ 1417c24602efSGiuseppe CAVALLARO static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, 141854139cf3SJoao Pinto int i, gfp_t flags, u32 queue) 1419c24602efSGiuseppe CAVALLARO { 142054139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 14212af6106aSJose Abreu struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 1422c24602efSGiuseppe CAVALLARO 1423da5ec7f2SOng Boon Leong if (!buf->page) { 14242af6106aSJose Abreu buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); 14252af6106aSJose Abreu if (!buf->page) 142656329137SBartlomiej Zolnierkiewicz return -ENOMEM; 14275fabb012SOng Boon Leong buf->page_offset = stmmac_rx_offset(priv); 1428da5ec7f2SOng Boon Leong } 1429c24602efSGiuseppe CAVALLARO 1430da5ec7f2SOng Boon Leong if (priv->sph && !buf->sec_page) { 143167afd6d1SJose Abreu buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); 143267afd6d1SJose Abreu if (!buf->sec_page) 143367afd6d1SJose Abreu return -ENOMEM; 143467afd6d1SJose Abreu 143567afd6d1SJose Abreu buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 1436396e13e1SJoakim Zhang stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 143767afd6d1SJose Abreu } else { 143867afd6d1SJose Abreu buf->sec_page = NULL; 1439396e13e1SJoakim Zhang stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 144067afd6d1SJose Abreu } 144167afd6d1SJose Abreu 14425fabb012SOng Boon Leong buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; 14435fabb012SOng Boon Leong 14442af6106aSJose Abreu stmmac_set_desc_addr(priv, p, buf->addr); 14452c520b1cSJose Abreu if (priv->dma_buf_sz == BUF_SIZE_16KiB) 14462c520b1cSJose Abreu stmmac_init_desc3(priv, p); 1447c24602efSGiuseppe CAVALLARO 1448c24602efSGiuseppe CAVALLARO return 0; 1449c24602efSGiuseppe CAVALLARO } 1450c24602efSGiuseppe CAVALLARO 145171fedb01SJoao Pinto /** 145271fedb01SJoao Pinto * stmmac_free_rx_buffer - free RX dma buffers 145371fedb01SJoao Pinto * @priv: private structure 
145454139cf3SJoao Pinto * @queue: RX queue index 145571fedb01SJoao Pinto * @i: buffer index. 145671fedb01SJoao Pinto */ 145754139cf3SJoao Pinto static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) 145856329137SBartlomiej Zolnierkiewicz { 145954139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 14602af6106aSJose Abreu struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 146154139cf3SJoao Pinto 14622af6106aSJose Abreu if (buf->page) 1463458de8a9SIlias Apalodimas page_pool_put_full_page(rx_q->page_pool, buf->page, false); 14642af6106aSJose Abreu buf->page = NULL; 146567afd6d1SJose Abreu 146667afd6d1SJose Abreu if (buf->sec_page) 1467458de8a9SIlias Apalodimas page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); 146867afd6d1SJose Abreu buf->sec_page = NULL; 146956329137SBartlomiej Zolnierkiewicz } 147056329137SBartlomiej Zolnierkiewicz 14717ac6653aSJeff Kirsher /** 147271fedb01SJoao Pinto * stmmac_free_tx_buffer - free RX dma buffers 147371fedb01SJoao Pinto * @priv: private structure 1474ce736788SJoao Pinto * @queue: RX queue index 147571fedb01SJoao Pinto * @i: buffer index. 
147671fedb01SJoao Pinto */ 1477ce736788SJoao Pinto static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) 147871fedb01SJoao Pinto { 1479ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1480ce736788SJoao Pinto 1481be8b38a7SOng Boon Leong if (tx_q->tx_skbuff_dma[i].buf && 1482be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { 1483ce736788SJoao Pinto if (tx_q->tx_skbuff_dma[i].map_as_page) 148471fedb01SJoao Pinto dma_unmap_page(priv->device, 1485ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].buf, 1486ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].len, 148771fedb01SJoao Pinto DMA_TO_DEVICE); 148871fedb01SJoao Pinto else 148971fedb01SJoao Pinto dma_unmap_single(priv->device, 1490ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].buf, 1491ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].len, 149271fedb01SJoao Pinto DMA_TO_DEVICE); 149371fedb01SJoao Pinto } 149471fedb01SJoao Pinto 1495be8b38a7SOng Boon Leong if (tx_q->xdpf[i] && 14968b278a5bSOng Boon Leong (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || 14978b278a5bSOng Boon Leong tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { 1498be8b38a7SOng Boon Leong xdp_return_frame(tx_q->xdpf[i]); 1499be8b38a7SOng Boon Leong tx_q->xdpf[i] = NULL; 1500be8b38a7SOng Boon Leong } 1501be8b38a7SOng Boon Leong 1502132c32eeSOng Boon Leong if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) 1503132c32eeSOng Boon Leong tx_q->xsk_frames_done++; 1504132c32eeSOng Boon Leong 1505be8b38a7SOng Boon Leong if (tx_q->tx_skbuff[i] && 1506be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { 1507ce736788SJoao Pinto dev_kfree_skb_any(tx_q->tx_skbuff[i]); 1508ce736788SJoao Pinto tx_q->tx_skbuff[i] = NULL; 1509be8b38a7SOng Boon Leong } 1510be8b38a7SOng Boon Leong 1511ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].buf = 0; 1512ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].map_as_page = false; 151371fedb01SJoao Pinto } 151471fedb01SJoao Pinto 
151571fedb01SJoao Pinto /** 15164298255fSOng Boon Leong * dma_free_rx_skbufs - free RX dma buffers 15174298255fSOng Boon Leong * @priv: private structure 15184298255fSOng Boon Leong * @queue: RX queue index 15194298255fSOng Boon Leong */ 15204298255fSOng Boon Leong static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) 15214298255fSOng Boon Leong { 15224298255fSOng Boon Leong int i; 15234298255fSOng Boon Leong 15244298255fSOng Boon Leong for (i = 0; i < priv->dma_rx_size; i++) 15254298255fSOng Boon Leong stmmac_free_rx_buffer(priv, queue, i); 15264298255fSOng Boon Leong } 15274298255fSOng Boon Leong 15284298255fSOng Boon Leong static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, 15294298255fSOng Boon Leong gfp_t flags) 15304298255fSOng Boon Leong { 15314298255fSOng Boon Leong struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 15324298255fSOng Boon Leong int i; 15334298255fSOng Boon Leong 15344298255fSOng Boon Leong for (i = 0; i < priv->dma_rx_size; i++) { 15354298255fSOng Boon Leong struct dma_desc *p; 15364298255fSOng Boon Leong int ret; 15374298255fSOng Boon Leong 15384298255fSOng Boon Leong if (priv->extend_desc) 15394298255fSOng Boon Leong p = &((rx_q->dma_erx + i)->basic); 15404298255fSOng Boon Leong else 15414298255fSOng Boon Leong p = rx_q->dma_rx + i; 15424298255fSOng Boon Leong 15434298255fSOng Boon Leong ret = stmmac_init_rx_buffers(priv, p, i, flags, 15444298255fSOng Boon Leong queue); 15454298255fSOng Boon Leong if (ret) 15464298255fSOng Boon Leong return ret; 1547bba2556eSOng Boon Leong 1548bba2556eSOng Boon Leong rx_q->buf_alloc_num++; 15494298255fSOng Boon Leong } 15504298255fSOng Boon Leong 15514298255fSOng Boon Leong return 0; 15524298255fSOng Boon Leong } 15534298255fSOng Boon Leong 15544298255fSOng Boon Leong /** 1555bba2556eSOng Boon Leong * dma_free_rx_xskbufs - free RX dma buffers from XSK pool 1556bba2556eSOng Boon Leong * @priv: private structure 1557bba2556eSOng Boon Leong * @queue: RX queue index 
1558bba2556eSOng Boon Leong */ 1559bba2556eSOng Boon Leong static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) 1560bba2556eSOng Boon Leong { 1561bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1562bba2556eSOng Boon Leong int i; 1563bba2556eSOng Boon Leong 1564bba2556eSOng Boon Leong for (i = 0; i < priv->dma_rx_size; i++) { 1565bba2556eSOng Boon Leong struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 1566bba2556eSOng Boon Leong 1567bba2556eSOng Boon Leong if (!buf->xdp) 1568bba2556eSOng Boon Leong continue; 1569bba2556eSOng Boon Leong 1570bba2556eSOng Boon Leong xsk_buff_free(buf->xdp); 1571bba2556eSOng Boon Leong buf->xdp = NULL; 1572bba2556eSOng Boon Leong } 1573bba2556eSOng Boon Leong } 1574bba2556eSOng Boon Leong 1575bba2556eSOng Boon Leong static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue) 1576bba2556eSOng Boon Leong { 1577bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1578bba2556eSOng Boon Leong int i; 1579bba2556eSOng Boon Leong 1580bba2556eSOng Boon Leong for (i = 0; i < priv->dma_rx_size; i++) { 1581bba2556eSOng Boon Leong struct stmmac_rx_buffer *buf; 1582bba2556eSOng Boon Leong dma_addr_t dma_addr; 1583bba2556eSOng Boon Leong struct dma_desc *p; 1584bba2556eSOng Boon Leong 1585bba2556eSOng Boon Leong if (priv->extend_desc) 1586bba2556eSOng Boon Leong p = (struct dma_desc *)(rx_q->dma_erx + i); 1587bba2556eSOng Boon Leong else 1588bba2556eSOng Boon Leong p = rx_q->dma_rx + i; 1589bba2556eSOng Boon Leong 1590bba2556eSOng Boon Leong buf = &rx_q->buf_pool[i]; 1591bba2556eSOng Boon Leong 1592bba2556eSOng Boon Leong buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 1593bba2556eSOng Boon Leong if (!buf->xdp) 1594bba2556eSOng Boon Leong return -ENOMEM; 1595bba2556eSOng Boon Leong 1596bba2556eSOng Boon Leong dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 1597bba2556eSOng Boon Leong stmmac_set_desc_addr(priv, p, dma_addr); 1598bba2556eSOng Boon Leong 
rx_q->buf_alloc_num++; 1599bba2556eSOng Boon Leong } 1600bba2556eSOng Boon Leong 1601bba2556eSOng Boon Leong return 0; 1602bba2556eSOng Boon Leong } 1603bba2556eSOng Boon Leong 1604bba2556eSOng Boon Leong static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) 1605bba2556eSOng Boon Leong { 1606bba2556eSOng Boon Leong if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) 1607bba2556eSOng Boon Leong return NULL; 1608bba2556eSOng Boon Leong 1609bba2556eSOng Boon Leong return xsk_get_pool_from_qid(priv->dev, queue); 1610bba2556eSOng Boon Leong } 1611bba2556eSOng Boon Leong 16129c63faaaSJoakim Zhang /** 1613de0b90e5SOng Boon Leong * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) 1614de0b90e5SOng Boon Leong * @priv: driver private structure 1615de0b90e5SOng Boon Leong * @queue: RX queue index 16165bacd778SLABBE Corentin * @flags: gfp flag. 161771fedb01SJoao Pinto * Description: this function initializes the DMA RX descriptors 16185bacd778SLABBE Corentin * and allocates the socket buffers. It supports the chained and ring 1619286a8372SGiuseppe CAVALLARO * modes. 
16207ac6653aSJeff Kirsher */ 1621de0b90e5SOng Boon Leong static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags) 16227ac6653aSJeff Kirsher { 162354139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1624de0b90e5SOng Boon Leong int ret; 162554139cf3SJoao Pinto 162654139cf3SJoao Pinto netif_dbg(priv, probe, priv->dev, 162754139cf3SJoao Pinto "(%s) dma_rx_phy=0x%08x\n", __func__, 162854139cf3SJoao Pinto (u32)rx_q->dma_rx_phy); 162954139cf3SJoao Pinto 1630cbcf0999SJose Abreu stmmac_clear_rx_descriptors(priv, queue); 1631cbcf0999SJose Abreu 1632bba2556eSOng Boon Leong xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); 1633bba2556eSOng Boon Leong 1634bba2556eSOng Boon Leong rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); 1635bba2556eSOng Boon Leong 1636bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 1637bba2556eSOng Boon Leong WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, 1638bba2556eSOng Boon Leong MEM_TYPE_XSK_BUFF_POOL, 1639bba2556eSOng Boon Leong NULL)); 1640bba2556eSOng Boon Leong netdev_info(priv->dev, 1641bba2556eSOng Boon Leong "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", 1642bba2556eSOng Boon Leong rx_q->queue_index); 1643bba2556eSOng Boon Leong xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); 1644bba2556eSOng Boon Leong } else { 1645be8b38a7SOng Boon Leong WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, 1646be8b38a7SOng Boon Leong MEM_TYPE_PAGE_POOL, 1647be8b38a7SOng Boon Leong rx_q->page_pool)); 1648be8b38a7SOng Boon Leong netdev_info(priv->dev, 1649be8b38a7SOng Boon Leong "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", 1650be8b38a7SOng Boon Leong rx_q->queue_index); 1651bba2556eSOng Boon Leong } 1652be8b38a7SOng Boon Leong 1653bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 1654bba2556eSOng Boon Leong /* RX XDP ZC buffer pool may not be populated, e.g. 1655bba2556eSOng Boon Leong * xdpsock TX-only. 
1656bba2556eSOng Boon Leong */ 1657bba2556eSOng Boon Leong stmmac_alloc_rx_buffers_zc(priv, queue); 1658bba2556eSOng Boon Leong } else { 16594298255fSOng Boon Leong ret = stmmac_alloc_rx_buffers(priv, queue, flags); 16604298255fSOng Boon Leong if (ret < 0) 1661de0b90e5SOng Boon Leong return -ENOMEM; 1662bba2556eSOng Boon Leong } 166354139cf3SJoao Pinto 166454139cf3SJoao Pinto rx_q->cur_rx = 0; 16654298255fSOng Boon Leong rx_q->dirty_rx = 0; 166654139cf3SJoao Pinto 1667c24602efSGiuseppe CAVALLARO /* Setup the chained descriptor addresses */ 1668c24602efSGiuseppe CAVALLARO if (priv->mode == STMMAC_CHAIN_MODE) { 166971fedb01SJoao Pinto if (priv->extend_desc) 16702c520b1cSJose Abreu stmmac_mode_init(priv, rx_q->dma_erx, 1671aa042f60SSong, Yoong Siang rx_q->dma_rx_phy, 1672aa042f60SSong, Yoong Siang priv->dma_rx_size, 1); 167371fedb01SJoao Pinto else 16742c520b1cSJose Abreu stmmac_mode_init(priv, rx_q->dma_rx, 1675aa042f60SSong, Yoong Siang rx_q->dma_rx_phy, 1676aa042f60SSong, Yoong Siang priv->dma_rx_size, 0); 167771fedb01SJoao Pinto } 1678de0b90e5SOng Boon Leong 1679de0b90e5SOng Boon Leong return 0; 1680de0b90e5SOng Boon Leong } 1681de0b90e5SOng Boon Leong 1682de0b90e5SOng Boon Leong static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) 1683de0b90e5SOng Boon Leong { 1684de0b90e5SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 1685de0b90e5SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 1686de0b90e5SOng Boon Leong u32 queue; 1687de0b90e5SOng Boon Leong int ret; 1688de0b90e5SOng Boon Leong 1689de0b90e5SOng Boon Leong /* RX INITIALIZATION */ 1690de0b90e5SOng Boon Leong netif_dbg(priv, probe, priv->dev, 1691de0b90e5SOng Boon Leong "SKB addresses:\nskb\t\tskb data\tdma data\n"); 1692de0b90e5SOng Boon Leong 1693de0b90e5SOng Boon Leong for (queue = 0; queue < rx_count; queue++) { 1694de0b90e5SOng Boon Leong ret = __init_dma_rx_desc_rings(priv, queue, flags); 1695de0b90e5SOng Boon Leong if (ret) 1696de0b90e5SOng Boon Leong goto 
err_init_rx_buffers; 169754139cf3SJoao Pinto } 169854139cf3SJoao Pinto 169971fedb01SJoao Pinto return 0; 170054139cf3SJoao Pinto 170171fedb01SJoao Pinto err_init_rx_buffers: 170254139cf3SJoao Pinto while (queue >= 0) { 1703bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1704bba2556eSOng Boon Leong 1705bba2556eSOng Boon Leong if (rx_q->xsk_pool) 1706bba2556eSOng Boon Leong dma_free_rx_xskbufs(priv, queue); 1707bba2556eSOng Boon Leong else 17084298255fSOng Boon Leong dma_free_rx_skbufs(priv, queue); 170954139cf3SJoao Pinto 1710bba2556eSOng Boon Leong rx_q->buf_alloc_num = 0; 1711bba2556eSOng Boon Leong rx_q->xsk_pool = NULL; 1712bba2556eSOng Boon Leong 171354139cf3SJoao Pinto if (queue == 0) 171454139cf3SJoao Pinto break; 171554139cf3SJoao Pinto 171654139cf3SJoao Pinto queue--; 171754139cf3SJoao Pinto } 171854139cf3SJoao Pinto 171971fedb01SJoao Pinto return ret; 172071fedb01SJoao Pinto } 172171fedb01SJoao Pinto 172271fedb01SJoao Pinto /** 1723de0b90e5SOng Boon Leong * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) 1724de0b90e5SOng Boon Leong * @priv: driver private structure 1725de0b90e5SOng Boon Leong * @queue : TX queue index 172671fedb01SJoao Pinto * Description: this function initializes the DMA TX descriptors 172771fedb01SJoao Pinto * and allocates the socket buffers. It supports the chained and ring 172871fedb01SJoao Pinto * modes. 
172971fedb01SJoao Pinto */ 1730de0b90e5SOng Boon Leong static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) 173171fedb01SJoao Pinto { 1732ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1733de0b90e5SOng Boon Leong int i; 1734ce736788SJoao Pinto 173571fedb01SJoao Pinto netif_dbg(priv, probe, priv->dev, 1736ce736788SJoao Pinto "(%s) dma_tx_phy=0x%08x\n", __func__, 1737ce736788SJoao Pinto (u32)tx_q->dma_tx_phy); 173871fedb01SJoao Pinto 173971fedb01SJoao Pinto /* Setup the chained descriptor addresses */ 174071fedb01SJoao Pinto if (priv->mode == STMMAC_CHAIN_MODE) { 174171fedb01SJoao Pinto if (priv->extend_desc) 17422c520b1cSJose Abreu stmmac_mode_init(priv, tx_q->dma_etx, 1743aa042f60SSong, Yoong Siang tx_q->dma_tx_phy, 1744aa042f60SSong, Yoong Siang priv->dma_tx_size, 1); 1745579a25a8SJose Abreu else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) 17462c520b1cSJose Abreu stmmac_mode_init(priv, tx_q->dma_tx, 1747aa042f60SSong, Yoong Siang tx_q->dma_tx_phy, 1748aa042f60SSong, Yoong Siang priv->dma_tx_size, 0); 1749c24602efSGiuseppe CAVALLARO } 1750286a8372SGiuseppe CAVALLARO 1751132c32eeSOng Boon Leong tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); 1752132c32eeSOng Boon Leong 1753aa042f60SSong, Yoong Siang for (i = 0; i < priv->dma_tx_size; i++) { 1754c24602efSGiuseppe CAVALLARO struct dma_desc *p; 1755de0b90e5SOng Boon Leong 1756c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 1757ce736788SJoao Pinto p = &((tx_q->dma_etx + i)->basic); 1758579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1759579a25a8SJose Abreu p = &((tx_q->dma_entx + i)->basic); 1760c24602efSGiuseppe CAVALLARO else 1761ce736788SJoao Pinto p = tx_q->dma_tx + i; 1762f748be53SAlexandre TORGUE 176344c67f85SJose Abreu stmmac_clear_desc(priv, p); 1764f748be53SAlexandre TORGUE 1765ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].buf = 0; 1766ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].map_as_page = false; 1767ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].len = 0; 
1768ce736788SJoao Pinto tx_q->tx_skbuff_dma[i].last_segment = false; 1769ce736788SJoao Pinto tx_q->tx_skbuff[i] = NULL; 17704a7d666aSGiuseppe CAVALLARO } 1771c24602efSGiuseppe CAVALLARO 1772ce736788SJoao Pinto tx_q->dirty_tx = 0; 1773ce736788SJoao Pinto tx_q->cur_tx = 0; 17748d212a9eSNiklas Cassel tx_q->mss = 0; 1775ce736788SJoao Pinto 1776c22a3f48SJoao Pinto netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); 1777de0b90e5SOng Boon Leong 1778de0b90e5SOng Boon Leong return 0; 1779c22a3f48SJoao Pinto } 17807ac6653aSJeff Kirsher 1781de0b90e5SOng Boon Leong static int init_dma_tx_desc_rings(struct net_device *dev) 1782de0b90e5SOng Boon Leong { 1783de0b90e5SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 1784de0b90e5SOng Boon Leong u32 tx_queue_cnt; 1785de0b90e5SOng Boon Leong u32 queue; 1786de0b90e5SOng Boon Leong 1787de0b90e5SOng Boon Leong tx_queue_cnt = priv->plat->tx_queues_to_use; 1788de0b90e5SOng Boon Leong 1789de0b90e5SOng Boon Leong for (queue = 0; queue < tx_queue_cnt; queue++) 1790de0b90e5SOng Boon Leong __init_dma_tx_desc_rings(priv, queue); 1791de0b90e5SOng Boon Leong 179271fedb01SJoao Pinto return 0; 179371fedb01SJoao Pinto } 179471fedb01SJoao Pinto 179571fedb01SJoao Pinto /** 179671fedb01SJoao Pinto * init_dma_desc_rings - init the RX/TX descriptor rings 179771fedb01SJoao Pinto * @dev: net device structure 179871fedb01SJoao Pinto * @flags: gfp flag. 179971fedb01SJoao Pinto * Description: this function initializes the DMA RX/TX descriptors 180071fedb01SJoao Pinto * and allocates the socket buffers. It supports the chained and ring 180171fedb01SJoao Pinto * modes. 
180271fedb01SJoao Pinto */ 180371fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) 180471fedb01SJoao Pinto { 180571fedb01SJoao Pinto struct stmmac_priv *priv = netdev_priv(dev); 180671fedb01SJoao Pinto int ret; 180771fedb01SJoao Pinto 180871fedb01SJoao Pinto ret = init_dma_rx_desc_rings(dev, flags); 180971fedb01SJoao Pinto if (ret) 181071fedb01SJoao Pinto return ret; 181171fedb01SJoao Pinto 181271fedb01SJoao Pinto ret = init_dma_tx_desc_rings(dev); 181371fedb01SJoao Pinto 18145bacd778SLABBE Corentin stmmac_clear_descriptors(priv); 18157ac6653aSJeff Kirsher 1816c24602efSGiuseppe CAVALLARO if (netif_msg_hw(priv)) 1817c24602efSGiuseppe CAVALLARO stmmac_display_rings(priv); 181856329137SBartlomiej Zolnierkiewicz 181956329137SBartlomiej Zolnierkiewicz return ret; 18207ac6653aSJeff Kirsher } 18217ac6653aSJeff Kirsher 182271fedb01SJoao Pinto /** 182371fedb01SJoao Pinto * dma_free_tx_skbufs - free TX dma buffers 182471fedb01SJoao Pinto * @priv: private structure 1825ce736788SJoao Pinto * @queue: TX queue index 182671fedb01SJoao Pinto */ 1827ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) 18287ac6653aSJeff Kirsher { 1829132c32eeSOng Boon Leong struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 18307ac6653aSJeff Kirsher int i; 18317ac6653aSJeff Kirsher 1832132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 1833132c32eeSOng Boon Leong 1834aa042f60SSong, Yoong Siang for (i = 0; i < priv->dma_tx_size; i++) 1835ce736788SJoao Pinto stmmac_free_tx_buffer(priv, queue, i); 1836132c32eeSOng Boon Leong 1837132c32eeSOng Boon Leong if (tx_q->xsk_pool && tx_q->xsk_frames_done) { 1838132c32eeSOng Boon Leong xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 1839132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 1840132c32eeSOng Boon Leong tx_q->xsk_pool = NULL; 1841132c32eeSOng Boon Leong } 18427ac6653aSJeff Kirsher } 18437ac6653aSJeff Kirsher 1844732fdf0eSGiuseppe CAVALLARO /** 18454ec236c7SFugang Duan 
* stmmac_free_tx_skbufs - free TX skb buffers 18464ec236c7SFugang Duan * @priv: private structure 18474ec236c7SFugang Duan */ 18484ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 18494ec236c7SFugang Duan { 18504ec236c7SFugang Duan u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 18514ec236c7SFugang Duan u32 queue; 18524ec236c7SFugang Duan 18534ec236c7SFugang Duan for (queue = 0; queue < tx_queue_cnt; queue++) 18544ec236c7SFugang Duan dma_free_tx_skbufs(priv, queue); 18554ec236c7SFugang Duan } 18564ec236c7SFugang Duan 18574ec236c7SFugang Duan /** 1858da5ec7f2SOng Boon Leong * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) 185954139cf3SJoao Pinto * @priv: private structure 1860da5ec7f2SOng Boon Leong * @queue: RX queue index 186154139cf3SJoao Pinto */ 1862da5ec7f2SOng Boon Leong static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) 186354139cf3SJoao Pinto { 186454139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 186554139cf3SJoao Pinto 186654139cf3SJoao Pinto /* Release the DMA RX socket buffers */ 1867bba2556eSOng Boon Leong if (rx_q->xsk_pool) 1868bba2556eSOng Boon Leong dma_free_rx_xskbufs(priv, queue); 1869bba2556eSOng Boon Leong else 187054139cf3SJoao Pinto dma_free_rx_skbufs(priv, queue); 187154139cf3SJoao Pinto 1872bba2556eSOng Boon Leong rx_q->buf_alloc_num = 0; 1873bba2556eSOng Boon Leong rx_q->xsk_pool = NULL; 1874bba2556eSOng Boon Leong 187554139cf3SJoao Pinto /* Free DMA regions of consistent memory previously allocated */ 187654139cf3SJoao Pinto if (!priv->extend_desc) 1877aa042f60SSong, Yoong Siang dma_free_coherent(priv->device, priv->dma_rx_size * 1878aa042f60SSong, Yoong Siang sizeof(struct dma_desc), 187954139cf3SJoao Pinto rx_q->dma_rx, rx_q->dma_rx_phy); 188054139cf3SJoao Pinto else 1881aa042f60SSong, Yoong Siang dma_free_coherent(priv->device, priv->dma_rx_size * 188254139cf3SJoao Pinto sizeof(struct dma_extended_desc), 188354139cf3SJoao Pinto 
rx_q->dma_erx, rx_q->dma_rx_phy); 188454139cf3SJoao Pinto 1885be8b38a7SOng Boon Leong if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) 1886be8b38a7SOng Boon Leong xdp_rxq_info_unreg(&rx_q->xdp_rxq); 1887be8b38a7SOng Boon Leong 18882af6106aSJose Abreu kfree(rx_q->buf_pool); 1889c3f812ceSJonathan Lemon if (rx_q->page_pool) 18902af6106aSJose Abreu page_pool_destroy(rx_q->page_pool); 18912af6106aSJose Abreu } 1892da5ec7f2SOng Boon Leong 1893da5ec7f2SOng Boon Leong static void free_dma_rx_desc_resources(struct stmmac_priv *priv) 1894da5ec7f2SOng Boon Leong { 1895da5ec7f2SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 1896da5ec7f2SOng Boon Leong u32 queue; 1897da5ec7f2SOng Boon Leong 1898da5ec7f2SOng Boon Leong /* Free RX queue resources */ 1899da5ec7f2SOng Boon Leong for (queue = 0; queue < rx_count; queue++) 1900da5ec7f2SOng Boon Leong __free_dma_rx_desc_resources(priv, queue); 190154139cf3SJoao Pinto } 190254139cf3SJoao Pinto 190354139cf3SJoao Pinto /** 1904da5ec7f2SOng Boon Leong * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) 1905ce736788SJoao Pinto * @priv: private structure 1906da5ec7f2SOng Boon Leong * @queue: TX queue index 1907ce736788SJoao Pinto */ 1908da5ec7f2SOng Boon Leong static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) 1909ce736788SJoao Pinto { 1910ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1911579a25a8SJose Abreu size_t size; 1912579a25a8SJose Abreu void *addr; 1913ce736788SJoao Pinto 1914ce736788SJoao Pinto /* Release the DMA TX socket buffers */ 1915ce736788SJoao Pinto dma_free_tx_skbufs(priv, queue); 1916ce736788SJoao Pinto 1917579a25a8SJose Abreu if (priv->extend_desc) { 1918579a25a8SJose Abreu size = sizeof(struct dma_extended_desc); 1919579a25a8SJose Abreu addr = tx_q->dma_etx; 1920579a25a8SJose Abreu } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1921579a25a8SJose Abreu size = sizeof(struct dma_edesc); 1922579a25a8SJose Abreu addr = tx_q->dma_entx; 
1923579a25a8SJose Abreu } else { 1924579a25a8SJose Abreu size = sizeof(struct dma_desc); 1925579a25a8SJose Abreu addr = tx_q->dma_tx; 1926579a25a8SJose Abreu } 1927579a25a8SJose Abreu 1928aa042f60SSong, Yoong Siang size *= priv->dma_tx_size; 1929579a25a8SJose Abreu 1930579a25a8SJose Abreu dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); 1931ce736788SJoao Pinto 1932ce736788SJoao Pinto kfree(tx_q->tx_skbuff_dma); 1933ce736788SJoao Pinto kfree(tx_q->tx_skbuff); 1934ce736788SJoao Pinto } 1935da5ec7f2SOng Boon Leong 1936da5ec7f2SOng Boon Leong static void free_dma_tx_desc_resources(struct stmmac_priv *priv) 1937da5ec7f2SOng Boon Leong { 1938da5ec7f2SOng Boon Leong u32 tx_count = priv->plat->tx_queues_to_use; 1939da5ec7f2SOng Boon Leong u32 queue; 1940da5ec7f2SOng Boon Leong 1941da5ec7f2SOng Boon Leong /* Free TX queue resources */ 1942da5ec7f2SOng Boon Leong for (queue = 0; queue < tx_count; queue++) 1943da5ec7f2SOng Boon Leong __free_dma_tx_desc_resources(priv, queue); 1944ce736788SJoao Pinto } 1945ce736788SJoao Pinto 1946ce736788SJoao Pinto /** 1947da5ec7f2SOng Boon Leong * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). 1948732fdf0eSGiuseppe CAVALLARO * @priv: private structure 1949da5ec7f2SOng Boon Leong * @queue: RX queue index 1950732fdf0eSGiuseppe CAVALLARO * Description: according to which descriptor can be used (extend or basic) 1951732fdf0eSGiuseppe CAVALLARO * this function allocates the resources for TX and RX paths. In case of 1952732fdf0eSGiuseppe CAVALLARO * reception, for example, it pre-allocated the RX socket buffer in order to 1953732fdf0eSGiuseppe CAVALLARO * allow zero-copy mechanism. 
1954732fdf0eSGiuseppe CAVALLARO */ 1955da5ec7f2SOng Boon Leong static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) 195609f8d696SSrinivas Kandagatla { 195754139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1958be8b38a7SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 1959da5ec7f2SOng Boon Leong bool xdp_prog = stmmac_xdp_is_enabled(priv); 19602af6106aSJose Abreu struct page_pool_params pp_params = { 0 }; 19614f28bd95SThierry Reding unsigned int num_pages; 1962132c32eeSOng Boon Leong unsigned int napi_id; 1963be8b38a7SOng Boon Leong int ret; 196454139cf3SJoao Pinto 196554139cf3SJoao Pinto rx_q->queue_index = queue; 196654139cf3SJoao Pinto rx_q->priv_data = priv; 196754139cf3SJoao Pinto 19685fabb012SOng Boon Leong pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 1969aa042f60SSong, Yoong Siang pp_params.pool_size = priv->dma_rx_size; 19704f28bd95SThierry Reding num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); 19714f28bd95SThierry Reding pp_params.order = ilog2(num_pages); 19722af6106aSJose Abreu pp_params.nid = dev_to_node(priv->device); 19732af6106aSJose Abreu pp_params.dev = priv->device; 19745fabb012SOng Boon Leong pp_params.dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 19755fabb012SOng Boon Leong pp_params.offset = stmmac_rx_offset(priv); 19765fabb012SOng Boon Leong pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); 19775bacd778SLABBE Corentin 19782af6106aSJose Abreu rx_q->page_pool = page_pool_create(&pp_params); 19792af6106aSJose Abreu if (IS_ERR(rx_q->page_pool)) { 19802af6106aSJose Abreu ret = PTR_ERR(rx_q->page_pool); 19812af6106aSJose Abreu rx_q->page_pool = NULL; 1982da5ec7f2SOng Boon Leong return ret; 19832af6106aSJose Abreu } 19842af6106aSJose Abreu 1985aa042f60SSong, Yoong Siang rx_q->buf_pool = kcalloc(priv->dma_rx_size, 1986aa042f60SSong, Yoong Siang sizeof(*rx_q->buf_pool), 19875bacd778SLABBE Corentin GFP_KERNEL); 19882af6106aSJose Abreu if (!rx_q->buf_pool) 1989da5ec7f2SOng Boon Leong return -ENOMEM; 19905bacd778SLABBE Corentin 19915bacd778SLABBE Corentin if (priv->extend_desc) { 1992750afb08SLuis Chamberlain rx_q->dma_erx = dma_alloc_coherent(priv->device, 1993aa042f60SSong, Yoong Siang priv->dma_rx_size * 1994aa042f60SSong, Yoong Siang sizeof(struct dma_extended_desc), 199554139cf3SJoao Pinto &rx_q->dma_rx_phy, 19965bacd778SLABBE Corentin GFP_KERNEL); 199754139cf3SJoao Pinto if (!rx_q->dma_erx) 1998da5ec7f2SOng Boon Leong return -ENOMEM; 19995bacd778SLABBE Corentin 200071fedb01SJoao Pinto } else { 2001750afb08SLuis Chamberlain rx_q->dma_rx = dma_alloc_coherent(priv->device, 2002aa042f60SSong, Yoong Siang priv->dma_rx_size * 2003aa042f60SSong, Yoong Siang sizeof(struct dma_desc), 200454139cf3SJoao Pinto &rx_q->dma_rx_phy, 200571fedb01SJoao Pinto GFP_KERNEL); 200654139cf3SJoao Pinto if (!rx_q->dma_rx) 2007da5ec7f2SOng Boon Leong return -ENOMEM; 200871fedb01SJoao Pinto } 2009be8b38a7SOng Boon Leong 2010132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 2011132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) 2012132c32eeSOng Boon Leong napi_id = ch->rxtx_napi.napi_id; 2013132c32eeSOng Boon Leong else 2014132c32eeSOng Boon Leong napi_id = ch->rx_napi.napi_id; 
2015132c32eeSOng Boon Leong 2016be8b38a7SOng Boon Leong ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, 2017be8b38a7SOng Boon Leong rx_q->queue_index, 2018132c32eeSOng Boon Leong napi_id); 2019be8b38a7SOng Boon Leong if (ret) { 2020be8b38a7SOng Boon Leong netdev_err(priv->dev, "Failed to register xdp rxq info\n"); 2021da5ec7f2SOng Boon Leong return -EINVAL; 2022be8b38a7SOng Boon Leong } 2023da5ec7f2SOng Boon Leong 2024da5ec7f2SOng Boon Leong return 0; 2025da5ec7f2SOng Boon Leong } 2026da5ec7f2SOng Boon Leong 2027da5ec7f2SOng Boon Leong static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) 2028da5ec7f2SOng Boon Leong { 2029da5ec7f2SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 2030da5ec7f2SOng Boon Leong u32 queue; 2031da5ec7f2SOng Boon Leong int ret; 2032da5ec7f2SOng Boon Leong 2033da5ec7f2SOng Boon Leong /* RX queues buffers and DMA */ 2034da5ec7f2SOng Boon Leong for (queue = 0; queue < rx_count; queue++) { 2035da5ec7f2SOng Boon Leong ret = __alloc_dma_rx_desc_resources(priv, queue); 2036da5ec7f2SOng Boon Leong if (ret) 2037da5ec7f2SOng Boon Leong goto err_dma; 203854139cf3SJoao Pinto } 203971fedb01SJoao Pinto 204071fedb01SJoao Pinto return 0; 204171fedb01SJoao Pinto 204271fedb01SJoao Pinto err_dma: 204354139cf3SJoao Pinto free_dma_rx_desc_resources(priv); 204454139cf3SJoao Pinto 204571fedb01SJoao Pinto return ret; 204671fedb01SJoao Pinto } 204771fedb01SJoao Pinto 204871fedb01SJoao Pinto /** 2049da5ec7f2SOng Boon Leong * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). 205071fedb01SJoao Pinto * @priv: private structure 2051da5ec7f2SOng Boon Leong * @queue: TX queue index 205271fedb01SJoao Pinto * Description: according to which descriptor can be used (extend or basic) 205371fedb01SJoao Pinto * this function allocates the resources for TX and RX paths. In case of 205471fedb01SJoao Pinto * reception, for example, it pre-allocated the RX socket buffer in order to 205571fedb01SJoao Pinto * allow zero-copy mechanism. 
205671fedb01SJoao Pinto */ 2057da5ec7f2SOng Boon Leong static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) 205871fedb01SJoao Pinto { 2059ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2060579a25a8SJose Abreu size_t size; 2061579a25a8SJose Abreu void *addr; 2062ce736788SJoao Pinto 2063ce736788SJoao Pinto tx_q->queue_index = queue; 2064ce736788SJoao Pinto tx_q->priv_data = priv; 2065ce736788SJoao Pinto 2066aa042f60SSong, Yoong Siang tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, 2067ce736788SJoao Pinto sizeof(*tx_q->tx_skbuff_dma), 206871fedb01SJoao Pinto GFP_KERNEL); 2069ce736788SJoao Pinto if (!tx_q->tx_skbuff_dma) 2070da5ec7f2SOng Boon Leong return -ENOMEM; 207171fedb01SJoao Pinto 2072aa042f60SSong, Yoong Siang tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, 2073ce736788SJoao Pinto sizeof(struct sk_buff *), 207471fedb01SJoao Pinto GFP_KERNEL); 2075ce736788SJoao Pinto if (!tx_q->tx_skbuff) 2076da5ec7f2SOng Boon Leong return -ENOMEM; 207771fedb01SJoao Pinto 2078579a25a8SJose Abreu if (priv->extend_desc) 2079579a25a8SJose Abreu size = sizeof(struct dma_extended_desc); 2080579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2081579a25a8SJose Abreu size = sizeof(struct dma_edesc); 2082579a25a8SJose Abreu else 2083579a25a8SJose Abreu size = sizeof(struct dma_desc); 2084579a25a8SJose Abreu 2085aa042f60SSong, Yoong Siang size *= priv->dma_tx_size; 2086579a25a8SJose Abreu 2087579a25a8SJose Abreu addr = dma_alloc_coherent(priv->device, size, 2088579a25a8SJose Abreu &tx_q->dma_tx_phy, GFP_KERNEL); 2089579a25a8SJose Abreu if (!addr) 2090da5ec7f2SOng Boon Leong return -ENOMEM; 2091579a25a8SJose Abreu 2092579a25a8SJose Abreu if (priv->extend_desc) 2093579a25a8SJose Abreu tx_q->dma_etx = addr; 2094579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2095579a25a8SJose Abreu tx_q->dma_entx = addr; 2096579a25a8SJose Abreu else 2097579a25a8SJose Abreu tx_q->dma_tx = addr; 2098da5ec7f2SOng Boon Leong 2099da5ec7f2SOng 
Boon Leong return 0; 2100da5ec7f2SOng Boon Leong } 2101da5ec7f2SOng Boon Leong 2102da5ec7f2SOng Boon Leong static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) 2103da5ec7f2SOng Boon Leong { 2104da5ec7f2SOng Boon Leong u32 tx_count = priv->plat->tx_queues_to_use; 2105da5ec7f2SOng Boon Leong u32 queue; 2106da5ec7f2SOng Boon Leong int ret; 2107da5ec7f2SOng Boon Leong 2108da5ec7f2SOng Boon Leong /* TX queues buffers and DMA */ 2109da5ec7f2SOng Boon Leong for (queue = 0; queue < tx_count; queue++) { 2110da5ec7f2SOng Boon Leong ret = __alloc_dma_tx_desc_resources(priv, queue); 2111da5ec7f2SOng Boon Leong if (ret) 2112da5ec7f2SOng Boon Leong goto err_dma; 21135bacd778SLABBE Corentin } 21145bacd778SLABBE Corentin 21155bacd778SLABBE Corentin return 0; 21165bacd778SLABBE Corentin 211762242260SChristophe Jaillet err_dma: 2118ce736788SJoao Pinto free_dma_tx_desc_resources(priv); 211909f8d696SSrinivas Kandagatla return ret; 21205bacd778SLABBE Corentin } 212109f8d696SSrinivas Kandagatla 212271fedb01SJoao Pinto /** 212371fedb01SJoao Pinto * alloc_dma_desc_resources - alloc TX/RX resources. 212471fedb01SJoao Pinto * @priv: private structure 212571fedb01SJoao Pinto * Description: according to which descriptor can be used (extend or basic) 212671fedb01SJoao Pinto * this function allocates the resources for TX and RX paths. In case of 212771fedb01SJoao Pinto * reception, for example, it pre-allocated the RX socket buffer in order to 212871fedb01SJoao Pinto * allow zero-copy mechanism. 
212971fedb01SJoao Pinto */ 213071fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv) 21315bacd778SLABBE Corentin { 213254139cf3SJoao Pinto /* RX Allocation */ 213371fedb01SJoao Pinto int ret = alloc_dma_rx_desc_resources(priv); 213471fedb01SJoao Pinto 213571fedb01SJoao Pinto if (ret) 213671fedb01SJoao Pinto return ret; 213771fedb01SJoao Pinto 213871fedb01SJoao Pinto ret = alloc_dma_tx_desc_resources(priv); 213971fedb01SJoao Pinto 214071fedb01SJoao Pinto return ret; 214171fedb01SJoao Pinto } 214271fedb01SJoao Pinto 214371fedb01SJoao Pinto /** 214471fedb01SJoao Pinto * free_dma_desc_resources - free dma desc resources 214571fedb01SJoao Pinto * @priv: private structure 214671fedb01SJoao Pinto */ 214771fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv) 214871fedb01SJoao Pinto { 214971fedb01SJoao Pinto /* Release the DMA TX socket buffers */ 215071fedb01SJoao Pinto free_dma_tx_desc_resources(priv); 2151be8b38a7SOng Boon Leong 2152be8b38a7SOng Boon Leong /* Release the DMA RX socket buffers later 2153be8b38a7SOng Boon Leong * to ensure all pending XDP_TX buffers are returned. 
2154be8b38a7SOng Boon Leong */ 2155be8b38a7SOng Boon Leong free_dma_rx_desc_resources(priv); 215671fedb01SJoao Pinto } 215771fedb01SJoao Pinto 215871fedb01SJoao Pinto /** 21599eb12474Sjpinto * stmmac_mac_enable_rx_queues - Enable MAC rx queues 21609eb12474Sjpinto * @priv: driver private structure 21619eb12474Sjpinto * Description: It is used for enabling the rx queues in the MAC 21629eb12474Sjpinto */ 21639eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 21649eb12474Sjpinto { 21654f6046f5SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 21664f6046f5SJoao Pinto int queue; 21674f6046f5SJoao Pinto u8 mode; 21689eb12474Sjpinto 21694f6046f5SJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 21704f6046f5SJoao Pinto mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 2171c10d4c82SJose Abreu stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 21724f6046f5SJoao Pinto } 21739eb12474Sjpinto } 21749eb12474Sjpinto 21759eb12474Sjpinto /** 2176ae4f0d46SJoao Pinto * stmmac_start_rx_dma - start RX DMA channel 2177ae4f0d46SJoao Pinto * @priv: driver private structure 2178ae4f0d46SJoao Pinto * @chan: RX channel index 2179ae4f0d46SJoao Pinto * Description: 2180ae4f0d46SJoao Pinto * This starts a RX DMA channel 2181ae4f0d46SJoao Pinto */ 2182ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2183ae4f0d46SJoao Pinto { 2184ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2185a4e887faSJose Abreu stmmac_start_rx(priv, priv->ioaddr, chan); 2186ae4f0d46SJoao Pinto } 2187ae4f0d46SJoao Pinto 2188ae4f0d46SJoao Pinto /** 2189ae4f0d46SJoao Pinto * stmmac_start_tx_dma - start TX DMA channel 2190ae4f0d46SJoao Pinto * @priv: driver private structure 2191ae4f0d46SJoao Pinto * @chan: TX channel index 2192ae4f0d46SJoao Pinto * Description: 2193ae4f0d46SJoao Pinto * This starts a TX DMA channel 2194ae4f0d46SJoao Pinto */ 2195ae4f0d46SJoao Pinto static void 
stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2196ae4f0d46SJoao Pinto { 2197ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2198a4e887faSJose Abreu stmmac_start_tx(priv, priv->ioaddr, chan); 2199ae4f0d46SJoao Pinto } 2200ae4f0d46SJoao Pinto 2201ae4f0d46SJoao Pinto /** 2202ae4f0d46SJoao Pinto * stmmac_stop_rx_dma - stop RX DMA channel 2203ae4f0d46SJoao Pinto * @priv: driver private structure 2204ae4f0d46SJoao Pinto * @chan: RX channel index 2205ae4f0d46SJoao Pinto * Description: 2206ae4f0d46SJoao Pinto * This stops a RX DMA channel 2207ae4f0d46SJoao Pinto */ 2208ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2209ae4f0d46SJoao Pinto { 2210ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2211a4e887faSJose Abreu stmmac_stop_rx(priv, priv->ioaddr, chan); 2212ae4f0d46SJoao Pinto } 2213ae4f0d46SJoao Pinto 2214ae4f0d46SJoao Pinto /** 2215ae4f0d46SJoao Pinto * stmmac_stop_tx_dma - stop TX DMA channel 2216ae4f0d46SJoao Pinto * @priv: driver private structure 2217ae4f0d46SJoao Pinto * @chan: TX channel index 2218ae4f0d46SJoao Pinto * Description: 2219ae4f0d46SJoao Pinto * This stops a TX DMA channel 2220ae4f0d46SJoao Pinto */ 2221ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2222ae4f0d46SJoao Pinto { 2223ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2224a4e887faSJose Abreu stmmac_stop_tx(priv, priv->ioaddr, chan); 2225ae4f0d46SJoao Pinto } 2226ae4f0d46SJoao Pinto 2227ae4f0d46SJoao Pinto /** 2228ae4f0d46SJoao Pinto * stmmac_start_all_dma - start all RX and TX DMA channels 2229ae4f0d46SJoao Pinto * @priv: driver private structure 2230ae4f0d46SJoao Pinto * Description: 2231ae4f0d46SJoao Pinto * This starts all the RX and TX DMA channels 2232ae4f0d46SJoao Pinto */ 2233ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv) 
2234ae4f0d46SJoao Pinto { 2235ae4f0d46SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 2236ae4f0d46SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2237ae4f0d46SJoao Pinto u32 chan = 0; 2238ae4f0d46SJoao Pinto 2239ae4f0d46SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 2240ae4f0d46SJoao Pinto stmmac_start_rx_dma(priv, chan); 2241ae4f0d46SJoao Pinto 2242ae4f0d46SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 2243ae4f0d46SJoao Pinto stmmac_start_tx_dma(priv, chan); 2244ae4f0d46SJoao Pinto } 2245ae4f0d46SJoao Pinto 2246ae4f0d46SJoao Pinto /** 2247ae4f0d46SJoao Pinto * stmmac_stop_all_dma - stop all RX and TX DMA channels 2248ae4f0d46SJoao Pinto * @priv: driver private structure 2249ae4f0d46SJoao Pinto * Description: 2250ae4f0d46SJoao Pinto * This stops the RX and TX DMA channels 2251ae4f0d46SJoao Pinto */ 2252ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv) 2253ae4f0d46SJoao Pinto { 2254ae4f0d46SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 2255ae4f0d46SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2256ae4f0d46SJoao Pinto u32 chan = 0; 2257ae4f0d46SJoao Pinto 2258ae4f0d46SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 2259ae4f0d46SJoao Pinto stmmac_stop_rx_dma(priv, chan); 2260ae4f0d46SJoao Pinto 2261ae4f0d46SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 2262ae4f0d46SJoao Pinto stmmac_stop_tx_dma(priv, chan); 2263ae4f0d46SJoao Pinto } 2264ae4f0d46SJoao Pinto 2265ae4f0d46SJoao Pinto /** 22667ac6653aSJeff Kirsher * stmmac_dma_operation_mode - HW DMA operation mode 226732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 2268732fdf0eSGiuseppe CAVALLARO * Description: it is used for configuring the DMA operation mode register in 2269732fdf0eSGiuseppe CAVALLARO * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 
22707ac6653aSJeff Kirsher */ 22717ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 22727ac6653aSJeff Kirsher { 22736deee222SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 22746deee222SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2275f88203a2SVince Bridgers int rxfifosz = priv->plat->rx_fifo_size; 227652a76235SJose Abreu int txfifosz = priv->plat->tx_fifo_size; 22776deee222SJoao Pinto u32 txmode = 0; 22786deee222SJoao Pinto u32 rxmode = 0; 22796deee222SJoao Pinto u32 chan = 0; 2280a0daae13SJose Abreu u8 qmode = 0; 2281f88203a2SVince Bridgers 228211fbf811SThierry Reding if (rxfifosz == 0) 228311fbf811SThierry Reding rxfifosz = priv->dma_cap.rx_fifo_size; 228452a76235SJose Abreu if (txfifosz == 0) 228552a76235SJose Abreu txfifosz = priv->dma_cap.tx_fifo_size; 228652a76235SJose Abreu 228752a76235SJose Abreu /* Adjust for real per queue fifo size */ 228852a76235SJose Abreu rxfifosz /= rx_channels_count; 228952a76235SJose Abreu txfifosz /= tx_channels_count; 229011fbf811SThierry Reding 22916deee222SJoao Pinto if (priv->plat->force_thresh_dma_mode) { 22926deee222SJoao Pinto txmode = tc; 22936deee222SJoao Pinto rxmode = tc; 22946deee222SJoao Pinto } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 22957ac6653aSJeff Kirsher /* 22967ac6653aSJeff Kirsher * In case of GMAC, SF mode can be enabled 22977ac6653aSJeff Kirsher * to perform the TX COE in HW. This depends on: 22987ac6653aSJeff Kirsher * 1) TX COE if actually supported 22997ac6653aSJeff Kirsher * 2) There is no bugged Jumbo frame support 23007ac6653aSJeff Kirsher * that needs to not insert csum in the TDES. 
23017ac6653aSJeff Kirsher */ 23026deee222SJoao Pinto txmode = SF_DMA_MODE; 23036deee222SJoao Pinto rxmode = SF_DMA_MODE; 2304b2dec116SSonic Zhang priv->xstats.threshold = SF_DMA_MODE; 23056deee222SJoao Pinto } else { 23066deee222SJoao Pinto txmode = tc; 23076deee222SJoao Pinto rxmode = SF_DMA_MODE; 23086deee222SJoao Pinto } 23096deee222SJoao Pinto 23106deee222SJoao Pinto /* configure all channels */ 2311a0daae13SJose Abreu for (chan = 0; chan < rx_channels_count; chan++) { 2312bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; 2313bba2556eSOng Boon Leong u32 buf_size; 2314bba2556eSOng Boon Leong 2315a0daae13SJose Abreu qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 23166deee222SJoao Pinto 2317a4e887faSJose Abreu stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2318a0daae13SJose Abreu rxfifosz, qmode); 2319bba2556eSOng Boon Leong 2320bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 2321bba2556eSOng Boon Leong buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 2322bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 2323bba2556eSOng Boon Leong buf_size, 23244205c88eSJose Abreu chan); 2325bba2556eSOng Boon Leong } else { 2326bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 2327bba2556eSOng Boon Leong priv->dma_buf_sz, 2328bba2556eSOng Boon Leong chan); 2329bba2556eSOng Boon Leong } 2330a0daae13SJose Abreu } 2331a0daae13SJose Abreu 2332a0daae13SJose Abreu for (chan = 0; chan < tx_channels_count; chan++) { 2333a0daae13SJose Abreu qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2334a0daae13SJose Abreu 2335a4e887faSJose Abreu stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2336a0daae13SJose Abreu txfifosz, qmode); 2337a0daae13SJose Abreu } 23387ac6653aSJeff Kirsher } 23397ac6653aSJeff Kirsher 2340132c32eeSOng Boon Leong static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 2341132c32eeSOng Boon Leong { 2342132c32eeSOng Boon Leong struct netdev_queue *nq = 
netdev_get_tx_queue(priv->dev, queue); 2343132c32eeSOng Boon Leong struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2344132c32eeSOng Boon Leong struct xsk_buff_pool *pool = tx_q->xsk_pool; 2345132c32eeSOng Boon Leong unsigned int entry = tx_q->cur_tx; 2346132c32eeSOng Boon Leong struct dma_desc *tx_desc = NULL; 2347132c32eeSOng Boon Leong struct xdp_desc xdp_desc; 2348132c32eeSOng Boon Leong bool work_done = true; 2349132c32eeSOng Boon Leong 2350132c32eeSOng Boon Leong /* Avoids TX time-out as we are sharing with slow path */ 2351132c32eeSOng Boon Leong nq->trans_start = jiffies; 2352132c32eeSOng Boon Leong 2353132c32eeSOng Boon Leong budget = min(budget, stmmac_tx_avail(priv, queue)); 2354132c32eeSOng Boon Leong 2355132c32eeSOng Boon Leong while (budget-- > 0) { 2356132c32eeSOng Boon Leong dma_addr_t dma_addr; 2357132c32eeSOng Boon Leong bool set_ic; 2358132c32eeSOng Boon Leong 2359132c32eeSOng Boon Leong /* We are sharing with slow path and stop XSK TX desc submission when 2360132c32eeSOng Boon Leong * available TX ring is less than threshold. 
2361132c32eeSOng Boon Leong */ 2362132c32eeSOng Boon Leong if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || 2363132c32eeSOng Boon Leong !netif_carrier_ok(priv->dev)) { 2364132c32eeSOng Boon Leong work_done = false; 2365132c32eeSOng Boon Leong break; 2366132c32eeSOng Boon Leong } 2367132c32eeSOng Boon Leong 2368132c32eeSOng Boon Leong if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2369132c32eeSOng Boon Leong break; 2370132c32eeSOng Boon Leong 2371132c32eeSOng Boon Leong if (likely(priv->extend_desc)) 2372132c32eeSOng Boon Leong tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2373132c32eeSOng Boon Leong else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2374132c32eeSOng Boon Leong tx_desc = &tx_q->dma_entx[entry].basic; 2375132c32eeSOng Boon Leong else 2376132c32eeSOng Boon Leong tx_desc = tx_q->dma_tx + entry; 2377132c32eeSOng Boon Leong 2378132c32eeSOng Boon Leong dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2379132c32eeSOng Boon Leong xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); 2380132c32eeSOng Boon Leong 2381132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; 2382132c32eeSOng Boon Leong 2383132c32eeSOng Boon Leong /* To return XDP buffer to XSK pool, we simple call 2384132c32eeSOng Boon Leong * xsk_tx_completed(), so we don't need to fill up 2385132c32eeSOng Boon Leong * 'buf' and 'xdpf'. 
2386132c32eeSOng Boon Leong */ 2387132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf = 0; 2388132c32eeSOng Boon Leong tx_q->xdpf[entry] = NULL; 2389132c32eeSOng Boon Leong 2390132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].map_as_page = false; 2391132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; 2392132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].last_segment = true; 2393132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2394132c32eeSOng Boon Leong 2395132c32eeSOng Boon Leong stmmac_set_desc_addr(priv, tx_desc, dma_addr); 2396132c32eeSOng Boon Leong 2397132c32eeSOng Boon Leong tx_q->tx_count_frames++; 2398132c32eeSOng Boon Leong 2399132c32eeSOng Boon Leong if (!priv->tx_coal_frames[queue]) 2400132c32eeSOng Boon Leong set_ic = false; 2401132c32eeSOng Boon Leong else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 2402132c32eeSOng Boon Leong set_ic = true; 2403132c32eeSOng Boon Leong else 2404132c32eeSOng Boon Leong set_ic = false; 2405132c32eeSOng Boon Leong 2406132c32eeSOng Boon Leong if (set_ic) { 2407132c32eeSOng Boon Leong tx_q->tx_count_frames = 0; 2408132c32eeSOng Boon Leong stmmac_set_tx_ic(priv, tx_desc); 2409132c32eeSOng Boon Leong priv->xstats.tx_set_ic_bit++; 2410132c32eeSOng Boon Leong } 2411132c32eeSOng Boon Leong 2412132c32eeSOng Boon Leong stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, 2413132c32eeSOng Boon Leong true, priv->mode, true, true, 2414132c32eeSOng Boon Leong xdp_desc.len); 2415132c32eeSOng Boon Leong 2416132c32eeSOng Boon Leong stmmac_enable_dma_transmission(priv, priv->ioaddr); 2417132c32eeSOng Boon Leong 2418132c32eeSOng Boon Leong tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); 2419132c32eeSOng Boon Leong entry = tx_q->cur_tx; 2420132c32eeSOng Boon Leong } 2421132c32eeSOng Boon Leong 2422132c32eeSOng Boon Leong if (tx_desc) { 2423132c32eeSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 2424132c32eeSOng Boon Leong xsk_tx_release(pool); 
	}

	/* Return true if all of the 3 conditions are met
	 *  a) TX Budget is still available
	 *  b) work_done = true when XSK TX desc peek is empty (no more
	 *     pending XSK TX for transmission)
	 */
	return !!budget && work_done;
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: napi budget limiting this functions packet handling
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 * Return: max of the number of reclaimed descriptors and the XSK xmit result
 * (budget or budget - 1), consumed by the NAPI poll loop to decide whether
 * polling should continue.
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, xmits = 0, count = 0;

	/* Take the TX queue lock (BH-safe) so reclaim does not race the
	 * xmit path for this queue.
	 */
	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	tx_q->xsk_frames_done = 0;

	entry = tx_q->dirty_tx;

	/* Try to clean all TX complete frame in 1 shot */
	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		struct dma_desc *p;
		int status;

		/* Recover the buffer that originated this descriptor:
		 * an XDP frame (TX or NDO), an skb, or neither (XSK).
		 */
		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdpf = tx_q->xdpf[entry];
			skb = NULL;
		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			xdpf = NULL;
			skb = tx_q->tx_skbuff[entry];
		} else {
			xdpf = NULL;
			skb = NULL;
		}

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[entry].basic;
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
					  &priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
				priv->xstats.txq_stats[queue].tx_pkt_n++;
			}
			if (skb)
				stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		/* Unmap the DMA buffer unless it belongs to XDP_TX, whose
		 * pages are recycled rather than unmapped here.
		 */
		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdp_return_frame(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
			tx_q->xsk_frames_done++;

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			if (likely(skb)) {
				pkts_compl++;
				bytes_compl += skb->len;
				dev_consume_skb_any(skb);
				tx_q->tx_skbuff[entry] = NULL;
			}
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	/* Wake the queue once enough descriptors have been freed */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if (tx_q->xsk_pool) {
		bool work_done;

		if (tx_q->xsk_frames_done)
			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);

		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
			xsk_set_tx_need_wakeup(tx_q->xsk_pool);

		/* For XSK TX, we try to send as many as possible.
		 * If XSK work done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to reenable TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
		 */
		work_done = stmmac_xdp_xmit_zc(priv, queue,
					       STMMAC_XSK_TX_BUDGET_MAX);
		if (work_done)
			xmits = budget - 1;
		else
			xmits = budget;
	}

	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
	    priv->eee_sw_timer_en) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		hrtimer_start(&tx_q->txtimer,
			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
			      HRTIMER_MODE_REL);

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	/* Combine decisions from TX clean and XSK TX */
	return max(count, xmits);
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	/* Recovery sequence: stop the channel's DMA, drop all pending
	 * buffers, reset the descriptor ring and indices, re-seed the
	 * DMA with the ring base and restart it. The order matters.
	 */
	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	stmmac_clear_tx_descriptors(priv, chan);
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}

/**
 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 * @priv: driver private structure
 * @txmode: TX operating mode
 * @rxmode: RX operating mode
 * @chan: channel index
 * Description: it is used for configuring of the DMA operation mode in
 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 * mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	/* Fall back to the HW-reported FIFO sizes when the platform
	 * does not provide them.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}

/* Handle a safety-feature interrupt. Returns true (and raises a global
 * error) for any reported failure other than -EINVAL.
 */
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}

/* Read the DMA interrupt status for @chan and schedule the matching NAPI
 * context(s). The channel's DMA IRQ is masked under ch->lock before NAPI
 * is scheduled; NOTE(review): it is presumably re-enabled by the poll
 * routine when it completes — that code is outside this excerpt, confirm.
 * When an XSK pool is attached, the combined rxtx NAPI is used instead of
 * the separate rx/tx ones. Returns the raw status word.
 */
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan, dir);
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
	struct stmmac_channel *ch = &priv->channel[chan];
	struct napi_struct *rx_napi;
	struct napi_struct *tx_napi;
	unsigned long flags;

	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(rx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(rx_napi);
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		if (napi_schedule_prep(tx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(tx_napi);
		}
	}

	return status;
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedule poll method in case of some
 * work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan,
						 DMA_DIR_RXTX);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure.
			 * "tc" is not declared in this function: it is a
			 * threshold variable defined outside this excerpt.
			 */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								    tc,
								    SF_DMA_MODE,
								    chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	/* Only configure/zero the counters when the HW reports RMON support */
	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 * Return: nonzero when the HW capability register was read successfully.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failures it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		/* Try the address programmed in the MAC first, then fall
		 * back to a random one.
		 */
		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case of these are not passed a default is kept for the MAC or GMAC.
 * The sequence (reset, core init, AXI, per-channel CSR/RX/TX init,
 * tail-pointer programming) is order-dependent.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	/* atds flag is passed to stmmac_dma_init when extended
	 * descriptors are used in ring mode.
	 */
	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}

/* (Re)arm the TX coalescing hrtimer for @queue */
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	hrtimer_start(&tx_q->txtimer,
		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
		      HRTIMER_MODE_REL);
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the per-queue hrtimer embedded in struct stmmac_tx_queue
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 * It masks the channel's TX DMA IRQ and schedules the (rx)tx NAPI context.
 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	ch = &priv->channel[tx_q->queue_index];
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}

/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 chan;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	for (chan = 0; chan < rx_channel_count; chan++)
		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
}

/* Program the HW ring length (last valid index) for every TX/RX channel */
static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_set_tx_ring_len(priv, priv->ioaddr,
				       (priv->dma_tx_size - 1), chan);

	/* set RX ring length */
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_set_rx_ring_len(priv, priv->ioaddr,
(priv->dma_rx_size - 1), chan); 29744854ab99SJoao Pinto } 29754854ab99SJoao Pinto 29769125cdd1SGiuseppe CAVALLARO /** 29776a3a7193SJoao Pinto * stmmac_set_tx_queue_weight - Set TX queue weight 29786a3a7193SJoao Pinto * @priv: driver private structure 29796a3a7193SJoao Pinto * Description: It is used for setting TX queues weight 29806a3a7193SJoao Pinto */ 29816a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 29826a3a7193SJoao Pinto { 29836a3a7193SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 29846a3a7193SJoao Pinto u32 weight; 29856a3a7193SJoao Pinto u32 queue; 29866a3a7193SJoao Pinto 29876a3a7193SJoao Pinto for (queue = 0; queue < tx_queues_count; queue++) { 29886a3a7193SJoao Pinto weight = priv->plat->tx_queues_cfg[queue].weight; 2989c10d4c82SJose Abreu stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 29906a3a7193SJoao Pinto } 29916a3a7193SJoao Pinto } 29926a3a7193SJoao Pinto 29936a3a7193SJoao Pinto /** 299419d91873SJoao Pinto * stmmac_configure_cbs - Configure CBS in TX queue 299519d91873SJoao Pinto * @priv: driver private structure 299619d91873SJoao Pinto * Description: It is used for configuring CBS in AVB TX queues 299719d91873SJoao Pinto */ 299819d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv) 299919d91873SJoao Pinto { 300019d91873SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 300119d91873SJoao Pinto u32 mode_to_use; 300219d91873SJoao Pinto u32 queue; 300319d91873SJoao Pinto 300444781fefSJoao Pinto /* queue 0 is reserved for legacy traffic */ 300544781fefSJoao Pinto for (queue = 1; queue < tx_queues_count; queue++) { 300619d91873SJoao Pinto mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 300719d91873SJoao Pinto if (mode_to_use == MTL_QUEUE_DCB) 300819d91873SJoao Pinto continue; 300919d91873SJoao Pinto 3010c10d4c82SJose Abreu stmmac_config_cbs(priv, priv->hw, 301119d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].send_slope, 
301219d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].idle_slope, 301319d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].high_credit, 301419d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].low_credit, 301519d91873SJoao Pinto queue); 301619d91873SJoao Pinto } 301719d91873SJoao Pinto } 301819d91873SJoao Pinto 301919d91873SJoao Pinto /** 3020d43042f4SJoao Pinto * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 3021d43042f4SJoao Pinto * @priv: driver private structure 3022d43042f4SJoao Pinto * Description: It is used for mapping RX queues to RX dma channels 3023d43042f4SJoao Pinto */ 3024d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 3025d43042f4SJoao Pinto { 3026d43042f4SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3027d43042f4SJoao Pinto u32 queue; 3028d43042f4SJoao Pinto u32 chan; 3029d43042f4SJoao Pinto 3030d43042f4SJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 3031d43042f4SJoao Pinto chan = priv->plat->rx_queues_cfg[queue].chan; 3032c10d4c82SJose Abreu stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 3033d43042f4SJoao Pinto } 3034d43042f4SJoao Pinto } 3035d43042f4SJoao Pinto 3036d43042f4SJoao Pinto /** 3037a8f5102aSJoao Pinto * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 3038a8f5102aSJoao Pinto * @priv: driver private structure 3039a8f5102aSJoao Pinto * Description: It is used for configuring the RX Queue Priority 3040a8f5102aSJoao Pinto */ 3041a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 3042a8f5102aSJoao Pinto { 3043a8f5102aSJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3044a8f5102aSJoao Pinto u32 queue; 3045a8f5102aSJoao Pinto u32 prio; 3046a8f5102aSJoao Pinto 3047a8f5102aSJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 3048a8f5102aSJoao Pinto if (!priv->plat->rx_queues_cfg[queue].use_prio) 3049a8f5102aSJoao Pinto continue; 3050a8f5102aSJoao Pinto 3051a8f5102aSJoao 
Pinto prio = priv->plat->rx_queues_cfg[queue].prio; 3052c10d4c82SJose Abreu stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 3053a8f5102aSJoao Pinto } 3054a8f5102aSJoao Pinto } 3055a8f5102aSJoao Pinto 3056a8f5102aSJoao Pinto /** 3057a8f5102aSJoao Pinto * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 3058a8f5102aSJoao Pinto * @priv: driver private structure 3059a8f5102aSJoao Pinto * Description: It is used for configuring the TX Queue Priority 3060a8f5102aSJoao Pinto */ 3061a8f5102aSJoao Pinto static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 3062a8f5102aSJoao Pinto { 3063a8f5102aSJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 3064a8f5102aSJoao Pinto u32 queue; 3065a8f5102aSJoao Pinto u32 prio; 3066a8f5102aSJoao Pinto 3067a8f5102aSJoao Pinto for (queue = 0; queue < tx_queues_count; queue++) { 3068a8f5102aSJoao Pinto if (!priv->plat->tx_queues_cfg[queue].use_prio) 3069a8f5102aSJoao Pinto continue; 3070a8f5102aSJoao Pinto 3071a8f5102aSJoao Pinto prio = priv->plat->tx_queues_cfg[queue].prio; 3072c10d4c82SJose Abreu stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 3073a8f5102aSJoao Pinto } 3074a8f5102aSJoao Pinto } 3075a8f5102aSJoao Pinto 3076a8f5102aSJoao Pinto /** 3077abe80fdcSJoao Pinto * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 3078abe80fdcSJoao Pinto * @priv: driver private structure 3079abe80fdcSJoao Pinto * Description: It is used for configuring the RX queue routing 3080abe80fdcSJoao Pinto */ 3081abe80fdcSJoao Pinto static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 3082abe80fdcSJoao Pinto { 3083abe80fdcSJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3084abe80fdcSJoao Pinto u32 queue; 3085abe80fdcSJoao Pinto u8 packet; 3086abe80fdcSJoao Pinto 3087abe80fdcSJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 3088abe80fdcSJoao Pinto /* no specific packet type routing specified for the queue */ 3089abe80fdcSJoao Pinto if 
(priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 3090abe80fdcSJoao Pinto continue; 3091abe80fdcSJoao Pinto 3092abe80fdcSJoao Pinto packet = priv->plat->rx_queues_cfg[queue].pkt_route; 3093c10d4c82SJose Abreu stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 3094abe80fdcSJoao Pinto } 3095abe80fdcSJoao Pinto } 3096abe80fdcSJoao Pinto 309776067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv) 309876067459SJose Abreu { 309976067459SJose Abreu if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 310076067459SJose Abreu priv->rss.enable = false; 310176067459SJose Abreu return; 310276067459SJose Abreu } 310376067459SJose Abreu 310476067459SJose Abreu if (priv->dev->features & NETIF_F_RXHASH) 310576067459SJose Abreu priv->rss.enable = true; 310676067459SJose Abreu else 310776067459SJose Abreu priv->rss.enable = false; 310876067459SJose Abreu 310976067459SJose Abreu stmmac_rss_configure(priv, priv->hw, &priv->rss, 311076067459SJose Abreu priv->plat->rx_queues_to_use); 311176067459SJose Abreu } 311276067459SJose Abreu 3113abe80fdcSJoao Pinto /** 3114d0a9c9f9SJoao Pinto * stmmac_mtl_configuration - Configure MTL 3115d0a9c9f9SJoao Pinto * @priv: driver private structure 3116d0a9c9f9SJoao Pinto * Description: It is used for configurring MTL 3117d0a9c9f9SJoao Pinto */ 3118d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv) 3119d0a9c9f9SJoao Pinto { 3120d0a9c9f9SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3121d0a9c9f9SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 3122d0a9c9f9SJoao Pinto 3123c10d4c82SJose Abreu if (tx_queues_count > 1) 31246a3a7193SJoao Pinto stmmac_set_tx_queue_weight(priv); 31256a3a7193SJoao Pinto 3126d0a9c9f9SJoao Pinto /* Configure MTL RX algorithms */ 3127c10d4c82SJose Abreu if (rx_queues_count > 1) 3128c10d4c82SJose Abreu stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 3129d0a9c9f9SJoao Pinto priv->plat->rx_sched_algorithm); 3130d0a9c9f9SJoao Pinto 
3131d0a9c9f9SJoao Pinto /* Configure MTL TX algorithms */ 3132c10d4c82SJose Abreu if (tx_queues_count > 1) 3133c10d4c82SJose Abreu stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 3134d0a9c9f9SJoao Pinto priv->plat->tx_sched_algorithm); 3135d0a9c9f9SJoao Pinto 313619d91873SJoao Pinto /* Configure CBS in AVB TX queues */ 3137c10d4c82SJose Abreu if (tx_queues_count > 1) 313819d91873SJoao Pinto stmmac_configure_cbs(priv); 313919d91873SJoao Pinto 3140d43042f4SJoao Pinto /* Map RX MTL to DMA channels */ 3141d43042f4SJoao Pinto stmmac_rx_queue_dma_chan_map(priv); 3142d43042f4SJoao Pinto 3143d0a9c9f9SJoao Pinto /* Enable MAC RX Queues */ 3144d0a9c9f9SJoao Pinto stmmac_mac_enable_rx_queues(priv); 31456deee222SJoao Pinto 3146a8f5102aSJoao Pinto /* Set RX priorities */ 3147c10d4c82SJose Abreu if (rx_queues_count > 1) 3148a8f5102aSJoao Pinto stmmac_mac_config_rx_queues_prio(priv); 3149a8f5102aSJoao Pinto 3150a8f5102aSJoao Pinto /* Set TX priorities */ 3151c10d4c82SJose Abreu if (tx_queues_count > 1) 3152a8f5102aSJoao Pinto stmmac_mac_config_tx_queues_prio(priv); 3153abe80fdcSJoao Pinto 3154abe80fdcSJoao Pinto /* Set RX routing */ 3155c10d4c82SJose Abreu if (rx_queues_count > 1) 3156abe80fdcSJoao Pinto stmmac_mac_config_rx_queues_routing(priv); 315776067459SJose Abreu 315876067459SJose Abreu /* Receive Side Scaling */ 315976067459SJose Abreu if (rx_queues_count > 1) 316076067459SJose Abreu stmmac_mac_config_rss(priv); 3161d0a9c9f9SJoao Pinto } 3162d0a9c9f9SJoao Pinto 31638bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 31648bf993a5SJose Abreu { 3165c10d4c82SJose Abreu if (priv->dma_cap.asp) { 31668bf993a5SJose Abreu netdev_info(priv->dev, "Enabling Safety Features\n"); 31675ac712dcSWong Vee Khee stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, 31685ac712dcSWong Vee Khee priv->plat->safety_feat_cfg); 31698bf993a5SJose Abreu } else { 31708bf993a5SJose Abreu netdev_info(priv->dev, "No Safety Features support found\n"); 
31718bf993a5SJose Abreu } 31728bf993a5SJose Abreu } 31738bf993a5SJose Abreu 31745a558611SOng Boon Leong static int stmmac_fpe_start_wq(struct stmmac_priv *priv) 31755a558611SOng Boon Leong { 31765a558611SOng Boon Leong char *name; 31775a558611SOng Boon Leong 31785a558611SOng Boon Leong clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 3179db7c691dSMohammad Athari Bin Ismail clear_bit(__FPE_REMOVING, &priv->fpe_task_state); 31805a558611SOng Boon Leong 31815a558611SOng Boon Leong name = priv->wq_name; 31825a558611SOng Boon Leong sprintf(name, "%s-fpe", priv->dev->name); 31835a558611SOng Boon Leong 31845a558611SOng Boon Leong priv->fpe_wq = create_singlethread_workqueue(name); 31855a558611SOng Boon Leong if (!priv->fpe_wq) { 31865a558611SOng Boon Leong netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); 31875a558611SOng Boon Leong 31885a558611SOng Boon Leong return -ENOMEM; 31895a558611SOng Boon Leong } 31905a558611SOng Boon Leong netdev_info(priv->dev, "FPE workqueue start"); 31915a558611SOng Boon Leong 31925a558611SOng Boon Leong return 0; 31935a558611SOng Boon Leong } 31945a558611SOng Boon Leong 3195d0a9c9f9SJoao Pinto /** 3196732fdf0eSGiuseppe CAVALLARO * stmmac_hw_setup - setup mac in a usable state. 3197523f11b5SSrinivas Kandagatla * @dev : pointer to the device structure. 3198d0ea5cbdSJesse Brandeburg * @init_ptp: initialize PTP if set 3199523f11b5SSrinivas Kandagatla * Description: 3200732fdf0eSGiuseppe CAVALLARO * this is the main function to setup the HW in a usable state because the 3201732fdf0eSGiuseppe CAVALLARO * dma engine is reset, the core registers are configured (e.g. AXI, 3202732fdf0eSGiuseppe CAVALLARO * Checksum features, timers). The DMA is ready to start receiving and 3203732fdf0eSGiuseppe CAVALLARO * transmitting. 3204523f11b5SSrinivas Kandagatla * Return value: 3205523f11b5SSrinivas Kandagatla * 0 on success and an appropriate (-)ve integer as defined in errno.h 3206523f11b5SSrinivas Kandagatla * file on failure. 
3207523f11b5SSrinivas Kandagatla */ 3208fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 3209523f11b5SSrinivas Kandagatla { 3210523f11b5SSrinivas Kandagatla struct stmmac_priv *priv = netdev_priv(dev); 32113c55d4d0SJoao Pinto u32 rx_cnt = priv->plat->rx_queues_to_use; 3212146617b8SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 3213d08d32d1SOng Boon Leong bool sph_en; 3214146617b8SJoao Pinto u32 chan; 3215523f11b5SSrinivas Kandagatla int ret; 3216523f11b5SSrinivas Kandagatla 3217523f11b5SSrinivas Kandagatla /* DMA initialization and SW reset */ 3218523f11b5SSrinivas Kandagatla ret = stmmac_init_dma_engine(priv); 3219523f11b5SSrinivas Kandagatla if (ret < 0) { 322038ddc59dSLABBE Corentin netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 322138ddc59dSLABBE Corentin __func__); 3222523f11b5SSrinivas Kandagatla return ret; 3223523f11b5SSrinivas Kandagatla } 3224523f11b5SSrinivas Kandagatla 3225523f11b5SSrinivas Kandagatla /* Copy the MAC addr into the HW */ 3226c10d4c82SJose Abreu stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 3227523f11b5SSrinivas Kandagatla 322802e57b9dSGiuseppe CAVALLARO /* PS and related bits will be programmed according to the speed */ 322902e57b9dSGiuseppe CAVALLARO if (priv->hw->pcs) { 323002e57b9dSGiuseppe CAVALLARO int speed = priv->plat->mac_port_sel_speed; 323102e57b9dSGiuseppe CAVALLARO 323202e57b9dSGiuseppe CAVALLARO if ((speed == SPEED_10) || (speed == SPEED_100) || 323302e57b9dSGiuseppe CAVALLARO (speed == SPEED_1000)) { 323402e57b9dSGiuseppe CAVALLARO priv->hw->ps = speed; 323502e57b9dSGiuseppe CAVALLARO } else { 323602e57b9dSGiuseppe CAVALLARO dev_warn(priv->device, "invalid port speed\n"); 323702e57b9dSGiuseppe CAVALLARO priv->hw->ps = 0; 323802e57b9dSGiuseppe CAVALLARO } 323902e57b9dSGiuseppe CAVALLARO } 324002e57b9dSGiuseppe CAVALLARO 3241523f11b5SSrinivas Kandagatla /* Initialize the MAC Core */ 3242c10d4c82SJose Abreu stmmac_core_init(priv, priv->hw, dev); 
3243523f11b5SSrinivas Kandagatla 3244d0a9c9f9SJoao Pinto /* Initialize MTL*/ 3245d0a9c9f9SJoao Pinto stmmac_mtl_configuration(priv); 32469eb12474Sjpinto 32478bf993a5SJose Abreu /* Initialize Safety Features */ 32488bf993a5SJose Abreu stmmac_safety_feat_configuration(priv); 32498bf993a5SJose Abreu 3250c10d4c82SJose Abreu ret = stmmac_rx_ipc(priv, priv->hw); 3251978aded4SGiuseppe CAVALLARO if (!ret) { 325238ddc59dSLABBE Corentin netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 3253978aded4SGiuseppe CAVALLARO priv->plat->rx_coe = STMMAC_RX_COE_NONE; 3254d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = 0; 3255978aded4SGiuseppe CAVALLARO } 3256978aded4SGiuseppe CAVALLARO 3257523f11b5SSrinivas Kandagatla /* Enable the MAC Rx/Tx */ 3258c10d4c82SJose Abreu stmmac_mac_set(priv, priv->ioaddr, true); 3259523f11b5SSrinivas Kandagatla 3260b4f0a661SJoao Pinto /* Set the HW DMA mode and the COE */ 3261b4f0a661SJoao Pinto stmmac_dma_operation_mode(priv); 3262b4f0a661SJoao Pinto 3263523f11b5SSrinivas Kandagatla stmmac_mmc_setup(priv); 3264523f11b5SSrinivas Kandagatla 3265fe131929SHuacai Chen if (init_ptp) { 32660ad2be79SThierry Reding ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 32670ad2be79SThierry Reding if (ret < 0) 32680ad2be79SThierry Reding netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); 32690ad2be79SThierry Reding 3270523f11b5SSrinivas Kandagatla ret = stmmac_init_ptp(priv); 3271722eef28SHeiner Kallweit if (ret == -EOPNOTSUPP) 3272722eef28SHeiner Kallweit netdev_warn(priv->dev, "PTP not supported by HW\n"); 3273722eef28SHeiner Kallweit else if (ret) 3274722eef28SHeiner Kallweit netdev_warn(priv->dev, "PTP init failed\n"); 3275fe131929SHuacai Chen } 3276523f11b5SSrinivas Kandagatla 3277388e201dSVineetha G. Jaya Kumaran priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 3278388e201dSVineetha G. Jaya Kumaran 3279388e201dSVineetha G. Jaya Kumaran /* Convert the timer from msec to usec */ 3280388e201dSVineetha G. 
Jaya Kumaran if (!priv->tx_lpi_timer) 3281388e201dSVineetha G. Jaya Kumaran priv->tx_lpi_timer = eee_timer * 1000; 3282523f11b5SSrinivas Kandagatla 3283a4e887faSJose Abreu if (priv->use_riwt) { 3284db2f2842SOng Boon Leong u32 queue; 32854e4337ccSJose Abreu 3286db2f2842SOng Boon Leong for (queue = 0; queue < rx_cnt; queue++) { 3287db2f2842SOng Boon Leong if (!priv->rx_riwt[queue]) 3288db2f2842SOng Boon Leong priv->rx_riwt[queue] = DEF_DMA_RIWT; 3289db2f2842SOng Boon Leong 3290db2f2842SOng Boon Leong stmmac_rx_watchdog(priv, priv->ioaddr, 3291db2f2842SOng Boon Leong priv->rx_riwt[queue], queue); 3292db2f2842SOng Boon Leong } 3293523f11b5SSrinivas Kandagatla } 3294523f11b5SSrinivas Kandagatla 3295c10d4c82SJose Abreu if (priv->hw->pcs) 3296c9ad4c10SBen Dooks (Codethink) stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 3297523f11b5SSrinivas Kandagatla 32984854ab99SJoao Pinto /* set TX and RX rings length */ 32994854ab99SJoao Pinto stmmac_set_rings_length(priv); 33004854ab99SJoao Pinto 3301f748be53SAlexandre TORGUE /* Enable TSO */ 3302146617b8SJoao Pinto if (priv->tso) { 33035e6038b8SOng Boon Leong for (chan = 0; chan < tx_cnt; chan++) { 33045e6038b8SOng Boon Leong struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 33055e6038b8SOng Boon Leong 33065e6038b8SOng Boon Leong /* TSO and TBS cannot co-exist */ 33075e6038b8SOng Boon Leong if (tx_q->tbs & STMMAC_TBS_AVAIL) 33085e6038b8SOng Boon Leong continue; 33095e6038b8SOng Boon Leong 3310a4e887faSJose Abreu stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 3311146617b8SJoao Pinto } 33125e6038b8SOng Boon Leong } 3313f748be53SAlexandre TORGUE 331467afd6d1SJose Abreu /* Enable Split Header */ 3315d08d32d1SOng Boon Leong sph_en = (priv->hw->rx_csum > 0) && priv->sph; 331667afd6d1SJose Abreu for (chan = 0; chan < rx_cnt; chan++) 3317d08d32d1SOng Boon Leong stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 3318d08d32d1SOng Boon Leong 331967afd6d1SJose Abreu 332030d93227SJose Abreu /* VLAN Tag Insertion */ 
332130d93227SJose Abreu if (priv->dma_cap.vlins) 332230d93227SJose Abreu stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 332330d93227SJose Abreu 3324579a25a8SJose Abreu /* TBS */ 3325579a25a8SJose Abreu for (chan = 0; chan < tx_cnt; chan++) { 3326579a25a8SJose Abreu struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 3327579a25a8SJose Abreu int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 3328579a25a8SJose Abreu 3329579a25a8SJose Abreu stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 3330579a25a8SJose Abreu } 3331579a25a8SJose Abreu 3332686cff3dSAashish Verma /* Configure real RX and TX queues */ 3333686cff3dSAashish Verma netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 3334686cff3dSAashish Verma netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 3335686cff3dSAashish Verma 33367d9e6c5aSJose Abreu /* Start the ball rolling... */ 33377d9e6c5aSJose Abreu stmmac_start_all_dma(priv); 33387d9e6c5aSJose Abreu 33395a558611SOng Boon Leong if (priv->dma_cap.fpesel) { 33405a558611SOng Boon Leong stmmac_fpe_start_wq(priv); 33415a558611SOng Boon Leong 33425a558611SOng Boon Leong if (priv->plat->fpe_cfg->enable) 33435a558611SOng Boon Leong stmmac_fpe_handshake(priv, true); 33445a558611SOng Boon Leong } 33455a558611SOng Boon Leong 3346523f11b5SSrinivas Kandagatla return 0; 3347523f11b5SSrinivas Kandagatla } 3348523f11b5SSrinivas Kandagatla 3349c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev) 3350c66f6c37SThierry Reding { 3351c66f6c37SThierry Reding struct stmmac_priv *priv = netdev_priv(dev); 3352c66f6c37SThierry Reding 3353c66f6c37SThierry Reding clk_disable_unprepare(priv->plat->clk_ptp_ref); 3354c66f6c37SThierry Reding } 3355c66f6c37SThierry Reding 33568532f613SOng Boon Leong static void stmmac_free_irq(struct net_device *dev, 33578532f613SOng Boon Leong enum request_irq_err irq_err, int irq_idx) 33588532f613SOng Boon Leong { 33598532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 
33608532f613SOng Boon Leong int j; 33618532f613SOng Boon Leong 33628532f613SOng Boon Leong switch (irq_err) { 33638532f613SOng Boon Leong case REQ_IRQ_ERR_ALL: 33648532f613SOng Boon Leong irq_idx = priv->plat->tx_queues_to_use; 33658532f613SOng Boon Leong fallthrough; 33668532f613SOng Boon Leong case REQ_IRQ_ERR_TX: 33678532f613SOng Boon Leong for (j = irq_idx - 1; j >= 0; j--) { 33688deec94cSOng Boon Leong if (priv->tx_irq[j] > 0) { 33698deec94cSOng Boon Leong irq_set_affinity_hint(priv->tx_irq[j], NULL); 33708532f613SOng Boon Leong free_irq(priv->tx_irq[j], &priv->tx_queue[j]); 33718532f613SOng Boon Leong } 33728deec94cSOng Boon Leong } 33738532f613SOng Boon Leong irq_idx = priv->plat->rx_queues_to_use; 33748532f613SOng Boon Leong fallthrough; 33758532f613SOng Boon Leong case REQ_IRQ_ERR_RX: 33768532f613SOng Boon Leong for (j = irq_idx - 1; j >= 0; j--) { 33778deec94cSOng Boon Leong if (priv->rx_irq[j] > 0) { 33788deec94cSOng Boon Leong irq_set_affinity_hint(priv->rx_irq[j], NULL); 33798532f613SOng Boon Leong free_irq(priv->rx_irq[j], &priv->rx_queue[j]); 33808532f613SOng Boon Leong } 33818deec94cSOng Boon Leong } 33828532f613SOng Boon Leong 33838532f613SOng Boon Leong if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) 33848532f613SOng Boon Leong free_irq(priv->sfty_ue_irq, dev); 33858532f613SOng Boon Leong fallthrough; 33868532f613SOng Boon Leong case REQ_IRQ_ERR_SFTY_UE: 33878532f613SOng Boon Leong if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) 33888532f613SOng Boon Leong free_irq(priv->sfty_ce_irq, dev); 33898532f613SOng Boon Leong fallthrough; 33908532f613SOng Boon Leong case REQ_IRQ_ERR_SFTY_CE: 33918532f613SOng Boon Leong if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) 33928532f613SOng Boon Leong free_irq(priv->lpi_irq, dev); 33938532f613SOng Boon Leong fallthrough; 33948532f613SOng Boon Leong case REQ_IRQ_ERR_LPI: 33958532f613SOng Boon Leong if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) 33968532f613SOng Boon Leong 
free_irq(priv->wol_irq, dev); 33978532f613SOng Boon Leong fallthrough; 33988532f613SOng Boon Leong case REQ_IRQ_ERR_WOL: 33998532f613SOng Boon Leong free_irq(dev->irq, dev); 34008532f613SOng Boon Leong fallthrough; 34018532f613SOng Boon Leong case REQ_IRQ_ERR_MAC: 34028532f613SOng Boon Leong case REQ_IRQ_ERR_NO: 34038532f613SOng Boon Leong /* If MAC IRQ request error, no more IRQ to free */ 34048532f613SOng Boon Leong break; 34058532f613SOng Boon Leong } 34068532f613SOng Boon Leong } 34078532f613SOng Boon Leong 34088532f613SOng Boon Leong static int stmmac_request_irq_multi_msi(struct net_device *dev) 34098532f613SOng Boon Leong { 34108532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 34113e6dc7b6SWong Vee Khee enum request_irq_err irq_err; 34128deec94cSOng Boon Leong cpumask_t cpu_mask; 34138532f613SOng Boon Leong int irq_idx = 0; 34148532f613SOng Boon Leong char *int_name; 34158532f613SOng Boon Leong int ret; 34168532f613SOng Boon Leong int i; 34178532f613SOng Boon Leong 34188532f613SOng Boon Leong /* For common interrupt */ 34198532f613SOng Boon Leong int_name = priv->int_name_mac; 34208532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "mac"); 34218532f613SOng Boon Leong ret = request_irq(dev->irq, stmmac_mac_interrupt, 34228532f613SOng Boon Leong 0, int_name, dev); 34238532f613SOng Boon Leong if (unlikely(ret < 0)) { 34248532f613SOng Boon Leong netdev_err(priv->dev, 34258532f613SOng Boon Leong "%s: alloc mac MSI %d (error: %d)\n", 34268532f613SOng Boon Leong __func__, dev->irq, ret); 34278532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_MAC; 34288532f613SOng Boon Leong goto irq_error; 34298532f613SOng Boon Leong } 34308532f613SOng Boon Leong 34318532f613SOng Boon Leong /* Request the Wake IRQ in case of another line 34328532f613SOng Boon Leong * is used for WoL 34338532f613SOng Boon Leong */ 34348532f613SOng Boon Leong if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 34358532f613SOng Boon Leong int_name = priv->int_name_wol; 
34368532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "wol"); 34378532f613SOng Boon Leong ret = request_irq(priv->wol_irq, 34388532f613SOng Boon Leong stmmac_mac_interrupt, 34398532f613SOng Boon Leong 0, int_name, dev); 34408532f613SOng Boon Leong if (unlikely(ret < 0)) { 34418532f613SOng Boon Leong netdev_err(priv->dev, 34428532f613SOng Boon Leong "%s: alloc wol MSI %d (error: %d)\n", 34438532f613SOng Boon Leong __func__, priv->wol_irq, ret); 34448532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_WOL; 34458532f613SOng Boon Leong goto irq_error; 34468532f613SOng Boon Leong } 34478532f613SOng Boon Leong } 34488532f613SOng Boon Leong 34498532f613SOng Boon Leong /* Request the LPI IRQ in case of another line 34508532f613SOng Boon Leong * is used for LPI 34518532f613SOng Boon Leong */ 34528532f613SOng Boon Leong if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 34538532f613SOng Boon Leong int_name = priv->int_name_lpi; 34548532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "lpi"); 34558532f613SOng Boon Leong ret = request_irq(priv->lpi_irq, 34568532f613SOng Boon Leong stmmac_mac_interrupt, 34578532f613SOng Boon Leong 0, int_name, dev); 34588532f613SOng Boon Leong if (unlikely(ret < 0)) { 34598532f613SOng Boon Leong netdev_err(priv->dev, 34608532f613SOng Boon Leong "%s: alloc lpi MSI %d (error: %d)\n", 34618532f613SOng Boon Leong __func__, priv->lpi_irq, ret); 34628532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_LPI; 34638532f613SOng Boon Leong goto irq_error; 34648532f613SOng Boon Leong } 34658532f613SOng Boon Leong } 34668532f613SOng Boon Leong 34678532f613SOng Boon Leong /* Request the Safety Feature Correctible Error line in 34688532f613SOng Boon Leong * case of another line is used 34698532f613SOng Boon Leong */ 34708532f613SOng Boon Leong if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { 34718532f613SOng Boon Leong int_name = priv->int_name_sfty_ce; 34728532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "safety-ce"); 
34738532f613SOng Boon Leong ret = request_irq(priv->sfty_ce_irq, 34748532f613SOng Boon Leong stmmac_safety_interrupt, 34758532f613SOng Boon Leong 0, int_name, dev); 34768532f613SOng Boon Leong if (unlikely(ret < 0)) { 34778532f613SOng Boon Leong netdev_err(priv->dev, 34788532f613SOng Boon Leong "%s: alloc sfty ce MSI %d (error: %d)\n", 34798532f613SOng Boon Leong __func__, priv->sfty_ce_irq, ret); 34808532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_SFTY_CE; 34818532f613SOng Boon Leong goto irq_error; 34828532f613SOng Boon Leong } 34838532f613SOng Boon Leong } 34848532f613SOng Boon Leong 34858532f613SOng Boon Leong /* Request the Safety Feature Uncorrectible Error line in 34868532f613SOng Boon Leong * case of another line is used 34878532f613SOng Boon Leong */ 34888532f613SOng Boon Leong if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { 34898532f613SOng Boon Leong int_name = priv->int_name_sfty_ue; 34908532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "safety-ue"); 34918532f613SOng Boon Leong ret = request_irq(priv->sfty_ue_irq, 34928532f613SOng Boon Leong stmmac_safety_interrupt, 34938532f613SOng Boon Leong 0, int_name, dev); 34948532f613SOng Boon Leong if (unlikely(ret < 0)) { 34958532f613SOng Boon Leong netdev_err(priv->dev, 34968532f613SOng Boon Leong "%s: alloc sfty ue MSI %d (error: %d)\n", 34978532f613SOng Boon Leong __func__, priv->sfty_ue_irq, ret); 34988532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_SFTY_UE; 34998532f613SOng Boon Leong goto irq_error; 35008532f613SOng Boon Leong } 35018532f613SOng Boon Leong } 35028532f613SOng Boon Leong 35038532f613SOng Boon Leong /* Request Rx MSI irq */ 35048532f613SOng Boon Leong for (i = 0; i < priv->plat->rx_queues_to_use; i++) { 35058532f613SOng Boon Leong if (priv->rx_irq[i] == 0) 35068532f613SOng Boon Leong continue; 35078532f613SOng Boon Leong 35088532f613SOng Boon Leong int_name = priv->int_name_rx_irq[i]; 35098532f613SOng Boon Leong sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); 
35108532f613SOng Boon Leong ret = request_irq(priv->rx_irq[i], 35118532f613SOng Boon Leong stmmac_msi_intr_rx, 35128532f613SOng Boon Leong 0, int_name, &priv->rx_queue[i]); 35138532f613SOng Boon Leong if (unlikely(ret < 0)) { 35148532f613SOng Boon Leong netdev_err(priv->dev, 35158532f613SOng Boon Leong "%s: alloc rx-%d MSI %d (error: %d)\n", 35168532f613SOng Boon Leong __func__, i, priv->rx_irq[i], ret); 35178532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_RX; 35188532f613SOng Boon Leong irq_idx = i; 35198532f613SOng Boon Leong goto irq_error; 35208532f613SOng Boon Leong } 35218deec94cSOng Boon Leong cpumask_clear(&cpu_mask); 35228deec94cSOng Boon Leong cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 35238deec94cSOng Boon Leong irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); 35248532f613SOng Boon Leong } 35258532f613SOng Boon Leong 35268532f613SOng Boon Leong /* Request Tx MSI irq */ 35278532f613SOng Boon Leong for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 35288532f613SOng Boon Leong if (priv->tx_irq[i] == 0) 35298532f613SOng Boon Leong continue; 35308532f613SOng Boon Leong 35318532f613SOng Boon Leong int_name = priv->int_name_tx_irq[i]; 35328532f613SOng Boon Leong sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); 35338532f613SOng Boon Leong ret = request_irq(priv->tx_irq[i], 35348532f613SOng Boon Leong stmmac_msi_intr_tx, 35358532f613SOng Boon Leong 0, int_name, &priv->tx_queue[i]); 35368532f613SOng Boon Leong if (unlikely(ret < 0)) { 35378532f613SOng Boon Leong netdev_err(priv->dev, 35388532f613SOng Boon Leong "%s: alloc tx-%d MSI %d (error: %d)\n", 35398532f613SOng Boon Leong __func__, i, priv->tx_irq[i], ret); 35408532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_TX; 35418532f613SOng Boon Leong irq_idx = i; 35428532f613SOng Boon Leong goto irq_error; 35438532f613SOng Boon Leong } 35448deec94cSOng Boon Leong cpumask_clear(&cpu_mask); 35458deec94cSOng Boon Leong cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 35468deec94cSOng Boon Leong 
irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); 35478532f613SOng Boon Leong } 35488532f613SOng Boon Leong 35498532f613SOng Boon Leong return 0; 35508532f613SOng Boon Leong 35518532f613SOng Boon Leong irq_error: 35528532f613SOng Boon Leong stmmac_free_irq(dev, irq_err, irq_idx); 35538532f613SOng Boon Leong return ret; 35548532f613SOng Boon Leong } 35558532f613SOng Boon Leong 35568532f613SOng Boon Leong static int stmmac_request_irq_single(struct net_device *dev) 35578532f613SOng Boon Leong { 35588532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 35593e6dc7b6SWong Vee Khee enum request_irq_err irq_err; 35608532f613SOng Boon Leong int ret; 35618532f613SOng Boon Leong 35628532f613SOng Boon Leong ret = request_irq(dev->irq, stmmac_interrupt, 35638532f613SOng Boon Leong IRQF_SHARED, dev->name, dev); 35648532f613SOng Boon Leong if (unlikely(ret < 0)) { 35658532f613SOng Boon Leong netdev_err(priv->dev, 35668532f613SOng Boon Leong "%s: ERROR: allocating the IRQ %d (error: %d)\n", 35678532f613SOng Boon Leong __func__, dev->irq, ret); 35688532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_MAC; 35693e6dc7b6SWong Vee Khee goto irq_error; 35708532f613SOng Boon Leong } 35718532f613SOng Boon Leong 35728532f613SOng Boon Leong /* Request the Wake IRQ in case of another line 35738532f613SOng Boon Leong * is used for WoL 35748532f613SOng Boon Leong */ 35758532f613SOng Boon Leong if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 35768532f613SOng Boon Leong ret = request_irq(priv->wol_irq, stmmac_interrupt, 35778532f613SOng Boon Leong IRQF_SHARED, dev->name, dev); 35788532f613SOng Boon Leong if (unlikely(ret < 0)) { 35798532f613SOng Boon Leong netdev_err(priv->dev, 35808532f613SOng Boon Leong "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 35818532f613SOng Boon Leong __func__, priv->wol_irq, ret); 35828532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_WOL; 35833e6dc7b6SWong Vee Khee goto irq_error; 35848532f613SOng Boon Leong } 35858532f613SOng Boon Leong } 
35868532f613SOng Boon Leong 35878532f613SOng Boon Leong /* Request the IRQ lines */ 35888532f613SOng Boon Leong if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 35898532f613SOng Boon Leong ret = request_irq(priv->lpi_irq, stmmac_interrupt, 35908532f613SOng Boon Leong IRQF_SHARED, dev->name, dev); 35918532f613SOng Boon Leong if (unlikely(ret < 0)) { 35928532f613SOng Boon Leong netdev_err(priv->dev, 35938532f613SOng Boon Leong "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 35948532f613SOng Boon Leong __func__, priv->lpi_irq, ret); 35958532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_LPI; 35968532f613SOng Boon Leong goto irq_error; 35978532f613SOng Boon Leong } 35988532f613SOng Boon Leong } 35998532f613SOng Boon Leong 36008532f613SOng Boon Leong return 0; 36018532f613SOng Boon Leong 36028532f613SOng Boon Leong irq_error: 36038532f613SOng Boon Leong stmmac_free_irq(dev, irq_err, 0); 36048532f613SOng Boon Leong return ret; 36058532f613SOng Boon Leong } 36068532f613SOng Boon Leong 36078532f613SOng Boon Leong static int stmmac_request_irq(struct net_device *dev) 36088532f613SOng Boon Leong { 36098532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 36108532f613SOng Boon Leong int ret; 36118532f613SOng Boon Leong 36128532f613SOng Boon Leong /* Request the IRQ lines */ 36138532f613SOng Boon Leong if (priv->plat->multi_msi_en) 36148532f613SOng Boon Leong ret = stmmac_request_irq_multi_msi(dev); 36158532f613SOng Boon Leong else 36168532f613SOng Boon Leong ret = stmmac_request_irq_single(dev); 36178532f613SOng Boon Leong 36188532f613SOng Boon Leong return ret; 36198532f613SOng Boon Leong } 36208532f613SOng Boon Leong 3621523f11b5SSrinivas Kandagatla /** 36227ac6653aSJeff Kirsher * stmmac_open - open entry point of the driver 36237ac6653aSJeff Kirsher * @dev : pointer to the device structure. 36247ac6653aSJeff Kirsher * Description: 36257ac6653aSJeff Kirsher * This function is the open entry point of the driver. 
36267ac6653aSJeff Kirsher * Return value: 36277ac6653aSJeff Kirsher * 0 on success and an appropriate (-)ve integer as defined in errno.h 36287ac6653aSJeff Kirsher * file on failure. 36297ac6653aSJeff Kirsher */ 36305fabb012SOng Boon Leong int stmmac_open(struct net_device *dev) 36317ac6653aSJeff Kirsher { 36327ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 36339900074eSVladimir Oltean int mode = priv->plat->phy_interface; 36345d626c87SJose Abreu int bfsize = 0; 36358fce3331SJose Abreu u32 chan; 36367ac6653aSJeff Kirsher int ret; 36377ac6653aSJeff Kirsher 36385ec55823SJoakim Zhang ret = pm_runtime_get_sync(priv->device); 36395ec55823SJoakim Zhang if (ret < 0) { 36405ec55823SJoakim Zhang pm_runtime_put_noidle(priv->device); 36415ec55823SJoakim Zhang return ret; 36425ec55823SJoakim Zhang } 36435ec55823SJoakim Zhang 3644a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 3645f213bbe8SJose Abreu priv->hw->pcs != STMMAC_PCS_RTBI && 36469900074eSVladimir Oltean (!priv->hw->xpcs || 364711059740SVladimir Oltean xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { 36487ac6653aSJeff Kirsher ret = stmmac_init_phy(dev); 3649e58bb43fSGiuseppe CAVALLARO if (ret) { 365038ddc59dSLABBE Corentin netdev_err(priv->dev, 365138ddc59dSLABBE Corentin "%s: Cannot attach to PHY (error: %d)\n", 3652e58bb43fSGiuseppe CAVALLARO __func__, ret); 36535ec55823SJoakim Zhang goto init_phy_error; 36547ac6653aSJeff Kirsher } 3655e58bb43fSGiuseppe CAVALLARO } 36567ac6653aSJeff Kirsher 3657523f11b5SSrinivas Kandagatla /* Extra statistics */ 3658523f11b5SSrinivas Kandagatla memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 3659523f11b5SSrinivas Kandagatla priv->xstats.threshold = tc; 3660523f11b5SSrinivas Kandagatla 36615d626c87SJose Abreu bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); 36625d626c87SJose Abreu if (bfsize < 0) 36635d626c87SJose Abreu bfsize = 0; 36645d626c87SJose Abreu 36655d626c87SJose Abreu if (bfsize < BUF_SIZE_16KiB) 36665d626c87SJose Abreu 
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 36675d626c87SJose Abreu 36685d626c87SJose Abreu priv->dma_buf_sz = bfsize; 36695d626c87SJose Abreu buf_sz = bfsize; 36705d626c87SJose Abreu 367122ad3838SGiuseppe Cavallaro priv->rx_copybreak = STMMAC_RX_COPYBREAK; 367256329137SBartlomiej Zolnierkiewicz 3673aa042f60SSong, Yoong Siang if (!priv->dma_tx_size) 3674aa042f60SSong, Yoong Siang priv->dma_tx_size = DMA_DEFAULT_TX_SIZE; 3675aa042f60SSong, Yoong Siang if (!priv->dma_rx_size) 3676aa042f60SSong, Yoong Siang priv->dma_rx_size = DMA_DEFAULT_RX_SIZE; 3677aa042f60SSong, Yoong Siang 3678579a25a8SJose Abreu /* Earlier check for TBS */ 3679579a25a8SJose Abreu for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { 3680579a25a8SJose Abreu struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 3681579a25a8SJose Abreu int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; 3682579a25a8SJose Abreu 36835e6038b8SOng Boon Leong /* Setup per-TXQ tbs flag before TX descriptor alloc */ 3684579a25a8SJose Abreu tx_q->tbs |= tbs_en ? 
STMMAC_TBS_AVAIL : 0; 3685579a25a8SJose Abreu } 3686579a25a8SJose Abreu 36875bacd778SLABBE Corentin ret = alloc_dma_desc_resources(priv); 36885bacd778SLABBE Corentin if (ret < 0) { 36895bacd778SLABBE Corentin netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 36905bacd778SLABBE Corentin __func__); 36915bacd778SLABBE Corentin goto dma_desc_error; 36925bacd778SLABBE Corentin } 36935bacd778SLABBE Corentin 36945bacd778SLABBE Corentin ret = init_dma_desc_rings(dev, GFP_KERNEL); 36955bacd778SLABBE Corentin if (ret < 0) { 36965bacd778SLABBE Corentin netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 36975bacd778SLABBE Corentin __func__); 36985bacd778SLABBE Corentin goto init_error; 36995bacd778SLABBE Corentin } 37005bacd778SLABBE Corentin 3701fe131929SHuacai Chen ret = stmmac_hw_setup(dev, true); 370256329137SBartlomiej Zolnierkiewicz if (ret < 0) { 370338ddc59dSLABBE Corentin netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 3704c9324d18SGiuseppe CAVALLARO goto init_error; 37057ac6653aSJeff Kirsher } 37067ac6653aSJeff Kirsher 3707d429b66eSJose Abreu stmmac_init_coalesce(priv); 3708777da230SGiuseppe CAVALLARO 370974371272SJose Abreu phylink_start(priv->phylink); 371077b28983SJisheng Zhang /* We may have called phylink_speed_down before */ 371177b28983SJisheng Zhang phylink_speed_up(priv->phylink); 37127ac6653aSJeff Kirsher 37138532f613SOng Boon Leong ret = stmmac_request_irq(dev); 37148532f613SOng Boon Leong if (ret) 37156c1e5abeSThierry Reding goto irq_error; 3716d765955dSGiuseppe CAVALLARO 3717c22a3f48SJoao Pinto stmmac_enable_all_queues(priv); 37189f19306dSOng Boon Leong netif_tx_start_all_queues(priv->dev); 37197ac6653aSJeff Kirsher 37207ac6653aSJeff Kirsher return 0; 37217ac6653aSJeff Kirsher 37226c1e5abeSThierry Reding irq_error: 372374371272SJose Abreu phylink_stop(priv->phylink); 37247a13f8f5SFrancesco Virlinzi 37258fce3331SJose Abreu for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3726d5a05e69SVincent Whitchurch 
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	phylink_disconnect_phy(priv->phylink);
init_phy_error:
	pm_runtime_put(priv->device);
	return ret;
}

/* Flag the FPE task as being removed, then tear down the workqueue that
 * services Frame Preemption (FPE) handshake events.
 *
 * NOTE(review): the "stop" message is logged even when no workqueue was
 * ever created, and priv->fpe_wq is not reset to NULL after
 * destroy_workqueue() -- consider guarding the message and clearing the
 * pointer to protect against a double teardown.
 */
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq)
		destroy_workqueue(priv->fpe_wq);

	netdev_info(priv->dev, "FPE workqueue stop");
}

/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer
 * Description:
 * This is the stop entry point of the driver.
 */
int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Counterpart of the phylink_speed_up() done at open time when
	 * wake-on-LAN can keep the device powered.
	 */
	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-queue TX coalescing timers */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Leave any Energy Efficient Ethernet low-power TX state */
	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	pm_runtime_put(priv->device);

	/* The FPE workqueue only exists when the HW supports frame
	 * preemption, so only tear it down in that case.
	 */
	if (priv->dma_cap.fpesel)
		stmmac_fpe_stop_wq(priv);

	return 0;
}

/* Ask the hardware to insert the VLAN tag carried by @skb into the
 * current TX descriptor (outer tag, plus the inner tag for 802.1AD
 * double-tagged frames).
 *
 * Returns true when the tag was programmed and the descriptor's own bit
 * handed to the DMA (cur_tx is advanced); false when HW insertion is not
 * available for this skb or programming the tag failed.
 */
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	/* TBS-capable queues use the extended descriptor layout */
	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
	return true;
}

/**
 * stmmac_tso_allocator - fill TX descriptors with a TSO payload buffer
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * 
@last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description:
 * This function fills descriptors and requests new descriptors according
 * to the remaining buffer length to fill
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	/* One descriptor per TSO_MAX_BUFF_SIZE chunk of the payload */
	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		/* With <= 32 bit addressing the address is written into
		 * DES0 directly; wider addressing goes through the HW
		 * helper.
		 */
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		/* Flag "last segment" only on the final chunk of a
		 * last_segment buffer.
		 */
		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

/* Publish the TX ring state to the hardware: a write barrier guarantees
 * the descriptor contents (including the own bit) are coherent before the
 * tail pointer update lets the DMA engine fetch them.
 */
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int desc_size;

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	/* The own bit must be the latest setting done when preparing the
	 * descriptor and then a barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
}

/**
 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 * @skb : the socket buffer
 * @dev : device pointer
 * Description: this is the transmit function that is called on TSO frames
 * (support available on GMAC4 and newer chips).
 * Diagram below show the ring programming in case of TSO frames:
 *
 * First Descriptor
 *  --------
 * | DES0 |---> buffer1 = L2/L3/L4 header
 * | DES1 |---> TCP Payload (can continue on next descr...)
 * | DES2 |---> buffer 1 and 2 len
 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *  --------
 *	|
 *     ...
 *	|
 *  --------
 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
 * | DES1 | --|
 * | DES2 | --> buffer 1 and 2 len
 * | DES3 |
 *  --------
 *
 * MSS is fixed while TSO is enabled, so the TDES3 context field is not
 * reprogrammed for every frame.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, tx_packets;
	int tmp_pay_len = 0, first_tx;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	u8 proto_hdr_len, hdr;
	u32 pay_len, mss;
	dma_addr_t des;
	int i;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths (UDP segmentation offload vs TCP TSO) */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
		hdr = sizeof(struct udphdr);
	} else {
		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr = tcp_hdrlen(skb);
	}

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed: the MSS travels in a context
	 * descriptor that sits in front of the first data descriptor.
	 */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;

	if (priv->dma_cap.addr64 <= 32) {
		first->des0 = cpu_to_le32(des);

		/* Fill start of payload in buff2 of first descriptor */
		if (pay_len)
			first->des1 = cpu_to_le32(des + proto_hdr_len);

		/* If needed take extra descriptors to fill the remaining payload */
		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
	} else {
		/* NOTE(review): with wider addressing the header and
		 * payload apparently do not share the first descriptor:
		 * the whole linear payload (starting right after the
		 * header) is handed to stmmac_tso_allocator() below --
		 * confirm against the databook.
		 */
		stmmac_set_desc_addr(priv, first, des);
		tmp_pay_len = pay_len;
		des += proto_hdr_len;
		pay_len = 0;
	}

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;

	/* Manage tx mitigation: decide whether this batch should raise a
	 * TX-complete interrupt, based on HW timestamping and the
	 * per-queue frame-coalescing threshold.
	 */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
				   proto_hdr_len,
				   pay_len,
				   1, tx_q->tx_skbuff_dma[first_entry].last_segment,
				   hdr / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 * stmmac_xmit - Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
416632ceabcaSGiuseppe CAVALLARO * It programs the chain or the ring and supports oversized frames 416732ceabcaSGiuseppe CAVALLARO * and SG feature. 41687ac6653aSJeff Kirsher */ 41697ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 41707ac6653aSJeff Kirsher { 4171c2837423SJose Abreu unsigned int first_entry, tx_packets, enh_desc; 41727ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 41730e80bdc9SGiuseppe Cavallaro unsigned int nopaged_len = skb_headlen(skb); 41744a7d666aSGiuseppe CAVALLARO int i, csum_insertion = 0, is_jumbo = 0; 4175ce736788SJoao Pinto u32 queue = skb_get_queue_mapping(skb); 41767ac6653aSJeff Kirsher int nfrags = skb_shinfo(skb)->nr_frags; 4177b7766206SJose Abreu int gso = skb_shinfo(skb)->gso_type; 4178579a25a8SJose Abreu struct dma_edesc *tbs_desc = NULL; 41797ac6653aSJeff Kirsher struct dma_desc *desc, *first; 4180ce736788SJoao Pinto struct stmmac_tx_queue *tx_q; 4181c2837423SJose Abreu bool has_vlan, set_ic; 4182d96febedSOng Boon Leong int entry, first_tx; 4183a993db88SJose Abreu dma_addr_t des; 4184f748be53SAlexandre TORGUE 4185ce736788SJoao Pinto tx_q = &priv->tx_queue[queue]; 4186c2837423SJose Abreu first_tx = tx_q->cur_tx; 4187ce736788SJoao Pinto 4188be1c7eaeSVineetha G. 
Jaya Kumaran if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 4189e2cd682dSJose Abreu stmmac_disable_eee_mode(priv); 4190e2cd682dSJose Abreu 4191f748be53SAlexandre TORGUE /* Manage oversized TCP frames for GMAC4 device */ 4192f748be53SAlexandre TORGUE if (skb_is_gso(skb) && priv->tso) { 4193b7766206SJose Abreu if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 4194b7766206SJose Abreu return stmmac_tso_xmit(skb, dev); 4195b7766206SJose Abreu if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 4196f748be53SAlexandre TORGUE return stmmac_tso_xmit(skb, dev); 4197f748be53SAlexandre TORGUE } 41987ac6653aSJeff Kirsher 4199ce736788SJoao Pinto if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 4200c22a3f48SJoao Pinto if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4201c22a3f48SJoao Pinto netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4202c22a3f48SJoao Pinto queue)); 42037ac6653aSJeff Kirsher /* This is a hard error, log it. */ 420438ddc59dSLABBE Corentin netdev_err(priv->dev, 420538ddc59dSLABBE Corentin "%s: Tx Ring full when queue awake\n", 420638ddc59dSLABBE Corentin __func__); 42077ac6653aSJeff Kirsher } 42087ac6653aSJeff Kirsher return NETDEV_TX_BUSY; 42097ac6653aSJeff Kirsher } 42107ac6653aSJeff Kirsher 421130d93227SJose Abreu /* Check if VLAN can be inserted by HW */ 421230d93227SJose Abreu has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 421330d93227SJose Abreu 4214ce736788SJoao Pinto entry = tx_q->cur_tx; 42150e80bdc9SGiuseppe Cavallaro first_entry = entry; 4216b4c9784cSNiklas Cassel WARN_ON(tx_q->tx_skbuff[first_entry]); 42177ac6653aSJeff Kirsher 42187ac6653aSJeff Kirsher csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 42197ac6653aSJeff Kirsher 42200e80bdc9SGiuseppe Cavallaro if (likely(priv->extend_desc)) 4221ce736788SJoao Pinto desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4222579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4223579a25a8SJose Abreu desc = &tx_q->dma_entx[entry].basic; 4224c24602efSGiuseppe 
CAVALLARO else 4225ce736788SJoao Pinto desc = tx_q->dma_tx + entry; 4226c24602efSGiuseppe CAVALLARO 42277ac6653aSJeff Kirsher first = desc; 42287ac6653aSJeff Kirsher 422930d93227SJose Abreu if (has_vlan) 423030d93227SJose Abreu stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 423130d93227SJose Abreu 42320e80bdc9SGiuseppe Cavallaro enh_desc = priv->plat->enh_desc; 42334a7d666aSGiuseppe CAVALLARO /* To program the descriptors according to the size of the frame */ 423429896a67SGiuseppe CAVALLARO if (enh_desc) 42352c520b1cSJose Abreu is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 423629896a67SGiuseppe CAVALLARO 423763a550fcSJose Abreu if (unlikely(is_jumbo)) { 42382c520b1cSJose Abreu entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 423963a550fcSJose Abreu if (unlikely(entry < 0) && (entry != -EINVAL)) 4240362b37beSGiuseppe CAVALLARO goto dma_map_err; 424129896a67SGiuseppe CAVALLARO } 42427ac6653aSJeff Kirsher 42437ac6653aSJeff Kirsher for (i = 0; i < nfrags; i++) { 42449e903e08SEric Dumazet const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 42459e903e08SEric Dumazet int len = skb_frag_size(frag); 4246be434d50SGiuseppe Cavallaro bool last_segment = (i == (nfrags - 1)); 42477ac6653aSJeff Kirsher 4248aa042f60SSong, Yoong Siang entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 4249b4c9784cSNiklas Cassel WARN_ON(tx_q->tx_skbuff[entry]); 4250e3ad57c9SGiuseppe Cavallaro 42510e80bdc9SGiuseppe Cavallaro if (likely(priv->extend_desc)) 4252ce736788SJoao Pinto desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4253579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4254579a25a8SJose Abreu desc = &tx_q->dma_entx[entry].basic; 4255c24602efSGiuseppe CAVALLARO else 4256ce736788SJoao Pinto desc = tx_q->dma_tx + entry; 42577ac6653aSJeff Kirsher 4258f748be53SAlexandre TORGUE des = skb_frag_dma_map(priv->device, frag, 0, len, 4259f722380dSIan Campbell DMA_TO_DEVICE); 4260f748be53SAlexandre TORGUE if (dma_mapping_error(priv->device, des)) 
4261362b37beSGiuseppe CAVALLARO goto dma_map_err; /* should reuse desc w/o issues */ 4262362b37beSGiuseppe CAVALLARO 4263ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].buf = des; 42646844171dSJose Abreu 42656844171dSJose Abreu stmmac_set_desc_addr(priv, desc, des); 4266f748be53SAlexandre TORGUE 4267ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].map_as_page = true; 4268ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].len = len; 4269ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 4270be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 42710e80bdc9SGiuseppe Cavallaro 42720e80bdc9SGiuseppe Cavallaro /* Prepare the descriptor and set the own bit too */ 427342de047dSJose Abreu stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 427442de047dSJose Abreu priv->mode, 1, last_segment, skb->len); 42757ac6653aSJeff Kirsher } 42767ac6653aSJeff Kirsher 427705cf0d1bSNiklas Cassel /* Only the last descriptor gets to point to the skb. */ 427805cf0d1bSNiklas Cassel tx_q->tx_skbuff[entry] = skb; 4279be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4280e3ad57c9SGiuseppe Cavallaro 42817df4a3a7SJose Abreu /* According to the coalesce parameter the IC bit for the latest 42827df4a3a7SJose Abreu * segment is reset and the timer re-started to clean the tx status. 42837df4a3a7SJose Abreu * This approach takes care about the fragments: desc is the first 42847df4a3a7SJose Abreu * element in case of no SG. 
42857df4a3a7SJose Abreu */ 4286c2837423SJose Abreu tx_packets = (entry + 1) - first_tx; 4287c2837423SJose Abreu tx_q->tx_count_frames += tx_packets; 4288c2837423SJose Abreu 4289c2837423SJose Abreu if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4290c2837423SJose Abreu set_ic = true; 4291db2f2842SOng Boon Leong else if (!priv->tx_coal_frames[queue]) 4292c2837423SJose Abreu set_ic = false; 4293db2f2842SOng Boon Leong else if (tx_packets > priv->tx_coal_frames[queue]) 4294c2837423SJose Abreu set_ic = true; 4295db2f2842SOng Boon Leong else if ((tx_q->tx_count_frames % 4296db2f2842SOng Boon Leong priv->tx_coal_frames[queue]) < tx_packets) 4297c2837423SJose Abreu set_ic = true; 4298c2837423SJose Abreu else 4299c2837423SJose Abreu set_ic = false; 4300c2837423SJose Abreu 4301c2837423SJose Abreu if (set_ic) { 43027df4a3a7SJose Abreu if (likely(priv->extend_desc)) 43037df4a3a7SJose Abreu desc = &tx_q->dma_etx[entry].basic; 4304579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4305579a25a8SJose Abreu desc = &tx_q->dma_entx[entry].basic; 43067df4a3a7SJose Abreu else 43077df4a3a7SJose Abreu desc = &tx_q->dma_tx[entry]; 43087df4a3a7SJose Abreu 43097df4a3a7SJose Abreu tx_q->tx_count_frames = 0; 43107df4a3a7SJose Abreu stmmac_set_tx_ic(priv, desc); 43117df4a3a7SJose Abreu priv->xstats.tx_set_ic_bit++; 43127df4a3a7SJose Abreu } 43137df4a3a7SJose Abreu 431405cf0d1bSNiklas Cassel /* We've used all descriptors we need for this skb, however, 431505cf0d1bSNiklas Cassel * advance cur_tx so that it references a fresh descriptor. 431605cf0d1bSNiklas Cassel * ndo_start_xmit will fill this descriptor the next time it's 431705cf0d1bSNiklas Cassel * called and stmmac_tx_clean may clean up to this descriptor. 
431805cf0d1bSNiklas Cassel */ 4319aa042f60SSong, Yoong Siang entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 4320ce736788SJoao Pinto tx_q->cur_tx = entry; 43217ac6653aSJeff Kirsher 43227ac6653aSJeff Kirsher if (netif_msg_pktdata(priv)) { 432338ddc59dSLABBE Corentin netdev_dbg(priv->dev, 432438ddc59dSLABBE Corentin "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 4325ce736788SJoao Pinto __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 43260e80bdc9SGiuseppe Cavallaro entry, first, nfrags); 432783d7af64SGiuseppe CAVALLARO 432838ddc59dSLABBE Corentin netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 43297ac6653aSJeff Kirsher print_pkt(skb->data, skb->len); 43307ac6653aSJeff Kirsher } 43310e80bdc9SGiuseppe Cavallaro 4332ce736788SJoao Pinto if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4333b3e51069SLABBE Corentin netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4334b3e51069SLABBE Corentin __func__); 4335c22a3f48SJoao Pinto netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 43367ac6653aSJeff Kirsher } 43377ac6653aSJeff Kirsher 43387ac6653aSJeff Kirsher dev->stats.tx_bytes += skb->len; 43397ac6653aSJeff Kirsher 43408000ddc0SJose Abreu if (priv->sarc_type) 43418000ddc0SJose Abreu stmmac_set_desc_sarc(priv, first, priv->sarc_type); 43428000ddc0SJose Abreu 43430e80bdc9SGiuseppe Cavallaro skb_tx_timestamp(skb); 43440e80bdc9SGiuseppe Cavallaro 43450e80bdc9SGiuseppe Cavallaro /* Ready to fill the first descriptor and set the OWN bit w/o any 43460e80bdc9SGiuseppe Cavallaro * problems because all the descriptors are actually ready to be 43470e80bdc9SGiuseppe Cavallaro * passed to the DMA engine. 
43480e80bdc9SGiuseppe Cavallaro */ 43490e80bdc9SGiuseppe Cavallaro if (likely(!is_jumbo)) { 43500e80bdc9SGiuseppe Cavallaro bool last_segment = (nfrags == 0); 43510e80bdc9SGiuseppe Cavallaro 4352f748be53SAlexandre TORGUE des = dma_map_single(priv->device, skb->data, 43530e80bdc9SGiuseppe Cavallaro nopaged_len, DMA_TO_DEVICE); 4354f748be53SAlexandre TORGUE if (dma_mapping_error(priv->device, des)) 43550e80bdc9SGiuseppe Cavallaro goto dma_map_err; 43560e80bdc9SGiuseppe Cavallaro 4357ce736788SJoao Pinto tx_q->tx_skbuff_dma[first_entry].buf = des; 4358be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4359be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 43606844171dSJose Abreu 43616844171dSJose Abreu stmmac_set_desc_addr(priv, first, des); 4362f748be53SAlexandre TORGUE 4363ce736788SJoao Pinto tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 4364ce736788SJoao Pinto tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 43650e80bdc9SGiuseppe Cavallaro 4366891434b1SRayagond Kokatanur if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4367891434b1SRayagond Kokatanur priv->hwts_tx_en)) { 4368891434b1SRayagond Kokatanur /* declare that device is doing timestamping */ 4369891434b1SRayagond Kokatanur skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 437042de047dSJose Abreu stmmac_enable_tx_timestamp(priv, first); 4371891434b1SRayagond Kokatanur } 4372891434b1SRayagond Kokatanur 43730e80bdc9SGiuseppe Cavallaro /* Prepare the first descriptor setting the OWN bit too */ 437442de047dSJose Abreu stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 4375579a25a8SJose Abreu csum_insertion, priv->mode, 0, last_segment, 437642de047dSJose Abreu skb->len); 437780acbed9SAaro Koskinen } 43780e80bdc9SGiuseppe Cavallaro 4379579a25a8SJose Abreu if (tx_q->tbs & STMMAC_TBS_EN) { 4380579a25a8SJose Abreu struct timespec64 ts = ns_to_timespec64(skb->tstamp); 4381579a25a8SJose Abreu 4382579a25a8SJose Abreu tbs_desc = 
&tx_q->dma_entx[first_entry]; 4383579a25a8SJose Abreu stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 4384579a25a8SJose Abreu } 4385579a25a8SJose Abreu 4386579a25a8SJose Abreu stmmac_set_tx_owner(priv, first); 4387579a25a8SJose Abreu 4388c22a3f48SJoao Pinto netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4389f748be53SAlexandre TORGUE 4390a4e887faSJose Abreu stmmac_enable_dma_transmission(priv, priv->ioaddr); 43918fce3331SJose Abreu 4392d96febedSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 43934772f26dSJose Abreu stmmac_tx_timer_arm(priv, queue); 43947ac6653aSJeff Kirsher 4395362b37beSGiuseppe CAVALLARO return NETDEV_TX_OK; 4396a9097a96SGiuseppe CAVALLARO 4397362b37beSGiuseppe CAVALLARO dma_map_err: 439838ddc59dSLABBE Corentin netdev_err(priv->dev, "Tx DMA map failed\n"); 4399362b37beSGiuseppe CAVALLARO dev_kfree_skb(skb); 4400362b37beSGiuseppe CAVALLARO priv->dev->stats.tx_dropped++; 44017ac6653aSJeff Kirsher return NETDEV_TX_OK; 44027ac6653aSJeff Kirsher } 44037ac6653aSJeff Kirsher 4404b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 4405b9381985SVince Bridgers { 4406ab188e8fSElad Nachman struct vlan_ethhdr *veth; 4407ab188e8fSElad Nachman __be16 vlan_proto; 4408b9381985SVince Bridgers u16 vlanid; 4409b9381985SVince Bridgers 4410ab188e8fSElad Nachman veth = (struct vlan_ethhdr *)skb->data; 4411ab188e8fSElad Nachman vlan_proto = veth->h_vlan_proto; 4412ab188e8fSElad Nachman 4413ab188e8fSElad Nachman if ((vlan_proto == htons(ETH_P_8021Q) && 4414ab188e8fSElad Nachman dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 4415ab188e8fSElad Nachman (vlan_proto == htons(ETH_P_8021AD) && 4416ab188e8fSElad Nachman dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 4417b9381985SVince Bridgers /* pop the vlan tag */ 4418ab188e8fSElad Nachman vlanid = ntohs(veth->h_vlan_TCI); 4419ab188e8fSElad Nachman memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 4420b9381985SVince Bridgers skb_pull(skb, VLAN_HLEN); 
/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	/* Walk every descriptor the hardware has consumed since the last
	 * refill, re-arming each one with a fresh page from the page pool.
	 */
	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* Primary buffer: allocate only if the slot is empty; stop
		 * refilling on allocation failure (retried on next run).
		 */
		if (!buf->page) {
			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->page)
				break;
		}

		/* Secondary buffer, only used in Split Header (SPH) mode */
		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		/* RX interrupt-coalescing bookkeeping: count descriptors and
		 * wrap the counter once the per-queue threshold is crossed.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Request the RX watchdog (coalescing timer) only when RIWT
		 * is in use and coalescing is active for this queue.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* All descriptor fields must be visible to the device before
		 * ownership is handed back via the OWN bit.
		 */
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	/* Kick the DMA by advancing the ring tail pointer */
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
449731f2760eSLuo Jiaxing int coe = priv->hw->rx_csum; 449888ebe2cfSJose Abreu 449988ebe2cfSJose Abreu /* Not first descriptor, buffer is always zero */ 450088ebe2cfSJose Abreu if (priv->sph && len) 450188ebe2cfSJose Abreu return 0; 450288ebe2cfSJose Abreu 450388ebe2cfSJose Abreu /* First descriptor, get split header length */ 450431f2760eSLuo Jiaxing stmmac_get_rx_header_len(priv, p, &hlen); 450588ebe2cfSJose Abreu if (priv->sph && hlen) { 450688ebe2cfSJose Abreu priv->xstats.rx_split_hdr_pkt_n++; 450788ebe2cfSJose Abreu return hlen; 450888ebe2cfSJose Abreu } 450988ebe2cfSJose Abreu 451088ebe2cfSJose Abreu /* First descriptor, not last descriptor and not split header */ 451188ebe2cfSJose Abreu if (status & rx_not_ls) 451288ebe2cfSJose Abreu return priv->dma_buf_sz; 451388ebe2cfSJose Abreu 451488ebe2cfSJose Abreu plen = stmmac_get_rx_frame_len(priv, p, coe); 451588ebe2cfSJose Abreu 451688ebe2cfSJose Abreu /* First descriptor and last descriptor and not split header */ 451788ebe2cfSJose Abreu return min_t(unsigned int, priv->dma_buf_sz, plen); 451888ebe2cfSJose Abreu } 451988ebe2cfSJose Abreu 452088ebe2cfSJose Abreu static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, 452188ebe2cfSJose Abreu struct dma_desc *p, 452288ebe2cfSJose Abreu int status, unsigned int len) 452388ebe2cfSJose Abreu { 452488ebe2cfSJose Abreu int coe = priv->hw->rx_csum; 452588ebe2cfSJose Abreu unsigned int plen = 0; 452688ebe2cfSJose Abreu 452788ebe2cfSJose Abreu /* Not split header, buffer is not available */ 452888ebe2cfSJose Abreu if (!priv->sph) 452988ebe2cfSJose Abreu return 0; 453088ebe2cfSJose Abreu 453188ebe2cfSJose Abreu /* Not last descriptor */ 453288ebe2cfSJose Abreu if (status & rx_not_ls) 453388ebe2cfSJose Abreu return priv->dma_buf_sz; 453488ebe2cfSJose Abreu 453588ebe2cfSJose Abreu plen = stmmac_get_rx_frame_len(priv, p, coe); 453688ebe2cfSJose Abreu 453788ebe2cfSJose Abreu /* Last descriptor */ 453888ebe2cfSJose Abreu return plen - len; 453988ebe2cfSJose Abreu } 
/* Queue one XDP frame @xdpf on TX queue @queue.
 *
 * @dma_map selects the buffer-ownership model:
 *   true  - ndo_xdp_xmit (XDP_REDIRECT from another device): the frame
 *           memory is foreign, so it is DMA-mapped here
 *           (STMMAC_TXBUF_T_XDP_NDO);
 *   false - XDP_TX bounce of our own RX buffer: the page already lives in
 *           the page pool, only a dma_sync is needed
 *           (STMMAC_TXBUF_T_XDP_TX).
 *
 * Returns STMMAC_XDP_TX on success, STMMAC_XDP_CONSUMED if the ring is
 * too full or mapping fails (caller drops the frame).
 * Must be called with the queue's netdev TX lock held - TODO confirm
 * against callers (stmmac_xdp_xmit_back takes __netif_tx_lock).
 */
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	/* Keep headroom in the ring so the slow (skb) path can coexist */
	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	/* Pick the right descriptor layout: extended, enhanced-TBS, or basic */
	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		/* Page is pool-mapped; data sits after the xdp_frame struct
		 * and its headroom within the same page.
		 */
		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	/* Remember the frame so stmmac_tx_clean can release it later */
	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	/* Single-segment descriptor, OWN bit set, checksum offload on */
	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       true, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	/* Raise the IC (interrupt-on-completion) bit once every
	 * tx_coal_frames descriptors to bound completion latency.
	 */
	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		priv->xstats.tx_set_ic_bit++;
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
4615be8b38a7SOng Boon Leong { 4616be8b38a7SOng Boon Leong int index = cpu; 4617be8b38a7SOng Boon Leong 4618be8b38a7SOng Boon Leong if (unlikely(index < 0)) 4619be8b38a7SOng Boon Leong index = 0; 4620be8b38a7SOng Boon Leong 4621be8b38a7SOng Boon Leong while (index >= priv->plat->tx_queues_to_use) 4622be8b38a7SOng Boon Leong index -= priv->plat->tx_queues_to_use; 4623be8b38a7SOng Boon Leong 4624be8b38a7SOng Boon Leong return index; 4625be8b38a7SOng Boon Leong } 4626be8b38a7SOng Boon Leong 4627be8b38a7SOng Boon Leong static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, 4628be8b38a7SOng Boon Leong struct xdp_buff *xdp) 4629be8b38a7SOng Boon Leong { 4630be8b38a7SOng Boon Leong struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 4631be8b38a7SOng Boon Leong int cpu = smp_processor_id(); 4632be8b38a7SOng Boon Leong struct netdev_queue *nq; 4633be8b38a7SOng Boon Leong int queue; 4634be8b38a7SOng Boon Leong int res; 4635be8b38a7SOng Boon Leong 4636be8b38a7SOng Boon Leong if (unlikely(!xdpf)) 4637be8b38a7SOng Boon Leong return STMMAC_XDP_CONSUMED; 4638be8b38a7SOng Boon Leong 4639be8b38a7SOng Boon Leong queue = stmmac_xdp_get_tx_queue(priv, cpu); 4640be8b38a7SOng Boon Leong nq = netdev_get_tx_queue(priv->dev, queue); 4641be8b38a7SOng Boon Leong 4642be8b38a7SOng Boon Leong __netif_tx_lock(nq, cpu); 4643be8b38a7SOng Boon Leong /* Avoids TX time-out as we are sharing with slow path */ 4644be8b38a7SOng Boon Leong nq->trans_start = jiffies; 4645be8b38a7SOng Boon Leong 46468b278a5bSOng Boon Leong res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); 4647be8b38a7SOng Boon Leong if (res == STMMAC_XDP_TX) 4648be8b38a7SOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 4649be8b38a7SOng Boon Leong 4650be8b38a7SOng Boon Leong __netif_tx_unlock(nq); 4651be8b38a7SOng Boon Leong 4652be8b38a7SOng Boon Leong return res; 4653be8b38a7SOng Boon Leong } 4654be8b38a7SOng Boon Leong 4655bba71cacSOng Boon Leong static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, 
4656bba71cacSOng Boon Leong struct bpf_prog *prog, 46575fabb012SOng Boon Leong struct xdp_buff *xdp) 46585fabb012SOng Boon Leong { 46595fabb012SOng Boon Leong u32 act; 4660bba71cacSOng Boon Leong int res; 46615fabb012SOng Boon Leong 46625fabb012SOng Boon Leong act = bpf_prog_run_xdp(prog, xdp); 46635fabb012SOng Boon Leong switch (act) { 46645fabb012SOng Boon Leong case XDP_PASS: 46655fabb012SOng Boon Leong res = STMMAC_XDP_PASS; 46665fabb012SOng Boon Leong break; 4667be8b38a7SOng Boon Leong case XDP_TX: 4668be8b38a7SOng Boon Leong res = stmmac_xdp_xmit_back(priv, xdp); 4669be8b38a7SOng Boon Leong break; 46708b278a5bSOng Boon Leong case XDP_REDIRECT: 46718b278a5bSOng Boon Leong if (xdp_do_redirect(priv->dev, xdp, prog) < 0) 46728b278a5bSOng Boon Leong res = STMMAC_XDP_CONSUMED; 46738b278a5bSOng Boon Leong else 46748b278a5bSOng Boon Leong res = STMMAC_XDP_REDIRECT; 46758b278a5bSOng Boon Leong break; 46765fabb012SOng Boon Leong default: 46775fabb012SOng Boon Leong bpf_warn_invalid_xdp_action(act); 46785fabb012SOng Boon Leong fallthrough; 46795fabb012SOng Boon Leong case XDP_ABORTED: 46805fabb012SOng Boon Leong trace_xdp_exception(priv->dev, prog, act); 46815fabb012SOng Boon Leong fallthrough; 46825fabb012SOng Boon Leong case XDP_DROP: 46835fabb012SOng Boon Leong res = STMMAC_XDP_CONSUMED; 46845fabb012SOng Boon Leong break; 46855fabb012SOng Boon Leong } 46865fabb012SOng Boon Leong 4687bba71cacSOng Boon Leong return res; 4688bba71cacSOng Boon Leong } 4689bba71cacSOng Boon Leong 4690bba71cacSOng Boon Leong static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, 4691bba71cacSOng Boon Leong struct xdp_buff *xdp) 4692bba71cacSOng Boon Leong { 4693bba71cacSOng Boon Leong struct bpf_prog *prog; 4694bba71cacSOng Boon Leong int res; 4695bba71cacSOng Boon Leong 4696bba71cacSOng Boon Leong prog = READ_ONCE(priv->xdp_prog); 4697bba71cacSOng Boon Leong if (!prog) { 4698bba71cacSOng Boon Leong res = STMMAC_XDP_PASS; 46992f1e432dSToke Høiland-Jørgensen goto out; 
4700bba71cacSOng Boon Leong } 4701bba71cacSOng Boon Leong 4702bba71cacSOng Boon Leong res = __stmmac_xdp_run_prog(priv, prog, xdp); 47032f1e432dSToke Høiland-Jørgensen out: 47045fabb012SOng Boon Leong return ERR_PTR(-res); 47055fabb012SOng Boon Leong } 47065fabb012SOng Boon Leong 4707be8b38a7SOng Boon Leong static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, 4708be8b38a7SOng Boon Leong int xdp_status) 4709be8b38a7SOng Boon Leong { 4710be8b38a7SOng Boon Leong int cpu = smp_processor_id(); 4711be8b38a7SOng Boon Leong int queue; 4712be8b38a7SOng Boon Leong 4713be8b38a7SOng Boon Leong queue = stmmac_xdp_get_tx_queue(priv, cpu); 4714be8b38a7SOng Boon Leong 4715be8b38a7SOng Boon Leong if (xdp_status & STMMAC_XDP_TX) 4716be8b38a7SOng Boon Leong stmmac_tx_timer_arm(priv, queue); 47178b278a5bSOng Boon Leong 47188b278a5bSOng Boon Leong if (xdp_status & STMMAC_XDP_REDIRECT) 47198b278a5bSOng Boon Leong xdp_do_flush(); 4720be8b38a7SOng Boon Leong } 4721be8b38a7SOng Boon Leong 4722bba2556eSOng Boon Leong static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, 4723bba2556eSOng Boon Leong struct xdp_buff *xdp) 4724bba2556eSOng Boon Leong { 4725bba2556eSOng Boon Leong unsigned int metasize = xdp->data - xdp->data_meta; 4726bba2556eSOng Boon Leong unsigned int datasize = xdp->data_end - xdp->data; 4727bba2556eSOng Boon Leong struct sk_buff *skb; 4728bba2556eSOng Boon Leong 4729132c32eeSOng Boon Leong skb = __napi_alloc_skb(&ch->rxtx_napi, 4730bba2556eSOng Boon Leong xdp->data_end - xdp->data_hard_start, 4731bba2556eSOng Boon Leong GFP_ATOMIC | __GFP_NOWARN); 4732bba2556eSOng Boon Leong if (unlikely(!skb)) 4733bba2556eSOng Boon Leong return NULL; 4734bba2556eSOng Boon Leong 4735bba2556eSOng Boon Leong skb_reserve(skb, xdp->data - xdp->data_hard_start); 4736bba2556eSOng Boon Leong memcpy(__skb_put(skb, datasize), xdp->data, datasize); 4737bba2556eSOng Boon Leong if (metasize) 4738bba2556eSOng Boon Leong skb_metadata_set(skb, metasize); 4739bba2556eSOng Boon 
Leong 4740bba2556eSOng Boon Leong return skb; 4741bba2556eSOng Boon Leong } 4742bba2556eSOng Boon Leong 4743bba2556eSOng Boon Leong static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, 4744bba2556eSOng Boon Leong struct dma_desc *p, struct dma_desc *np, 4745bba2556eSOng Boon Leong struct xdp_buff *xdp) 4746bba2556eSOng Boon Leong { 4747bba2556eSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 4748bba2556eSOng Boon Leong unsigned int len = xdp->data_end - xdp->data; 4749bba2556eSOng Boon Leong enum pkt_hash_types hash_type; 4750bba2556eSOng Boon Leong int coe = priv->hw->rx_csum; 4751bba2556eSOng Boon Leong struct sk_buff *skb; 4752bba2556eSOng Boon Leong u32 hash; 4753bba2556eSOng Boon Leong 4754bba2556eSOng Boon Leong skb = stmmac_construct_skb_zc(ch, xdp); 4755bba2556eSOng Boon Leong if (!skb) { 4756bba2556eSOng Boon Leong priv->dev->stats.rx_dropped++; 4757bba2556eSOng Boon Leong return; 4758bba2556eSOng Boon Leong } 4759bba2556eSOng Boon Leong 4760bba2556eSOng Boon Leong stmmac_get_rx_hwtstamp(priv, p, np, skb); 4761bba2556eSOng Boon Leong stmmac_rx_vlan(priv->dev, skb); 4762bba2556eSOng Boon Leong skb->protocol = eth_type_trans(skb, priv->dev); 4763bba2556eSOng Boon Leong 4764bba2556eSOng Boon Leong if (unlikely(!coe)) 4765bba2556eSOng Boon Leong skb_checksum_none_assert(skb); 4766bba2556eSOng Boon Leong else 4767bba2556eSOng Boon Leong skb->ip_summed = CHECKSUM_UNNECESSARY; 4768bba2556eSOng Boon Leong 4769bba2556eSOng Boon Leong if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 4770bba2556eSOng Boon Leong skb_set_hash(skb, hash, hash_type); 4771bba2556eSOng Boon Leong 4772bba2556eSOng Boon Leong skb_record_rx_queue(skb, queue); 4773132c32eeSOng Boon Leong napi_gro_receive(&ch->rxtx_napi, skb); 4774bba2556eSOng Boon Leong 4775bba2556eSOng Boon Leong priv->dev->stats.rx_packets++; 4776bba2556eSOng Boon Leong priv->dev->stats.rx_bytes += len; 4777bba2556eSOng Boon Leong } 4778bba2556eSOng Boon Leong 4779bba2556eSOng Boon 
/* Refill up to @budget RX descriptors of @queue with fresh XSK (AF_XDP
 * zero-copy) buffers from the pool.  Returns false if the pool ran dry
 * before the budget was exhausted, true otherwise.
 */
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	/* Never refill past what the hardware has actually consumed */
	budget = min(budget, stmmac_rx_dirty(priv, queue));

	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				/* Pool exhausted; report so the caller can
				 * set the need_wakeup flag.
				 */
				ret = false;
				break;
			}
		}

		if (priv->extend_desc)
			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			rx_desc = rx_q->dma_rx + entry;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		/* No secondary (split-header) buffer in zero-copy mode */
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		/* RX coalescing bookkeeping, same scheme as stmmac_rx_refill */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Publish all descriptor fields before flipping OWN */
		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}

	if (rx_desc) {
		/* At least one descriptor was re-armed: advance the tail
		 * pointer so the DMA picks the new buffers up.
		 */
		rx_q->dirty_rx = entry;
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->dirty_rx * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
	}

	return ret;
}
/* Zero-copy (AF_XDP) RX poll loop for @queue, processing at most @limit
 * frames.  Each completed descriptor's XSK buffer is run through the XDP
 * program; PASS frames are copied into skbs and delivered, TX/REDIRECT
 * verdicts are accumulated in @xdp_status and finalized at the end.
 * Returns the number of frames processed, or @limit if buffer-pool
 * refill failed (to keep NAPI polling).
 */
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	unsigned int count = 0, error = 0, len = 0;
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int next_entry = rx_q->cur_rx;
	unsigned int desc_size;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
	int status = 0;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		struct stmmac_rx_buffer *buf;
		unsigned int buf1_len = 0;
		struct dma_desc *np, *p;
		int entry;
		int res;

		/* Resume a frame whose descriptors straddled the previous
		 * poll (state saved below when rx_not_ls was pending).
		 */
		if (!count && rx_q->state_saved) {
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
		buf1_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		/* Batch-refill freed slots once enough have accumulated */
		if (dirty >= STMMAC_RX_FILL_BATCH) {
			failure = failure ||
				  !stmmac_rx_refill_zc(priv, queue, dirty);
			dirty = 0;
		}

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
					  &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		/* Prefetch the next RX descriptor */
		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
						  &priv->xstats,
						  rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			/* Bad frame: recycle the XSK buffer and remember the
			 * error so trailing descriptors are skipped too.
			 */
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			count++;
			continue;
		}

		/* Ensure a valid XSK buffer before proceed */
		if (!buf->xdp)
			break;

		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
		if (likely(status & rx_not_ls)) {
			/* Multi-descriptor frame cannot be handled zero-copy:
			 * drop the intermediate buffer and keep reading.
			 */
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			count++;
			goto read_again;
		}

		/* XDP ZC Frame only support primary buffers for now */
		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;

		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
		 * Type frames (LLC/LLC-SNAP)
		 *
		 * llc_snap is never checked in GMAC >= 4, so this ACS
		 * feature is always disabled and packets need to be
		 * stripped manually.
		 */
		if (likely(!(status & rx_not_ls)) &&
		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
		     unlikely(status != llc_snap))) {
			buf1_len -= ETH_FCS_LEN;
			len -= ETH_FCS_LEN;
		}

		/* RX buffer is good and fit into a XSK pool buffer */
		buf->xdp->data_end = buf->xdp->data + buf1_len;
		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);

		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

		switch (res) {
		case STMMAC_XDP_PASS:
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
			priv->dev->stats.rx_dropped++;
			break;
		case STMMAC_XDP_TX:
		case STMMAC_XDP_REDIRECT:
			/* Buffer ownership moved to the TX/redirect path */
			xdp_status |= res;
			break;
		}

		buf->xdp = NULL;
		dirty++;
		count++;
	}

	/* Save partial-frame state so the next poll can resume it */
	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	priv->xstats.rx_pkt_n += count;
	priv->xstats.rxq_stats[queue].rx_pkt_n += count;

	/* AF_XDP need_wakeup protocol: tell user space whether it must
	 * kick the kernel to get more buffers posted.
	 */
	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);

		return (int)count;
	}

	return failure ? limit : (int)count;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
502632ceabcaSGiuseppe CAVALLARO */ 502754139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 50287ac6653aSJeff Kirsher { 502954139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 50308fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 5031ec222003SJose Abreu unsigned int count = 0, error = 0, len = 0; 5032ec222003SJose Abreu int status = 0, coe = priv->hw->rx_csum; 503307b39753SAaro Koskinen unsigned int next_entry = rx_q->cur_rx; 50345fabb012SOng Boon Leong enum dma_data_direction dma_dir; 5035bfaf91caSJoakim Zhang unsigned int desc_size; 5036ec222003SJose Abreu struct sk_buff *skb = NULL; 50375fabb012SOng Boon Leong struct xdp_buff xdp; 5038be8b38a7SOng Boon Leong int xdp_status = 0; 50395fabb012SOng Boon Leong int buf_sz; 50405fabb012SOng Boon Leong 50415fabb012SOng Boon Leong dma_dir = page_pool_get_dma_dir(rx_q->page_pool); 50425fabb012SOng Boon Leong buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 50437ac6653aSJeff Kirsher 504483d7af64SGiuseppe CAVALLARO if (netif_msg_rx_status(priv)) { 5045d0225e7dSAlexandre TORGUE void *rx_head; 5046d0225e7dSAlexandre TORGUE 504738ddc59dSLABBE Corentin netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5048bfaf91caSJoakim Zhang if (priv->extend_desc) { 504954139cf3SJoao Pinto rx_head = (void *)rx_q->dma_erx; 5050bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_extended_desc); 5051bfaf91caSJoakim Zhang } else { 505254139cf3SJoao Pinto rx_head = (void *)rx_q->dma_rx; 5053bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_desc); 5054bfaf91caSJoakim Zhang } 5055d0225e7dSAlexandre TORGUE 5056bfaf91caSJoakim Zhang stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, 5057bfaf91caSJoakim Zhang rx_q->dma_rx_phy, desc_size); 50587ac6653aSJeff Kirsher } 5059c24602efSGiuseppe CAVALLARO while (count < limit) { 506088ebe2cfSJose Abreu unsigned int buf1_len = 0, buf2_len = 0; 5061ec222003SJose Abreu enum pkt_hash_types hash_type; 
50622af6106aSJose Abreu struct stmmac_rx_buffer *buf; 50632af6106aSJose Abreu struct dma_desc *np, *p; 5064ec222003SJose Abreu int entry; 5065ec222003SJose Abreu u32 hash; 50667ac6653aSJeff Kirsher 5067ec222003SJose Abreu if (!count && rx_q->state_saved) { 5068ec222003SJose Abreu skb = rx_q->state.skb; 5069ec222003SJose Abreu error = rx_q->state.error; 5070ec222003SJose Abreu len = rx_q->state.len; 5071ec222003SJose Abreu } else { 5072ec222003SJose Abreu rx_q->state_saved = false; 5073ec222003SJose Abreu skb = NULL; 5074ec222003SJose Abreu error = 0; 5075ec222003SJose Abreu len = 0; 5076ec222003SJose Abreu } 5077ec222003SJose Abreu 5078ec222003SJose Abreu if (count >= limit) 5079ec222003SJose Abreu break; 5080ec222003SJose Abreu 5081ec222003SJose Abreu read_again: 508288ebe2cfSJose Abreu buf1_len = 0; 508388ebe2cfSJose Abreu buf2_len = 0; 508407b39753SAaro Koskinen entry = next_entry; 50852af6106aSJose Abreu buf = &rx_q->buf_pool[entry]; 508607b39753SAaro Koskinen 5087c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 508854139cf3SJoao Pinto p = (struct dma_desc *)(rx_q->dma_erx + entry); 5089c24602efSGiuseppe CAVALLARO else 509054139cf3SJoao Pinto p = rx_q->dma_rx + entry; 5091c24602efSGiuseppe CAVALLARO 5092c1fa3212SFabrice Gasnier /* read the status of the incoming frame */ 509342de047dSJose Abreu status = stmmac_rx_status(priv, &priv->dev->stats, 5094c1fa3212SFabrice Gasnier &priv->xstats, p); 5095c1fa3212SFabrice Gasnier /* check if managed by the DMA otherwise go ahead */ 5096c1fa3212SFabrice Gasnier if (unlikely(status & dma_own)) 50977ac6653aSJeff Kirsher break; 50987ac6653aSJeff Kirsher 5099aa042f60SSong, Yoong Siang rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5100aa042f60SSong, Yoong Siang priv->dma_rx_size); 510154139cf3SJoao Pinto next_entry = rx_q->cur_rx; 5102e3ad57c9SGiuseppe Cavallaro 5103c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 510454139cf3SJoao Pinto np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5105c24602efSGiuseppe CAVALLARO 
else 510654139cf3SJoao Pinto np = rx_q->dma_rx + next_entry; 5107ba1ffd74SGiuseppe CAVALLARO 5108ba1ffd74SGiuseppe CAVALLARO prefetch(np); 51097ac6653aSJeff Kirsher 511042de047dSJose Abreu if (priv->extend_desc) 511142de047dSJose Abreu stmmac_rx_extended_status(priv, &priv->dev->stats, 511242de047dSJose Abreu &priv->xstats, rx_q->dma_erx + entry); 5113891434b1SRayagond Kokatanur if (unlikely(status == discard_frame)) { 51142af6106aSJose Abreu page_pool_recycle_direct(rx_q->page_pool, buf->page); 51152af6106aSJose Abreu buf->page = NULL; 5116ec222003SJose Abreu error = 1; 51170b273ca4SJose Abreu if (!priv->hwts_rx_en) 51180b273ca4SJose Abreu priv->dev->stats.rx_errors++; 5119ec222003SJose Abreu } 5120f748be53SAlexandre TORGUE 5121ec222003SJose Abreu if (unlikely(error && (status & rx_not_ls))) 5122ec222003SJose Abreu goto read_again; 5123ec222003SJose Abreu if (unlikely(error)) { 5124ec222003SJose Abreu dev_kfree_skb(skb); 512588ebe2cfSJose Abreu skb = NULL; 5126cda4985aSJose Abreu count++; 512707b39753SAaro Koskinen continue; 5128e527c4a7SGiuseppe CAVALLARO } 5129e527c4a7SGiuseppe CAVALLARO 5130ec222003SJose Abreu /* Buffer is good. Go on. 
*/ 5131ec222003SJose Abreu 51324744bf07SMatteo Croce prefetch(page_address(buf->page) + buf->page_offset); 513388ebe2cfSJose Abreu if (buf->sec_page) 513488ebe2cfSJose Abreu prefetch(page_address(buf->sec_page)); 513588ebe2cfSJose Abreu 513688ebe2cfSJose Abreu buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 513788ebe2cfSJose Abreu len += buf1_len; 513888ebe2cfSJose Abreu buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 513988ebe2cfSJose Abreu len += buf2_len; 5140ec222003SJose Abreu 51417ac6653aSJeff Kirsher /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 5142ceb69499SGiuseppe CAVALLARO * Type frames (LLC/LLC-SNAP) 5143565020aaSJose Abreu * 5144565020aaSJose Abreu * llc_snap is never checked in GMAC >= 4, so this ACS 5145565020aaSJose Abreu * feature is always disabled and packets need to be 5146565020aaSJose Abreu * stripped manually. 5147ceb69499SGiuseppe CAVALLARO */ 514893b5dce4SJose Abreu if (likely(!(status & rx_not_ls)) && 514993b5dce4SJose Abreu (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || 515093b5dce4SJose Abreu unlikely(status != llc_snap))) { 515188ebe2cfSJose Abreu if (buf2_len) 515288ebe2cfSJose Abreu buf2_len -= ETH_FCS_LEN; 515388ebe2cfSJose Abreu else 515488ebe2cfSJose Abreu buf1_len -= ETH_FCS_LEN; 515588ebe2cfSJose Abreu 5156ec222003SJose Abreu len -= ETH_FCS_LEN; 515783d7af64SGiuseppe CAVALLARO } 515822ad3838SGiuseppe Cavallaro 5159ec222003SJose Abreu if (!skb) { 5160be8b38a7SOng Boon Leong unsigned int pre_len, sync_len; 5161be8b38a7SOng Boon Leong 51625fabb012SOng Boon Leong dma_sync_single_for_cpu(priv->device, buf->addr, 51635fabb012SOng Boon Leong buf1_len, dma_dir); 51645fabb012SOng Boon Leong 5165d172268fSMatteo Croce xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq); 5166d172268fSMatteo Croce xdp_prepare_buff(&xdp, page_address(buf->page), 5167d172268fSMatteo Croce buf->page_offset, buf1_len, false); 51685fabb012SOng Boon Leong 5169be8b38a7SOng Boon Leong pre_len = xdp.data_end - xdp.data_hard_start - 5170be8b38a7SOng Boon 
Leong buf->page_offset; 51715fabb012SOng Boon Leong skb = stmmac_xdp_run_prog(priv, &xdp); 5172be8b38a7SOng Boon Leong /* Due xdp_adjust_tail: DMA sync for_device 5173be8b38a7SOng Boon Leong * cover max len CPU touch 5174be8b38a7SOng Boon Leong */ 5175be8b38a7SOng Boon Leong sync_len = xdp.data_end - xdp.data_hard_start - 5176be8b38a7SOng Boon Leong buf->page_offset; 5177be8b38a7SOng Boon Leong sync_len = max(sync_len, pre_len); 51785fabb012SOng Boon Leong 51795fabb012SOng Boon Leong /* For Not XDP_PASS verdict */ 51805fabb012SOng Boon Leong if (IS_ERR(skb)) { 51815fabb012SOng Boon Leong unsigned int xdp_res = -PTR_ERR(skb); 51825fabb012SOng Boon Leong 51835fabb012SOng Boon Leong if (xdp_res & STMMAC_XDP_CONSUMED) { 5184be8b38a7SOng Boon Leong page_pool_put_page(rx_q->page_pool, 5185be8b38a7SOng Boon Leong virt_to_head_page(xdp.data), 5186be8b38a7SOng Boon Leong sync_len, true); 51875fabb012SOng Boon Leong buf->page = NULL; 51885fabb012SOng Boon Leong priv->dev->stats.rx_dropped++; 51895fabb012SOng Boon Leong 51905fabb012SOng Boon Leong /* Clear skb as it was set as 51915fabb012SOng Boon Leong * status by XDP program. 
51925fabb012SOng Boon Leong */ 51935fabb012SOng Boon Leong skb = NULL; 51945fabb012SOng Boon Leong 51955fabb012SOng Boon Leong if (unlikely((status & rx_not_ls))) 51965fabb012SOng Boon Leong goto read_again; 51975fabb012SOng Boon Leong 51985fabb012SOng Boon Leong count++; 51995fabb012SOng Boon Leong continue; 52008b278a5bSOng Boon Leong } else if (xdp_res & (STMMAC_XDP_TX | 52018b278a5bSOng Boon Leong STMMAC_XDP_REDIRECT)) { 5202be8b38a7SOng Boon Leong xdp_status |= xdp_res; 5203be8b38a7SOng Boon Leong buf->page = NULL; 5204be8b38a7SOng Boon Leong skb = NULL; 5205be8b38a7SOng Boon Leong count++; 5206be8b38a7SOng Boon Leong continue; 52075fabb012SOng Boon Leong } 52085fabb012SOng Boon Leong } 52095fabb012SOng Boon Leong } 52105fabb012SOng Boon Leong 52115fabb012SOng Boon Leong if (!skb) { 52125fabb012SOng Boon Leong /* XDP program may expand or reduce tail */ 52135fabb012SOng Boon Leong buf1_len = xdp.data_end - xdp.data; 52145fabb012SOng Boon Leong 521588ebe2cfSJose Abreu skb = napi_alloc_skb(&ch->rx_napi, buf1_len); 5216ec222003SJose Abreu if (!skb) { 521722ad3838SGiuseppe Cavallaro priv->dev->stats.rx_dropped++; 5218cda4985aSJose Abreu count++; 521988ebe2cfSJose Abreu goto drain_data; 522022ad3838SGiuseppe Cavallaro } 522122ad3838SGiuseppe Cavallaro 52225fabb012SOng Boon Leong /* XDP program may adjust header */ 52235fabb012SOng Boon Leong skb_copy_to_linear_data(skb, xdp.data, buf1_len); 522488ebe2cfSJose Abreu skb_put(skb, buf1_len); 522522ad3838SGiuseppe Cavallaro 5226ec222003SJose Abreu /* Data payload copied into SKB, page ready for recycle */ 5227ec222003SJose Abreu page_pool_recycle_direct(rx_q->page_pool, buf->page); 5228ec222003SJose Abreu buf->page = NULL; 522988ebe2cfSJose Abreu } else if (buf1_len) { 5230ec222003SJose Abreu dma_sync_single_for_cpu(priv->device, buf->addr, 52315fabb012SOng Boon Leong buf1_len, dma_dir); 5232ec222003SJose Abreu skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 52335fabb012SOng Boon Leong buf->page, buf->page_offset, 
buf1_len, 5234ec222003SJose Abreu priv->dma_buf_sz); 5235ec222003SJose Abreu 5236ec222003SJose Abreu /* Data payload appended into SKB */ 5237ec222003SJose Abreu page_pool_release_page(rx_q->page_pool, buf->page); 5238ec222003SJose Abreu buf->page = NULL; 52397ac6653aSJeff Kirsher } 524083d7af64SGiuseppe CAVALLARO 524188ebe2cfSJose Abreu if (buf2_len) { 524267afd6d1SJose Abreu dma_sync_single_for_cpu(priv->device, buf->sec_addr, 52435fabb012SOng Boon Leong buf2_len, dma_dir); 524467afd6d1SJose Abreu skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 524588ebe2cfSJose Abreu buf->sec_page, 0, buf2_len, 524667afd6d1SJose Abreu priv->dma_buf_sz); 524767afd6d1SJose Abreu 524867afd6d1SJose Abreu /* Data payload appended into SKB */ 524967afd6d1SJose Abreu page_pool_release_page(rx_q->page_pool, buf->sec_page); 525067afd6d1SJose Abreu buf->sec_page = NULL; 525167afd6d1SJose Abreu } 525267afd6d1SJose Abreu 525388ebe2cfSJose Abreu drain_data: 5254ec222003SJose Abreu if (likely(status & rx_not_ls)) 5255ec222003SJose Abreu goto read_again; 525688ebe2cfSJose Abreu if (!skb) 525788ebe2cfSJose Abreu continue; 5258ec222003SJose Abreu 5259ec222003SJose Abreu /* Got entire packet into SKB. Finish it. 
*/ 5260ec222003SJose Abreu 5261ba1ffd74SGiuseppe CAVALLARO stmmac_get_rx_hwtstamp(priv, p, np, skb); 5262b9381985SVince Bridgers stmmac_rx_vlan(priv->dev, skb); 52637ac6653aSJeff Kirsher skb->protocol = eth_type_trans(skb, priv->dev); 52647ac6653aSJeff Kirsher 5265ceb69499SGiuseppe CAVALLARO if (unlikely(!coe)) 52667ac6653aSJeff Kirsher skb_checksum_none_assert(skb); 526762a2ab93SGiuseppe CAVALLARO else 52687ac6653aSJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY; 526962a2ab93SGiuseppe CAVALLARO 527076067459SJose Abreu if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 527176067459SJose Abreu skb_set_hash(skb, hash, hash_type); 527276067459SJose Abreu 527376067459SJose Abreu skb_record_rx_queue(skb, queue); 52744ccb4585SJose Abreu napi_gro_receive(&ch->rx_napi, skb); 527588ebe2cfSJose Abreu skb = NULL; 52767ac6653aSJeff Kirsher 52777ac6653aSJeff Kirsher priv->dev->stats.rx_packets++; 5278ec222003SJose Abreu priv->dev->stats.rx_bytes += len; 5279cda4985aSJose Abreu count++; 52807ac6653aSJeff Kirsher } 5281ec222003SJose Abreu 528288ebe2cfSJose Abreu if (status & rx_not_ls || skb) { 5283ec222003SJose Abreu rx_q->state_saved = true; 5284ec222003SJose Abreu rx_q->state.skb = skb; 5285ec222003SJose Abreu rx_q->state.error = error; 5286ec222003SJose Abreu rx_q->state.len = len; 52877ac6653aSJeff Kirsher } 52887ac6653aSJeff Kirsher 5289be8b38a7SOng Boon Leong stmmac_finalize_xdp_rx(priv, xdp_status); 5290be8b38a7SOng Boon Leong 529154139cf3SJoao Pinto stmmac_rx_refill(priv, queue); 52927ac6653aSJeff Kirsher 52937ac6653aSJeff Kirsher priv->xstats.rx_pkt_n += count; 5294*68e9c5deSVijayakannan Ayyathurai priv->xstats.rxq_stats[queue].rx_pkt_n += count; 52957ac6653aSJeff Kirsher 52967ac6653aSJeff Kirsher return count; 52977ac6653aSJeff Kirsher } 52987ac6653aSJeff Kirsher 52994ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 53007ac6653aSJeff Kirsher { 53018fce3331SJose Abreu struct stmmac_channel *ch = 53024ccb4585SJose Abreu 
container_of(napi, struct stmmac_channel, rx_napi); 53038fce3331SJose Abreu struct stmmac_priv *priv = ch->priv_data; 53048fce3331SJose Abreu u32 chan = ch->index; 53054ccb4585SJose Abreu int work_done; 53067ac6653aSJeff Kirsher 53079125cdd1SGiuseppe CAVALLARO priv->xstats.napi_poll++; 5308ce736788SJoao Pinto 5309132c32eeSOng Boon Leong work_done = stmmac_rx(priv, budget, chan); 5310021bd5e3SJose Abreu if (work_done < budget && napi_complete_done(napi, work_done)) { 5311021bd5e3SJose Abreu unsigned long flags; 5312021bd5e3SJose Abreu 5313021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 5314021bd5e3SJose Abreu stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 5315021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 5316021bd5e3SJose Abreu } 5317021bd5e3SJose Abreu 53184ccb4585SJose Abreu return work_done; 53194ccb4585SJose Abreu } 5320ce736788SJoao Pinto 53214ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 53224ccb4585SJose Abreu { 53234ccb4585SJose Abreu struct stmmac_channel *ch = 53244ccb4585SJose Abreu container_of(napi, struct stmmac_channel, tx_napi); 53254ccb4585SJose Abreu struct stmmac_priv *priv = ch->priv_data; 53264ccb4585SJose Abreu u32 chan = ch->index; 53274ccb4585SJose Abreu int work_done; 53284ccb4585SJose Abreu 53294ccb4585SJose Abreu priv->xstats.napi_poll++; 53304ccb4585SJose Abreu 5331132c32eeSOng Boon Leong work_done = stmmac_tx_clean(priv, budget, chan); 5332fa0be0a4SJose Abreu work_done = min(work_done, budget); 53338fce3331SJose Abreu 5334021bd5e3SJose Abreu if (work_done < budget && napi_complete_done(napi, work_done)) { 5335021bd5e3SJose Abreu unsigned long flags; 53364ccb4585SJose Abreu 5337021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 5338021bd5e3SJose Abreu stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 5339021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 5340fa0be0a4SJose Abreu } 53418fce3331SJose Abreu 53427ac6653aSJeff Kirsher return 
work_done; 53437ac6653aSJeff Kirsher } 53447ac6653aSJeff Kirsher 5345132c32eeSOng Boon Leong static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) 5346132c32eeSOng Boon Leong { 5347132c32eeSOng Boon Leong struct stmmac_channel *ch = 5348132c32eeSOng Boon Leong container_of(napi, struct stmmac_channel, rxtx_napi); 5349132c32eeSOng Boon Leong struct stmmac_priv *priv = ch->priv_data; 5350132c32eeSOng Boon Leong int rx_done, tx_done; 5351132c32eeSOng Boon Leong u32 chan = ch->index; 5352132c32eeSOng Boon Leong 5353132c32eeSOng Boon Leong priv->xstats.napi_poll++; 5354132c32eeSOng Boon Leong 5355132c32eeSOng Boon Leong tx_done = stmmac_tx_clean(priv, budget, chan); 5356132c32eeSOng Boon Leong tx_done = min(tx_done, budget); 5357132c32eeSOng Boon Leong 5358132c32eeSOng Boon Leong rx_done = stmmac_rx_zc(priv, budget, chan); 5359132c32eeSOng Boon Leong 5360132c32eeSOng Boon Leong /* If either TX or RX work is not complete, return budget 5361132c32eeSOng Boon Leong * and keep pooling 5362132c32eeSOng Boon Leong */ 5363132c32eeSOng Boon Leong if (tx_done >= budget || rx_done >= budget) 5364132c32eeSOng Boon Leong return budget; 5365132c32eeSOng Boon Leong 5366132c32eeSOng Boon Leong /* all work done, exit the polling mode */ 5367132c32eeSOng Boon Leong if (napi_complete_done(napi, rx_done)) { 5368132c32eeSOng Boon Leong unsigned long flags; 5369132c32eeSOng Boon Leong 5370132c32eeSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 5371132c32eeSOng Boon Leong /* Both RX and TX work done are compelte, 5372132c32eeSOng Boon Leong * so enable both RX & TX IRQs. 
5373132c32eeSOng Boon Leong */ 5374132c32eeSOng Boon Leong stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 5375132c32eeSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 5376132c32eeSOng Boon Leong } 5377132c32eeSOng Boon Leong 5378132c32eeSOng Boon Leong return min(rx_done, budget - 1); 5379132c32eeSOng Boon Leong } 5380132c32eeSOng Boon Leong 53817ac6653aSJeff Kirsher /** 53827ac6653aSJeff Kirsher * stmmac_tx_timeout 53837ac6653aSJeff Kirsher * @dev : Pointer to net device structure 5384d0ea5cbdSJesse Brandeburg * @txqueue: the index of the hanging transmit queue 53857ac6653aSJeff Kirsher * Description: this function is called when a packet transmission fails to 53867284a3f1SGiuseppe CAVALLARO * complete within a reasonable time. The driver will mark the error in the 53877ac6653aSJeff Kirsher * netdev structure and arrange for the device to be reset to a sane state 53887ac6653aSJeff Kirsher * in order to transmit a new packet. 53897ac6653aSJeff Kirsher */ 53900290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) 53917ac6653aSJeff Kirsher { 53927ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 53937ac6653aSJeff Kirsher 539434877a15SJose Abreu stmmac_global_err(priv); 53957ac6653aSJeff Kirsher } 53967ac6653aSJeff Kirsher 53977ac6653aSJeff Kirsher /** 539801789349SJiri Pirko * stmmac_set_rx_mode - entry point for multicast addressing 53997ac6653aSJeff Kirsher * @dev : pointer to the device structure 54007ac6653aSJeff Kirsher * Description: 54017ac6653aSJeff Kirsher * This function is a driver entry point which gets called by the kernel 54027ac6653aSJeff Kirsher * whenever multicast addresses must be enabled/disabled. 54037ac6653aSJeff Kirsher * Return value: 54047ac6653aSJeff Kirsher * void. 
54057ac6653aSJeff Kirsher */ 540601789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev) 54077ac6653aSJeff Kirsher { 54087ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 54097ac6653aSJeff Kirsher 5410c10d4c82SJose Abreu stmmac_set_filter(priv, priv->hw, dev); 54117ac6653aSJeff Kirsher } 54127ac6653aSJeff Kirsher 54137ac6653aSJeff Kirsher /** 54147ac6653aSJeff Kirsher * stmmac_change_mtu - entry point to change MTU size for the device. 54157ac6653aSJeff Kirsher * @dev : device pointer. 54167ac6653aSJeff Kirsher * @new_mtu : the new MTU size for the device. 54177ac6653aSJeff Kirsher * Description: the Maximum Transfer Unit (MTU) is used by the network layer 54187ac6653aSJeff Kirsher * to drive packet transmission. Ethernet has an MTU of 1500 octets 54197ac6653aSJeff Kirsher * (ETH_DATA_LEN). This value can be changed with ifconfig. 54207ac6653aSJeff Kirsher * Return value: 54217ac6653aSJeff Kirsher * 0 on success and an appropriate (-)ve integer as defined in errno.h 54227ac6653aSJeff Kirsher * file on failure. 
54237ac6653aSJeff Kirsher */ 54247ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 54257ac6653aSJeff Kirsher { 542638ddc59dSLABBE Corentin struct stmmac_priv *priv = netdev_priv(dev); 5427eaf4fac4SJose Abreu int txfifosz = priv->plat->tx_fifo_size; 54285b55299eSDavid Wu const int mtu = new_mtu; 5429eaf4fac4SJose Abreu 5430eaf4fac4SJose Abreu if (txfifosz == 0) 5431eaf4fac4SJose Abreu txfifosz = priv->dma_cap.tx_fifo_size; 5432eaf4fac4SJose Abreu 5433eaf4fac4SJose Abreu txfifosz /= priv->plat->tx_queues_to_use; 543438ddc59dSLABBE Corentin 54357ac6653aSJeff Kirsher if (netif_running(dev)) { 543638ddc59dSLABBE Corentin netdev_err(priv->dev, "must be stopped to change its MTU\n"); 54377ac6653aSJeff Kirsher return -EBUSY; 54387ac6653aSJeff Kirsher } 54397ac6653aSJeff Kirsher 54405fabb012SOng Boon Leong if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { 54415fabb012SOng Boon Leong netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); 54425fabb012SOng Boon Leong return -EINVAL; 54435fabb012SOng Boon Leong } 54445fabb012SOng Boon Leong 5445eaf4fac4SJose Abreu new_mtu = STMMAC_ALIGN(new_mtu); 5446eaf4fac4SJose Abreu 5447eaf4fac4SJose Abreu /* If condition true, FIFO is too small or MTU too large */ 5448eaf4fac4SJose Abreu if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 5449eaf4fac4SJose Abreu return -EINVAL; 5450eaf4fac4SJose Abreu 54515b55299eSDavid Wu dev->mtu = mtu; 5452f748be53SAlexandre TORGUE 54537ac6653aSJeff Kirsher netdev_update_features(dev); 54547ac6653aSJeff Kirsher 54557ac6653aSJeff Kirsher return 0; 54567ac6653aSJeff Kirsher } 54577ac6653aSJeff Kirsher 5458c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev, 5459c8f44affSMichał Mirosław netdev_features_t features) 54607ac6653aSJeff Kirsher { 54617ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 54627ac6653aSJeff Kirsher 546338912bdbSDeepak SIKRI if (priv->plat->rx_coe == 
STMMAC_RX_COE_NONE) 54647ac6653aSJeff Kirsher features &= ~NETIF_F_RXCSUM; 5465d2afb5bdSGiuseppe CAVALLARO 54667ac6653aSJeff Kirsher if (!priv->plat->tx_coe) 5467a188222bSTom Herbert features &= ~NETIF_F_CSUM_MASK; 54687ac6653aSJeff Kirsher 54697ac6653aSJeff Kirsher /* Some GMAC devices have a bugged Jumbo frame support that 54707ac6653aSJeff Kirsher * needs to have the Tx COE disabled for oversized frames 54717ac6653aSJeff Kirsher * (due to limited buffer sizes). In this case we disable 5472ceb69499SGiuseppe CAVALLARO * the TX csum insertion in the TDES and not use SF. 5473ceb69499SGiuseppe CAVALLARO */ 54747ac6653aSJeff Kirsher if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 5475a188222bSTom Herbert features &= ~NETIF_F_CSUM_MASK; 54767ac6653aSJeff Kirsher 5477f748be53SAlexandre TORGUE /* Disable tso if asked by ethtool */ 5478f748be53SAlexandre TORGUE if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 5479f748be53SAlexandre TORGUE if (features & NETIF_F_TSO) 5480f748be53SAlexandre TORGUE priv->tso = true; 5481f748be53SAlexandre TORGUE else 5482f748be53SAlexandre TORGUE priv->tso = false; 5483f748be53SAlexandre TORGUE } 5484f748be53SAlexandre TORGUE 54857ac6653aSJeff Kirsher return features; 54867ac6653aSJeff Kirsher } 54877ac6653aSJeff Kirsher 5488d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev, 5489d2afb5bdSGiuseppe CAVALLARO netdev_features_t features) 5490d2afb5bdSGiuseppe CAVALLARO { 5491d2afb5bdSGiuseppe CAVALLARO struct stmmac_priv *priv = netdev_priv(netdev); 549267afd6d1SJose Abreu bool sph_en; 549367afd6d1SJose Abreu u32 chan; 5494d2afb5bdSGiuseppe CAVALLARO 5495d2afb5bdSGiuseppe CAVALLARO /* Keep the COE Type in case of csum is supporting */ 5496d2afb5bdSGiuseppe CAVALLARO if (features & NETIF_F_RXCSUM) 5497d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = priv->plat->rx_coe; 5498d2afb5bdSGiuseppe CAVALLARO else 5499d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = 0; 5500d2afb5bdSGiuseppe CAVALLARO /* No 
check needed because rx_coe has been set before and it will be 5501d2afb5bdSGiuseppe CAVALLARO * fixed in case of issue. 5502d2afb5bdSGiuseppe CAVALLARO */ 5503c10d4c82SJose Abreu stmmac_rx_ipc(priv, priv->hw); 5504d2afb5bdSGiuseppe CAVALLARO 550567afd6d1SJose Abreu sph_en = (priv->hw->rx_csum > 0) && priv->sph; 55065fabb012SOng Boon Leong 550767afd6d1SJose Abreu for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) 550867afd6d1SJose Abreu stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 550967afd6d1SJose Abreu 5510d2afb5bdSGiuseppe CAVALLARO return 0; 5511d2afb5bdSGiuseppe CAVALLARO } 5512d2afb5bdSGiuseppe CAVALLARO 55135a558611SOng Boon Leong static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) 55145a558611SOng Boon Leong { 55155a558611SOng Boon Leong struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 55165a558611SOng Boon Leong enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 55175a558611SOng Boon Leong enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 55185a558611SOng Boon Leong bool *hs_enable = &fpe_cfg->hs_enable; 55195a558611SOng Boon Leong 55205a558611SOng Boon Leong if (status == FPE_EVENT_UNKNOWN || !*hs_enable) 55215a558611SOng Boon Leong return; 55225a558611SOng Boon Leong 55235a558611SOng Boon Leong /* If LP has sent verify mPacket, LP is FPE capable */ 55245a558611SOng Boon Leong if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { 55255a558611SOng Boon Leong if (*lp_state < FPE_STATE_CAPABLE) 55265a558611SOng Boon Leong *lp_state = FPE_STATE_CAPABLE; 55275a558611SOng Boon Leong 55285a558611SOng Boon Leong /* If user has requested FPE enable, quickly response */ 55295a558611SOng Boon Leong if (*hs_enable) 55305a558611SOng Boon Leong stmmac_fpe_send_mpacket(priv, priv->ioaddr, 55315a558611SOng Boon Leong MPACKET_RESPONSE); 55325a558611SOng Boon Leong } 55335a558611SOng Boon Leong 55345a558611SOng Boon Leong /* If Local has sent verify mPacket, Local is FPE capable */ 55355a558611SOng Boon Leong if 
((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
		if (*lo_state < FPE_STATE_CAPABLE)
			*lo_state = FPE_STATE_CAPABLE;
	}

	/* If LP has sent response mPacket, LP is entering FPE ON */
	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
		*lp_state = FPE_STATE_ENTERING_ON;

	/* If Local has sent response mPacket, Local is entering FPE ON */
	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
		*lo_state = FPE_STATE_ENTERING_ON;

	/* Kick the FPE verification worker unless teardown is in progress.
	 * test_and_set_bit() guarantees at most one queued instance.
	 */
	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
	    priv->fpe_wq) {
		queue_work(priv->fpe_wq, &priv->fpe_task);
	}
}

/**
 * stmmac_common_interrupt - handle non-DMA (MAC/MTL/EST/FPE) interrupt causes
 * @priv: driver private structure
 *
 * Shared between the legacy single-IRQ path (stmmac_interrupt) and the
 * dedicated MAC IRQ path (stmmac_mac_interrupt).  Processes wake-up events,
 * EST and FPE status, core LPI state changes, per-queue MTL status and the
 * PCS link state.
 */
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	/* MTL status must be polled for every queue, RX or TX */
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
				      &priv->xstats, tx_cnt);

	if (priv->dma_cap.fpesel) {
		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
						   priv->dev);

		stmmac_fpe_event_status(priv, status);
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		/* Reading the MTL status clears it; the value itself is
		 * not used here. */
		for (queue = 0; queue < queues_count; queue++) {
			status = stmmac_host_mtl_irq_status(priv, priv->hw,
							    queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(priv->dev);
			else
				netif_carrier_off(priv->dev);
		}

		stmmac_timestamp_interrupt(priv, priv);
	}
}

/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It can call:
 * o DMA service routine (to manage incoming frame reception and transmission
 *   status)
 * o Core interrupts to manage: remote wake-up, management counter, LPI
 *   interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
56417ac6653aSJeff Kirsher 56428532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 56438532f613SOng Boon Leong { 56448532f613SOng Boon Leong struct net_device *dev = (struct net_device *)dev_id; 56458532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 56468532f613SOng Boon Leong 56478532f613SOng Boon Leong if (unlikely(!dev)) { 56488532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 56498532f613SOng Boon Leong return IRQ_NONE; 56508532f613SOng Boon Leong } 56518532f613SOng Boon Leong 56528532f613SOng Boon Leong /* Check if adapter is up */ 56538532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 56548532f613SOng Boon Leong return IRQ_HANDLED; 56558532f613SOng Boon Leong 56568532f613SOng Boon Leong /* To handle Common interrupts */ 56578532f613SOng Boon Leong stmmac_common_interrupt(priv); 56588532f613SOng Boon Leong 56598532f613SOng Boon Leong return IRQ_HANDLED; 56608532f613SOng Boon Leong } 56618532f613SOng Boon Leong 56628532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 56638532f613SOng Boon Leong { 56648532f613SOng Boon Leong struct net_device *dev = (struct net_device *)dev_id; 56658532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 56668532f613SOng Boon Leong 56678532f613SOng Boon Leong if (unlikely(!dev)) { 56688532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 56698532f613SOng Boon Leong return IRQ_NONE; 56708532f613SOng Boon Leong } 56718532f613SOng Boon Leong 56728532f613SOng Boon Leong /* Check if adapter is up */ 56738532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 56748532f613SOng Boon Leong return IRQ_HANDLED; 56758532f613SOng Boon Leong 56768532f613SOng Boon Leong /* Check if a fatal error happened */ 56778532f613SOng Boon Leong stmmac_safety_feat_interrupt(priv); 56788532f613SOng Boon Leong 56798532f613SOng Boon Leong return IRQ_HANDLED; 
56808532f613SOng Boon Leong } 56818532f613SOng Boon Leong 56828532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 56838532f613SOng Boon Leong { 56848532f613SOng Boon Leong struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 56858532f613SOng Boon Leong int chan = tx_q->queue_index; 56868532f613SOng Boon Leong struct stmmac_priv *priv; 56878532f613SOng Boon Leong int status; 56888532f613SOng Boon Leong 56898532f613SOng Boon Leong priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); 56908532f613SOng Boon Leong 56918532f613SOng Boon Leong if (unlikely(!data)) { 56928532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 56938532f613SOng Boon Leong return IRQ_NONE; 56948532f613SOng Boon Leong } 56958532f613SOng Boon Leong 56968532f613SOng Boon Leong /* Check if adapter is up */ 56978532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 56988532f613SOng Boon Leong return IRQ_HANDLED; 56998532f613SOng Boon Leong 57008532f613SOng Boon Leong status = stmmac_napi_check(priv, chan, DMA_DIR_TX); 57018532f613SOng Boon Leong 57028532f613SOng Boon Leong if (unlikely(status & tx_hard_error_bump_tc)) { 57038532f613SOng Boon Leong /* Try to bump up the dma threshold on this failure */ 57048532f613SOng Boon Leong if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 57058532f613SOng Boon Leong tc <= 256) { 57068532f613SOng Boon Leong tc += 64; 57078532f613SOng Boon Leong if (priv->plat->force_thresh_dma_mode) 57088532f613SOng Boon Leong stmmac_set_dma_operation_mode(priv, 57098532f613SOng Boon Leong tc, 57108532f613SOng Boon Leong tc, 57118532f613SOng Boon Leong chan); 57128532f613SOng Boon Leong else 57138532f613SOng Boon Leong stmmac_set_dma_operation_mode(priv, 57148532f613SOng Boon Leong tc, 57158532f613SOng Boon Leong SF_DMA_MODE, 57168532f613SOng Boon Leong chan); 57178532f613SOng Boon Leong priv->xstats.threshold = tc; 57188532f613SOng Boon Leong } 57198532f613SOng Boon Leong } else if 
(unlikely(status == tx_hard_error)) { 57208532f613SOng Boon Leong stmmac_tx_err(priv, chan); 57218532f613SOng Boon Leong } 57228532f613SOng Boon Leong 57238532f613SOng Boon Leong return IRQ_HANDLED; 57248532f613SOng Boon Leong } 57258532f613SOng Boon Leong 57268532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) 57278532f613SOng Boon Leong { 57288532f613SOng Boon Leong struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; 57298532f613SOng Boon Leong int chan = rx_q->queue_index; 57308532f613SOng Boon Leong struct stmmac_priv *priv; 57318532f613SOng Boon Leong 57328532f613SOng Boon Leong priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); 57338532f613SOng Boon Leong 57348532f613SOng Boon Leong if (unlikely(!data)) { 57358532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 57368532f613SOng Boon Leong return IRQ_NONE; 57378532f613SOng Boon Leong } 57388532f613SOng Boon Leong 57398532f613SOng Boon Leong /* Check if adapter is up */ 57408532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 57418532f613SOng Boon Leong return IRQ_HANDLED; 57428532f613SOng Boon Leong 57438532f613SOng Boon Leong stmmac_napi_check(priv, chan, DMA_DIR_RX); 57448532f613SOng Boon Leong 57458532f613SOng Boon Leong return IRQ_HANDLED; 57468532f613SOng Boon Leong } 57478532f613SOng Boon Leong 57487ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 57497ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools 5750ceb69499SGiuseppe CAVALLARO * to allow network I/O with interrupts disabled. 
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int i;

	/* If adapter is down, do nothing */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	/* With per-queue MSI vectors there is no single legacy IRQ to
	 * disable; invoke each queue handler directly instead.
	 */
	if (priv->plat->multi_msi_en) {
		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
			stmmac_msi_intr_rx(0, &priv->rx_queue[i]);

		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
			stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
	} else {
		disable_irq(dev->irq);
		stmmac_interrupt(dev->irq, dev);
		enable_irq(dev->irq);
	}
}
#endif

/**
 * stmmac_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* MII register access is delegated to phylink */
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

/* Flow-block callback for tc classifier offload (CLSU32/CLSFLOWER).
 * Queues are quiesced while the hardware filter tables are reprogrammed.
 */
static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	__stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);

/* ndo_setup_tc entry point: dispatch tc offload requests (flow blocks and
 * the CBS/TAPRIO/ETF qdiscs) to the corresponding stmmac_tc helpers.
 */
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_select_queue: pin all TSO/USO traffic to queue 0. */
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's use always the Queue 0
		 * because if TSO/USO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

/* ndo_set_mac_address: update the MAC address in both the net_device and
 * the hardware filter registers.  The device is runtime-resumed while the
 * registers are written.
 */
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		/* get_sync bumped the usage count even on failure */
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

/* Dump one descriptor ring (normal or extended layout) to a seq_file,
 * one line per descriptor: index, DMA address, four descriptor words.
 */
static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	dma_addr_t dma_addr;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			dma_addr = dma_phy_addr + i * sizeof(*ep);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			dma_addr = dma_phy_addr + i * sizeof(*p);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

/* debugfs "descriptors_status" show: dump every RX and TX descriptor ring.
 * TX rings are skipped when TBS is in use (descriptor layout differs).
 */
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Rings only exist while the interface is up */
	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);

/* debugfs "dma_cap" show: report the HW feature (DMA capability) register
 * contents in human-readable form.
 */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* hw_cap_support is clear when the HW feature register is absent
	 * or its use was disabled via platform data. */
	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* Core >= 4.00 reports a single RX COE flag; older cores report
	 * type1/type2 variants separately. */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);

/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Ignore notifications for devices not driven by stmmac */
	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Keep the per-netdev debugfs directory name in sync */
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};

/* Create the per-netdev debugfs directory and its entries.  rtnl_lock is
 * held to serialize against NETDEV_CHANGENAME renames handled above.
 */
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}

/* Tear down the per-netdev debugfs directory. */
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

/* Bit-serial CRC-32 (little-endian, polynomial 0xedb88320) over the low
 * VLAN-ID bits of @vid_le, as used by the MAC's VLAN hash filter.
 */
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		/* Load the next input byte every 8 bits */
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}

/* Reprogram the VLAN filter from priv->active_vlans: hash filtering when
 * the HW supports it, otherwise fall back to a single perfect match.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid,
priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		/* Top 4 bits of the bit-reversed CRC select the hash bin */
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		/* No hash support: use the last VID as a perfect match */
		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}

/* ndo_vlan_rx_add_vid: track the VID and program hash + HW RX filters.
 * NOTE(review): unlike stmmac_vlan_rx_kill_vid below, this path does not
 * take a pm_runtime reference before touching MAC registers — confirm the
 * device is guaranteed active when VIDs are added.
 */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	/* 802.1ad (QinQ) means an outer/double VLAN tag */
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		/* Roll back the bookkeeping on failure */
		clear_bit(vid, priv->active_vlans);
		return ret;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return 0;
}

/* ndo_vlan_rx_kill_vid: drop the VID from the HW filter and rebuild the
 * hash.  The device is runtime-resumed around the register writes.
 */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		/* get_sync bumped the usage count even on failure */
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto del_vlan_error;
	}

	ret = stmmac_vlan_update(priv, is_double);

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}

/* ndo_bpf: dispatch XDP program attach and AF_XDP pool setup requests. */
static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
					     bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}
Boon Leong 62588b278a5bSOng Boon Leong static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, 62598b278a5bSOng Boon Leong struct xdp_frame **frames, u32 flags) 62608b278a5bSOng Boon Leong { 62618b278a5bSOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 62628b278a5bSOng Boon Leong int cpu = smp_processor_id(); 62638b278a5bSOng Boon Leong struct netdev_queue *nq; 62648b278a5bSOng Boon Leong int i, nxmit = 0; 62658b278a5bSOng Boon Leong int queue; 62668b278a5bSOng Boon Leong 62678b278a5bSOng Boon Leong if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) 62688b278a5bSOng Boon Leong return -ENETDOWN; 62698b278a5bSOng Boon Leong 62708b278a5bSOng Boon Leong if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 62718b278a5bSOng Boon Leong return -EINVAL; 62728b278a5bSOng Boon Leong 62738b278a5bSOng Boon Leong queue = stmmac_xdp_get_tx_queue(priv, cpu); 62748b278a5bSOng Boon Leong nq = netdev_get_tx_queue(priv->dev, queue); 62758b278a5bSOng Boon Leong 62768b278a5bSOng Boon Leong __netif_tx_lock(nq, cpu); 62778b278a5bSOng Boon Leong /* Avoids TX time-out as we are sharing with slow path */ 62788b278a5bSOng Boon Leong nq->trans_start = jiffies; 62798b278a5bSOng Boon Leong 62808b278a5bSOng Boon Leong for (i = 0; i < num_frames; i++) { 62818b278a5bSOng Boon Leong int res; 62828b278a5bSOng Boon Leong 62838b278a5bSOng Boon Leong res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); 62848b278a5bSOng Boon Leong if (res == STMMAC_XDP_CONSUMED) 62858b278a5bSOng Boon Leong break; 62868b278a5bSOng Boon Leong 62878b278a5bSOng Boon Leong nxmit++; 62888b278a5bSOng Boon Leong } 62898b278a5bSOng Boon Leong 62908b278a5bSOng Boon Leong if (flags & XDP_XMIT_FLUSH) { 62918b278a5bSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 62928b278a5bSOng Boon Leong stmmac_tx_timer_arm(priv, queue); 62938b278a5bSOng Boon Leong } 62948b278a5bSOng Boon Leong 62958b278a5bSOng Boon Leong __netif_tx_unlock(nq); 62968b278a5bSOng Boon Leong 62978b278a5bSOng Boon Leong return nxmit; 
62988b278a5bSOng Boon Leong } 62998b278a5bSOng Boon Leong 6300bba2556eSOng Boon Leong void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) 6301bba2556eSOng Boon Leong { 6302bba2556eSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6303bba2556eSOng Boon Leong unsigned long flags; 6304bba2556eSOng Boon Leong 6305bba2556eSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6306bba2556eSOng Boon Leong stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6307bba2556eSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6308bba2556eSOng Boon Leong 6309bba2556eSOng Boon Leong stmmac_stop_rx_dma(priv, queue); 6310bba2556eSOng Boon Leong __free_dma_rx_desc_resources(priv, queue); 6311bba2556eSOng Boon Leong } 6312bba2556eSOng Boon Leong 6313bba2556eSOng Boon Leong void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) 6314bba2556eSOng Boon Leong { 6315bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 6316bba2556eSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6317bba2556eSOng Boon Leong unsigned long flags; 6318bba2556eSOng Boon Leong u32 buf_size; 6319bba2556eSOng Boon Leong int ret; 6320bba2556eSOng Boon Leong 6321bba2556eSOng Boon Leong ret = __alloc_dma_rx_desc_resources(priv, queue); 6322bba2556eSOng Boon Leong if (ret) { 6323bba2556eSOng Boon Leong netdev_err(priv->dev, "Failed to alloc RX desc.\n"); 6324bba2556eSOng Boon Leong return; 6325bba2556eSOng Boon Leong } 6326bba2556eSOng Boon Leong 6327bba2556eSOng Boon Leong ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); 6328bba2556eSOng Boon Leong if (ret) { 6329bba2556eSOng Boon Leong __free_dma_rx_desc_resources(priv, queue); 6330bba2556eSOng Boon Leong netdev_err(priv->dev, "Failed to init RX desc.\n"); 6331bba2556eSOng Boon Leong return; 6332bba2556eSOng Boon Leong } 6333bba2556eSOng Boon Leong 6334bba2556eSOng Boon Leong stmmac_clear_rx_descriptors(priv, queue); 6335bba2556eSOng Boon Leong 6336bba2556eSOng Boon 
Leong stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6337bba2556eSOng Boon Leong rx_q->dma_rx_phy, rx_q->queue_index); 6338bba2556eSOng Boon Leong 6339bba2556eSOng Boon Leong rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * 6340bba2556eSOng Boon Leong sizeof(struct dma_desc)); 6341bba2556eSOng Boon Leong stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6342bba2556eSOng Boon Leong rx_q->rx_tail_addr, rx_q->queue_index); 6343bba2556eSOng Boon Leong 6344bba2556eSOng Boon Leong if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6345bba2556eSOng Boon Leong buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6346bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 6347bba2556eSOng Boon Leong buf_size, 6348bba2556eSOng Boon Leong rx_q->queue_index); 6349bba2556eSOng Boon Leong } else { 6350bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 6351bba2556eSOng Boon Leong priv->dma_buf_sz, 6352bba2556eSOng Boon Leong rx_q->queue_index); 6353bba2556eSOng Boon Leong } 6354bba2556eSOng Boon Leong 6355bba2556eSOng Boon Leong stmmac_start_rx_dma(priv, queue); 6356bba2556eSOng Boon Leong 6357bba2556eSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6358bba2556eSOng Boon Leong stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6359bba2556eSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6360bba2556eSOng Boon Leong } 6361bba2556eSOng Boon Leong 6362132c32eeSOng Boon Leong void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) 6363132c32eeSOng Boon Leong { 6364132c32eeSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6365132c32eeSOng Boon Leong unsigned long flags; 6366132c32eeSOng Boon Leong 6367132c32eeSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6368132c32eeSOng Boon Leong stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6369132c32eeSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6370132c32eeSOng Boon Leong 6371132c32eeSOng Boon Leong stmmac_stop_tx_dma(priv, queue); 
6372132c32eeSOng Boon Leong __free_dma_tx_desc_resources(priv, queue); 6373132c32eeSOng Boon Leong } 6374132c32eeSOng Boon Leong 6375132c32eeSOng Boon Leong void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) 6376132c32eeSOng Boon Leong { 6377132c32eeSOng Boon Leong struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 6378132c32eeSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6379132c32eeSOng Boon Leong unsigned long flags; 6380132c32eeSOng Boon Leong int ret; 6381132c32eeSOng Boon Leong 6382132c32eeSOng Boon Leong ret = __alloc_dma_tx_desc_resources(priv, queue); 6383132c32eeSOng Boon Leong if (ret) { 6384132c32eeSOng Boon Leong netdev_err(priv->dev, "Failed to alloc TX desc.\n"); 6385132c32eeSOng Boon Leong return; 6386132c32eeSOng Boon Leong } 6387132c32eeSOng Boon Leong 6388132c32eeSOng Boon Leong ret = __init_dma_tx_desc_rings(priv, queue); 6389132c32eeSOng Boon Leong if (ret) { 6390132c32eeSOng Boon Leong __free_dma_tx_desc_resources(priv, queue); 6391132c32eeSOng Boon Leong netdev_err(priv->dev, "Failed to init TX desc.\n"); 6392132c32eeSOng Boon Leong return; 6393132c32eeSOng Boon Leong } 6394132c32eeSOng Boon Leong 6395132c32eeSOng Boon Leong stmmac_clear_tx_descriptors(priv, queue); 6396132c32eeSOng Boon Leong 6397132c32eeSOng Boon Leong stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6398132c32eeSOng Boon Leong tx_q->dma_tx_phy, tx_q->queue_index); 6399132c32eeSOng Boon Leong 6400132c32eeSOng Boon Leong if (tx_q->tbs & STMMAC_TBS_AVAIL) 6401132c32eeSOng Boon Leong stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); 6402132c32eeSOng Boon Leong 6403132c32eeSOng Boon Leong tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6404132c32eeSOng Boon Leong stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6405132c32eeSOng Boon Leong tx_q->tx_tail_addr, tx_q->queue_index); 6406132c32eeSOng Boon Leong 6407132c32eeSOng Boon Leong stmmac_start_tx_dma(priv, queue); 6408132c32eeSOng Boon Leong 6409132c32eeSOng Boon Leong 
spin_lock_irqsave(&ch->lock, flags); 6410132c32eeSOng Boon Leong stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6411132c32eeSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6412132c32eeSOng Boon Leong } 6413132c32eeSOng Boon Leong 6414bba2556eSOng Boon Leong int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) 6415bba2556eSOng Boon Leong { 6416bba2556eSOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 6417bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q; 6418132c32eeSOng Boon Leong struct stmmac_tx_queue *tx_q; 6419bba2556eSOng Boon Leong struct stmmac_channel *ch; 6420bba2556eSOng Boon Leong 6421bba2556eSOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state) || 6422bba2556eSOng Boon Leong !netif_carrier_ok(priv->dev)) 6423bba2556eSOng Boon Leong return -ENETDOWN; 6424bba2556eSOng Boon Leong 6425bba2556eSOng Boon Leong if (!stmmac_xdp_is_enabled(priv)) 6426bba2556eSOng Boon Leong return -ENXIO; 6427bba2556eSOng Boon Leong 6428132c32eeSOng Boon Leong if (queue >= priv->plat->rx_queues_to_use || 6429132c32eeSOng Boon Leong queue >= priv->plat->tx_queues_to_use) 6430bba2556eSOng Boon Leong return -EINVAL; 6431bba2556eSOng Boon Leong 6432bba2556eSOng Boon Leong rx_q = &priv->rx_queue[queue]; 6433132c32eeSOng Boon Leong tx_q = &priv->tx_queue[queue]; 6434bba2556eSOng Boon Leong ch = &priv->channel[queue]; 6435bba2556eSOng Boon Leong 6436132c32eeSOng Boon Leong if (!rx_q->xsk_pool && !tx_q->xsk_pool) 6437bba2556eSOng Boon Leong return -ENXIO; 6438bba2556eSOng Boon Leong 6439132c32eeSOng Boon Leong if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { 6440bba2556eSOng Boon Leong /* EQoS does not have per-DMA channel SW interrupt, 6441bba2556eSOng Boon Leong * so we schedule RX Napi straight-away. 
6442bba2556eSOng Boon Leong */ 6443132c32eeSOng Boon Leong if (likely(napi_schedule_prep(&ch->rxtx_napi))) 6444132c32eeSOng Boon Leong __napi_schedule(&ch->rxtx_napi); 6445bba2556eSOng Boon Leong } 6446bba2556eSOng Boon Leong 6447bba2556eSOng Boon Leong return 0; 6448bba2556eSOng Boon Leong } 6449bba2556eSOng Boon Leong 64507ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = { 64517ac6653aSJeff Kirsher .ndo_open = stmmac_open, 64527ac6653aSJeff Kirsher .ndo_start_xmit = stmmac_xmit, 64537ac6653aSJeff Kirsher .ndo_stop = stmmac_release, 64547ac6653aSJeff Kirsher .ndo_change_mtu = stmmac_change_mtu, 64557ac6653aSJeff Kirsher .ndo_fix_features = stmmac_fix_features, 6456d2afb5bdSGiuseppe CAVALLARO .ndo_set_features = stmmac_set_features, 645701789349SJiri Pirko .ndo_set_rx_mode = stmmac_set_rx_mode, 64587ac6653aSJeff Kirsher .ndo_tx_timeout = stmmac_tx_timeout, 6459a7605370SArnd Bergmann .ndo_eth_ioctl = stmmac_ioctl, 64604dbbe8ddSJose Abreu .ndo_setup_tc = stmmac_setup_tc, 64614993e5b3SJose Abreu .ndo_select_queue = stmmac_select_queue, 64627ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 64637ac6653aSJeff Kirsher .ndo_poll_controller = stmmac_poll_controller, 64647ac6653aSJeff Kirsher #endif 6465a830405eSBhadram Varka .ndo_set_mac_address = stmmac_set_mac_address, 64663cd1cfcbSJose Abreu .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 64673cd1cfcbSJose Abreu .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 64685fabb012SOng Boon Leong .ndo_bpf = stmmac_bpf, 64698b278a5bSOng Boon Leong .ndo_xdp_xmit = stmmac_xdp_xmit, 6470bba2556eSOng Boon Leong .ndo_xsk_wakeup = stmmac_xsk_wakeup, 64717ac6653aSJeff Kirsher }; 64727ac6653aSJeff Kirsher 647334877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv) 647434877a15SJose Abreu { 647534877a15SJose Abreu if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 647634877a15SJose Abreu return; 647734877a15SJose Abreu if (test_bit(STMMAC_DOWN, &priv->state)) 
647834877a15SJose Abreu return; 647934877a15SJose Abreu 648034877a15SJose Abreu netdev_err(priv->dev, "Reset adapter.\n"); 648134877a15SJose Abreu 648234877a15SJose Abreu rtnl_lock(); 648334877a15SJose Abreu netif_trans_update(priv->dev); 648434877a15SJose Abreu while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 648534877a15SJose Abreu usleep_range(1000, 2000); 648634877a15SJose Abreu 648734877a15SJose Abreu set_bit(STMMAC_DOWN, &priv->state); 648834877a15SJose Abreu dev_close(priv->dev); 648900f54e68SPetr Machata dev_open(priv->dev, NULL); 649034877a15SJose Abreu clear_bit(STMMAC_DOWN, &priv->state); 649134877a15SJose Abreu clear_bit(STMMAC_RESETING, &priv->state); 649234877a15SJose Abreu rtnl_unlock(); 649334877a15SJose Abreu } 649434877a15SJose Abreu 649534877a15SJose Abreu static void stmmac_service_task(struct work_struct *work) 649634877a15SJose Abreu { 649734877a15SJose Abreu struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 649834877a15SJose Abreu service_task); 649934877a15SJose Abreu 650034877a15SJose Abreu stmmac_reset_subtask(priv); 650134877a15SJose Abreu clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 650234877a15SJose Abreu } 650334877a15SJose Abreu 65047ac6653aSJeff Kirsher /** 6505cf3f047bSGiuseppe CAVALLARO * stmmac_hw_init - Init the MAC device 650632ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 6507732fdf0eSGiuseppe CAVALLARO * Description: this function is to configure the MAC device according to 6508732fdf0eSGiuseppe CAVALLARO * some platform parameters or the HW capability register. It prepares the 6509732fdf0eSGiuseppe CAVALLARO * driver to use either ring or chain modes and to setup either enhanced or 6510732fdf0eSGiuseppe CAVALLARO * normal descriptors. 
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		/* PMT from HW cap, unless the platform wants PHY-based WoL */
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!priv->plat->use_phy_wol;
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
				(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
				ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}

/* Register the per-channel NAPI contexts: rx_napi for RX-only channels,
 * tx_napi for TX-only, and a combined rxtx_napi where a channel has both
 * (used by the XDP/XSK path). Also initializes each channel's lock.
 */
static void stmmac_napi_add(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;
		spin_lock_init(&ch->lock);

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
				       NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_tx_napi_add(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx,
					  NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_add(dev, &ch->rxtx_napi,
				       stmmac_napi_poll_rxtx,
				       NAPI_POLL_WEIGHT);
		}
	}
}

/* Unregister exactly the NAPI contexts that stmmac_napi_add() created,
 * using the same per-queue conditions.
 */
static void stmmac_napi_del(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_del(&ch->rxtx_napi);
		}
	}
}

/* Change the number of active RX/TX queues. If the interface is up it is
 * torn down first and reopened afterwards; NAPI contexts are rebuilt for
 * the new counts. Returns stmmac_open()'s result when reopening, else 0.
 */
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}

/* Change the RX/TX descriptor ring sizes (ethtool -G path), bouncing the
 * interface if it is running so the rings are re-allocated at open time.
 */
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_rx_size = rx_size;
	priv->dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}

/* NOTE(review): "MPAKCET" is a long-standing typo for "mPacket"; kept as-is
 * because the macro name is referenced below and matches upstream.
 */
#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
/* IEEE 802.3 Frame Preemption link-partner handshake worker. Polls up to
 * 20 times (500 ms apart): sends Verify mPackets while waiting for the
 * partner, and programs the FPE hardware once both ends have reached
 * ENTERING_ON. Bails out as soon as the handshake is switched off.
 */
static void stmmac_fpe_lp_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						fpe_task);
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;
	bool *enable = &fpe_cfg->enable;
	int retries = 20;

	while (retries-- > 0) {
		/* Bail out immediately if FPE handshake is OFF */
		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
			break;

		if (*lo_state == FPE_STATE_ENTERING_ON &&
		    *lp_state == FPE_STATE_ENTERING_ON) {
			stmmac_fpe_configure(priv, priv->ioaddr,
					     priv->plat->tx_queues_to_use,
					     priv->plat->rx_queues_to_use,
					     *enable);

			netdev_info(priv->dev, "configured FPE\n");

			*lo_state = FPE_STATE_ON;
			*lp_state = FPE_STATE_ON;
			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
			break;
		}

		if ((*lo_state == FPE_STATE_CAPABLE ||
		     *lo_state == FPE_STATE_ENTERING_ON) &&
		     *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
				    *lo_state, *lp_state);
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		}
		/* Sleep then retry */
		msleep(500);
	}

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
}

/* Enable/disable the FPE handshake. Enabling kicks it off by sending a
 * Verify mPacket; disabling resets both local and link-partner states to
 * OFF. No-op if the requested state is already set.
 */
void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
{
	if (priv->plat->fpe_cfg->hs_enable != enable) {
		if (enable) {
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		} else {
			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
		}

		priv->plat->fpe_cfg->hs_enable = enable;
	}
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used
to 6766bfab27a1SGiuseppe CAVALLARO * call the alloc_etherdev, allocate the priv structure. 67679afec6efSAndy Shevchenko * Return: 676815ffac73SJoachim Eastwood * returns 0 on success, otherwise errno. 67697ac6653aSJeff Kirsher */ 677015ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device, 6771cf3f047bSGiuseppe CAVALLARO struct plat_stmmacenet_data *plat_dat, 6772e56788cfSJoachim Eastwood struct stmmac_resources *res) 67737ac6653aSJeff Kirsher { 6774bfab27a1SGiuseppe CAVALLARO struct net_device *ndev = NULL; 6775bfab27a1SGiuseppe CAVALLARO struct stmmac_priv *priv; 67760366f7e0SOng Boon Leong u32 rxq; 677776067459SJose Abreu int i, ret = 0; 67787ac6653aSJeff Kirsher 67799737070cSJisheng Zhang ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 67809737070cSJisheng Zhang MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 678141de8d4cSJoe Perches if (!ndev) 678215ffac73SJoachim Eastwood return -ENOMEM; 67837ac6653aSJeff Kirsher 6784bfab27a1SGiuseppe CAVALLARO SET_NETDEV_DEV(ndev, device); 67857ac6653aSJeff Kirsher 6786bfab27a1SGiuseppe CAVALLARO priv = netdev_priv(ndev); 6787bfab27a1SGiuseppe CAVALLARO priv->device = device; 6788bfab27a1SGiuseppe CAVALLARO priv->dev = ndev; 6789bfab27a1SGiuseppe CAVALLARO 6790bfab27a1SGiuseppe CAVALLARO stmmac_set_ethtool_ops(ndev); 6791cf3f047bSGiuseppe CAVALLARO priv->pause = pause; 6792cf3f047bSGiuseppe CAVALLARO priv->plat = plat_dat; 6793e56788cfSJoachim Eastwood priv->ioaddr = res->addr; 6794e56788cfSJoachim Eastwood priv->dev->base_addr = (unsigned long)res->addr; 67956ccf12aeSWong, Vee Khee priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; 6796e56788cfSJoachim Eastwood 6797e56788cfSJoachim Eastwood priv->dev->irq = res->irq; 6798e56788cfSJoachim Eastwood priv->wol_irq = res->wol_irq; 6799e56788cfSJoachim Eastwood priv->lpi_irq = res->lpi_irq; 68008532f613SOng Boon Leong priv->sfty_ce_irq = res->sfty_ce_irq; 68018532f613SOng Boon Leong priv->sfty_ue_irq = res->sfty_ue_irq; 68028532f613SOng Boon 
Leong for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 68038532f613SOng Boon Leong priv->rx_irq[i] = res->rx_irq[i]; 68048532f613SOng Boon Leong for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 68058532f613SOng Boon Leong priv->tx_irq[i] = res->tx_irq[i]; 6806e56788cfSJoachim Eastwood 680783216e39SMichael Walle if (!is_zero_ether_addr(res->mac)) 6808e56788cfSJoachim Eastwood memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 6809bfab27a1SGiuseppe CAVALLARO 6810a7a62685SJoachim Eastwood dev_set_drvdata(device, priv->dev); 6811803f8fc4SJoachim Eastwood 6812cf3f047bSGiuseppe CAVALLARO /* Verify driver arguments */ 6813cf3f047bSGiuseppe CAVALLARO stmmac_verify_args(); 6814cf3f047bSGiuseppe CAVALLARO 6815bba2556eSOng Boon Leong priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); 6816bba2556eSOng Boon Leong if (!priv->af_xdp_zc_qps) 6817bba2556eSOng Boon Leong return -ENOMEM; 6818bba2556eSOng Boon Leong 681934877a15SJose Abreu /* Allocate workqueue */ 682034877a15SJose Abreu priv->wq = create_singlethread_workqueue("stmmac_wq"); 682134877a15SJose Abreu if (!priv->wq) { 682234877a15SJose Abreu dev_err(priv->device, "failed to create workqueue\n"); 68239737070cSJisheng Zhang return -ENOMEM; 682434877a15SJose Abreu } 682534877a15SJose Abreu 682634877a15SJose Abreu INIT_WORK(&priv->service_task, stmmac_service_task); 682734877a15SJose Abreu 68285a558611SOng Boon Leong /* Initialize Link Partner FPE workqueue */ 68295a558611SOng Boon Leong INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 68305a558611SOng Boon Leong 6831cf3f047bSGiuseppe CAVALLARO /* Override with kernel parameters if supplied XXX CRS XXX 6832ceb69499SGiuseppe CAVALLARO * this needs to have multiple instances 6833ceb69499SGiuseppe CAVALLARO */ 6834cf3f047bSGiuseppe CAVALLARO if ((phyaddr >= 0) && (phyaddr <= 31)) 6835cf3f047bSGiuseppe CAVALLARO priv->plat->phy_addr = phyaddr; 6836cf3f047bSGiuseppe CAVALLARO 683790f522a2SEugeniy Paltsev if (priv->plat->stmmac_rst) { 683890f522a2SEugeniy Paltsev ret = 
reset_control_assert(priv->plat->stmmac_rst); 6839f573c0b9Sjpinto reset_control_deassert(priv->plat->stmmac_rst); 684090f522a2SEugeniy Paltsev /* Some reset controllers have only reset callback instead of 684190f522a2SEugeniy Paltsev * assert + deassert callbacks pair. 684290f522a2SEugeniy Paltsev */ 684390f522a2SEugeniy Paltsev if (ret == -ENOTSUPP) 684490f522a2SEugeniy Paltsev reset_control_reset(priv->plat->stmmac_rst); 684590f522a2SEugeniy Paltsev } 6846c5e4ddbdSChen-Yu Tsai 6847e67f325eSMatthew Hagan ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); 6848e67f325eSMatthew Hagan if (ret == -ENOTSUPP) 6849e67f325eSMatthew Hagan dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 6850e67f325eSMatthew Hagan ERR_PTR(ret)); 6851e67f325eSMatthew Hagan 6852cf3f047bSGiuseppe CAVALLARO /* Init MAC and get the capabilities */ 6853c24602efSGiuseppe CAVALLARO ret = stmmac_hw_init(priv); 6854c24602efSGiuseppe CAVALLARO if (ret) 685562866e98SChen-Yu Tsai goto error_hw_init; 6856cf3f047bSGiuseppe CAVALLARO 685796874c61SMohammad Athari Bin Ismail /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. 
685896874c61SMohammad Athari Bin Ismail */ 685996874c61SMohammad Athari Bin Ismail if (priv->synopsys_id < DWMAC_CORE_5_20) 686096874c61SMohammad Athari Bin Ismail priv->plat->dma_cfg->dche = false; 686196874c61SMohammad Athari Bin Ismail 6862b561af36SVinod Koul stmmac_check_ether_addr(priv); 6863b561af36SVinod Koul 6864cf3f047bSGiuseppe CAVALLARO ndev->netdev_ops = &stmmac_netdev_ops; 6865cf3f047bSGiuseppe CAVALLARO 6866cf3f047bSGiuseppe CAVALLARO ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 6867cf3f047bSGiuseppe CAVALLARO NETIF_F_RXCSUM; 6868f748be53SAlexandre TORGUE 68694dbbe8ddSJose Abreu ret = stmmac_tc_init(priv, priv); 68704dbbe8ddSJose Abreu if (!ret) { 68714dbbe8ddSJose Abreu ndev->hw_features |= NETIF_F_HW_TC; 68724dbbe8ddSJose Abreu } 68734dbbe8ddSJose Abreu 6874f748be53SAlexandre TORGUE if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 68759edfa7daSNiklas Cassel ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 6876b7766206SJose Abreu if (priv->plat->has_gmac4) 6877b7766206SJose Abreu ndev->hw_features |= NETIF_F_GSO_UDP_L4; 6878f748be53SAlexandre TORGUE priv->tso = true; 687938ddc59dSLABBE Corentin dev_info(priv->device, "TSO feature enabled\n"); 6880f748be53SAlexandre TORGUE } 6881a993db88SJose Abreu 688267afd6d1SJose Abreu if (priv->dma_cap.sphen) { 688367afd6d1SJose Abreu ndev->hw_features |= NETIF_F_GRO; 6884d08d32d1SOng Boon Leong priv->sph_cap = true; 6885d08d32d1SOng Boon Leong priv->sph = priv->sph_cap; 688667afd6d1SJose Abreu dev_info(priv->device, "SPH feature enabled\n"); 688767afd6d1SJose Abreu } 688867afd6d1SJose Abreu 6889f119cc98SFugang Duan /* The current IP register MAC_HW_Feature1[ADDR64] only define 6890f119cc98SFugang Duan * 32/40/64 bit width, but some SOC support others like i.MX8MP 6891f119cc98SFugang Duan * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64]. 6892f119cc98SFugang Duan * So overwrite dma_cap.addr64 according to HW real design. 
6893f119cc98SFugang Duan */ 6894f119cc98SFugang Duan if (priv->plat->addr64) 6895f119cc98SFugang Duan priv->dma_cap.addr64 = priv->plat->addr64; 6896f119cc98SFugang Duan 6897a993db88SJose Abreu if (priv->dma_cap.addr64) { 6898a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, 6899a993db88SJose Abreu DMA_BIT_MASK(priv->dma_cap.addr64)); 6900a993db88SJose Abreu if (!ret) { 6901a993db88SJose Abreu dev_info(priv->device, "Using %d bits DMA width\n", 6902a993db88SJose Abreu priv->dma_cap.addr64); 6903968a2978SThierry Reding 6904968a2978SThierry Reding /* 6905968a2978SThierry Reding * If more than 32 bits can be addressed, make sure to 6906968a2978SThierry Reding * enable enhanced addressing mode. 6907968a2978SThierry Reding */ 6908968a2978SThierry Reding if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 6909968a2978SThierry Reding priv->plat->dma_cfg->eame = true; 6910a993db88SJose Abreu } else { 6911a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 6912a993db88SJose Abreu if (ret) { 6913a993db88SJose Abreu dev_err(priv->device, "Failed to set DMA Mask\n"); 6914a993db88SJose Abreu goto error_hw_init; 6915a993db88SJose Abreu } 6916a993db88SJose Abreu 6917a993db88SJose Abreu priv->dma_cap.addr64 = 32; 6918a993db88SJose Abreu } 6919a993db88SJose Abreu } 6920a993db88SJose Abreu 6921bfab27a1SGiuseppe CAVALLARO ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 6922bfab27a1SGiuseppe CAVALLARO ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 69237ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED 69247ac6653aSJeff Kirsher /* Both mac100 and gmac support receive VLAN tag detection */ 6925ab188e8fSElad Nachman ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 69263cd1cfcbSJose Abreu if (priv->dma_cap.vlhash) { 69273cd1cfcbSJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 69283cd1cfcbSJose Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 69293cd1cfcbSJose Abreu } 693030d93227SJose Abreu if (priv->dma_cap.vlins) 
{ 693130d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 693230d93227SJose Abreu if (priv->dma_cap.dvlan) 693330d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 693430d93227SJose Abreu } 69357ac6653aSJeff Kirsher #endif 69367ac6653aSJeff Kirsher priv->msg_enable = netif_msg_init(debug, default_msg_level); 69377ac6653aSJeff Kirsher 693876067459SJose Abreu /* Initialize RSS */ 693976067459SJose Abreu rxq = priv->plat->rx_queues_to_use; 694076067459SJose Abreu netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 694176067459SJose Abreu for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 694276067459SJose Abreu priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 694376067459SJose Abreu 694476067459SJose Abreu if (priv->dma_cap.rssen && priv->plat->rss_en) 694576067459SJose Abreu ndev->features |= NETIF_F_RXHASH; 694676067459SJose Abreu 694744770e11SJarod Wilson /* MTU range: 46 - hw-specific max */ 694844770e11SJarod Wilson ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 694956bcd591SJose Abreu if (priv->plat->has_xgmac) 69507d9e6c5aSJose Abreu ndev->max_mtu = XGMAC_JUMBO_LEN; 695156bcd591SJose Abreu else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 695256bcd591SJose Abreu ndev->max_mtu = JUMBO_LEN; 695344770e11SJarod Wilson else 695444770e11SJarod Wilson ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 6955a2cd64f3SKweh, Hock Leong /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 6956a2cd64f3SKweh, Hock Leong * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 
6957a2cd64f3SKweh, Hock Leong */ 6958a2cd64f3SKweh, Hock Leong if ((priv->plat->maxmtu < ndev->max_mtu) && 6959a2cd64f3SKweh, Hock Leong (priv->plat->maxmtu >= ndev->min_mtu)) 696044770e11SJarod Wilson ndev->max_mtu = priv->plat->maxmtu; 6961a2cd64f3SKweh, Hock Leong else if (priv->plat->maxmtu < ndev->min_mtu) 6962b618ab45SHeiner Kallweit dev_warn(priv->device, 6963a2cd64f3SKweh, Hock Leong "%s: warning: maxmtu having invalid value (%d)\n", 6964a2cd64f3SKweh, Hock Leong __func__, priv->plat->maxmtu); 696544770e11SJarod Wilson 69667ac6653aSJeff Kirsher if (flow_ctrl) 69677ac6653aSJeff Kirsher priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 69687ac6653aSJeff Kirsher 69698fce3331SJose Abreu /* Setup channels NAPI */ 69700366f7e0SOng Boon Leong stmmac_napi_add(ndev); 69717ac6653aSJeff Kirsher 697229555fa3SThierry Reding mutex_init(&priv->lock); 69737ac6653aSJeff Kirsher 6974cd7201f4SGiuseppe CAVALLARO /* If a specific clk_csr value is passed from the platform 6975cd7201f4SGiuseppe CAVALLARO * this means that the CSR Clock Range selection cannot be 6976cd7201f4SGiuseppe CAVALLARO * changed at run-time and it is fixed. Viceversa the driver'll try to 6977cd7201f4SGiuseppe CAVALLARO * set the MDC clock dynamically according to the csr actual 6978cd7201f4SGiuseppe CAVALLARO * clock input. 
6979cd7201f4SGiuseppe CAVALLARO */ 69805e7f7fc5SBiao Huang if (priv->plat->clk_csr >= 0) 6981cd7201f4SGiuseppe CAVALLARO priv->clk_csr = priv->plat->clk_csr; 69825e7f7fc5SBiao Huang else 69835e7f7fc5SBiao Huang stmmac_clk_csr_set(priv); 6984cd7201f4SGiuseppe CAVALLARO 6985e58bb43fSGiuseppe CAVALLARO stmmac_check_pcs_mode(priv); 6986e58bb43fSGiuseppe CAVALLARO 69875ec55823SJoakim Zhang pm_runtime_get_noresume(device); 69885ec55823SJoakim Zhang pm_runtime_set_active(device); 69895ec55823SJoakim Zhang pm_runtime_enable(device); 69905ec55823SJoakim Zhang 6991a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 69923fe5cadbSGiuseppe CAVALLARO priv->hw->pcs != STMMAC_PCS_RTBI) { 69934bfcbd7aSFrancesco Virlinzi /* MDIO bus Registration */ 69944bfcbd7aSFrancesco Virlinzi ret = stmmac_mdio_register(ndev); 69954bfcbd7aSFrancesco Virlinzi if (ret < 0) { 6996b618ab45SHeiner Kallweit dev_err(priv->device, 699738ddc59dSLABBE Corentin "%s: MDIO bus (id: %d) registration failed", 69984bfcbd7aSFrancesco Virlinzi __func__, priv->plat->bus_id); 69996a81c26fSViresh Kumar goto error_mdio_register; 70004bfcbd7aSFrancesco Virlinzi } 7001e58bb43fSGiuseppe CAVALLARO } 70024bfcbd7aSFrancesco Virlinzi 700346682cb8SVoon Weifeng if (priv->plat->speed_mode_2500) 700446682cb8SVoon Weifeng priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); 700546682cb8SVoon Weifeng 70067413f9a6SVladimir Oltean if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { 7007597a68ceSVoon Weifeng ret = stmmac_xpcs_setup(priv->mii); 7008597a68ceSVoon Weifeng if (ret) 7009597a68ceSVoon Weifeng goto error_xpcs_setup; 7010597a68ceSVoon Weifeng } 7011597a68ceSVoon Weifeng 701274371272SJose Abreu ret = stmmac_phy_setup(priv); 701374371272SJose Abreu if (ret) { 701474371272SJose Abreu netdev_err(ndev, "failed to setup phy (%d)\n", ret); 701574371272SJose Abreu goto error_phy_setup; 701674371272SJose Abreu } 701774371272SJose Abreu 701857016590SFlorian Fainelli ret = register_netdev(ndev); 
7019b2eb09afSFlorian Fainelli if (ret) { 7020b618ab45SHeiner Kallweit dev_err(priv->device, "%s: ERROR %i registering the device\n", 702157016590SFlorian Fainelli __func__, ret); 7022b2eb09afSFlorian Fainelli goto error_netdev_register; 7023b2eb09afSFlorian Fainelli } 70247ac6653aSJeff Kirsher 7025b9663b7cSVoon Weifeng if (priv->plat->serdes_powerup) { 7026b9663b7cSVoon Weifeng ret = priv->plat->serdes_powerup(ndev, 7027b9663b7cSVoon Weifeng priv->plat->bsp_priv); 7028b9663b7cSVoon Weifeng 7029b9663b7cSVoon Weifeng if (ret < 0) 7030801eb050SAndy Shevchenko goto error_serdes_powerup; 7031b9663b7cSVoon Weifeng } 7032b9663b7cSVoon Weifeng 70335f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS 70348d72ab11SGreg Kroah-Hartman stmmac_init_fs(ndev); 70355f2b8b62SThierry Reding #endif 70365f2b8b62SThierry Reding 70375ec55823SJoakim Zhang /* Let pm_runtime_put() disable the clocks. 70385ec55823SJoakim Zhang * If CONFIG_PM is not enabled, the clocks will stay powered. 70395ec55823SJoakim Zhang */ 70405ec55823SJoakim Zhang pm_runtime_put(device); 70415ec55823SJoakim Zhang 704257016590SFlorian Fainelli return ret; 70437ac6653aSJeff Kirsher 7044801eb050SAndy Shevchenko error_serdes_powerup: 7045801eb050SAndy Shevchenko unregister_netdev(ndev); 70466a81c26fSViresh Kumar error_netdev_register: 704774371272SJose Abreu phylink_destroy(priv->phylink); 7048597a68ceSVoon Weifeng error_xpcs_setup: 704974371272SJose Abreu error_phy_setup: 7050a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 7051b2eb09afSFlorian Fainelli priv->hw->pcs != STMMAC_PCS_RTBI) 7052b2eb09afSFlorian Fainelli stmmac_mdio_unregister(ndev); 70537ac6653aSJeff Kirsher error_mdio_register: 70540366f7e0SOng Boon Leong stmmac_napi_del(ndev); 705562866e98SChen-Yu Tsai error_hw_init: 705634877a15SJose Abreu destroy_workqueue(priv->wq); 7057d7f576dcSWong Vee Khee bitmap_free(priv->af_xdp_zc_qps); 70587ac6653aSJeff Kirsher 705915ffac73SJoachim Eastwood return ret; 70607ac6653aSJeff Kirsher } 7061b2e2f0c7SAndy 
Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 70627ac6653aSJeff Kirsher 70637ac6653aSJeff Kirsher /** 70647ac6653aSJeff Kirsher * stmmac_dvr_remove 7065f4e7bd81SJoachim Eastwood * @dev: device pointer 70667ac6653aSJeff Kirsher * Description: this function resets the TX/RX processes, disables the MAC RX/TX 7067bfab27a1SGiuseppe CAVALLARO * changes the link status, releases the DMA descriptor rings. 70687ac6653aSJeff Kirsher */ 7069f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev) 70707ac6653aSJeff Kirsher { 7071f4e7bd81SJoachim Eastwood struct net_device *ndev = dev_get_drvdata(dev); 70727ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(ndev); 70737ac6653aSJeff Kirsher 707438ddc59dSLABBE Corentin netdev_info(priv->dev, "%s: removing driver", __func__); 70757ac6653aSJeff Kirsher 7076ae4f0d46SJoao Pinto stmmac_stop_all_dma(priv); 7077c10d4c82SJose Abreu stmmac_mac_set(priv, priv->ioaddr, false); 70787ac6653aSJeff Kirsher netif_carrier_off(ndev); 70797ac6653aSJeff Kirsher unregister_netdev(ndev); 70809a7b3950SOng Boon Leong 70819a7b3950SOng Boon Leong /* Serdes power down needs to happen after VLAN filter 70829a7b3950SOng Boon Leong * is deleted that is triggered by unregister_netdev(). 
70839a7b3950SOng Boon Leong */ 70849a7b3950SOng Boon Leong if (priv->plat->serdes_powerdown) 70859a7b3950SOng Boon Leong priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 70869a7b3950SOng Boon Leong 7087474a31e1SAaro Koskinen #ifdef CONFIG_DEBUG_FS 7088474a31e1SAaro Koskinen stmmac_exit_fs(ndev); 7089474a31e1SAaro Koskinen #endif 709074371272SJose Abreu phylink_destroy(priv->phylink); 7091f573c0b9Sjpinto if (priv->plat->stmmac_rst) 7092f573c0b9Sjpinto reset_control_assert(priv->plat->stmmac_rst); 7093e67f325eSMatthew Hagan reset_control_assert(priv->plat->stmmac_ahb_rst); 70945ec55823SJoakim Zhang pm_runtime_put(dev); 70955ec55823SJoakim Zhang pm_runtime_disable(dev); 7096a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 70973fe5cadbSGiuseppe CAVALLARO priv->hw->pcs != STMMAC_PCS_RTBI) 7098e743471fSBryan O'Donoghue stmmac_mdio_unregister(ndev); 709934877a15SJose Abreu destroy_workqueue(priv->wq); 710029555fa3SThierry Reding mutex_destroy(&priv->lock); 7101d7f576dcSWong Vee Khee bitmap_free(priv->af_xdp_zc_qps); 71027ac6653aSJeff Kirsher 71037ac6653aSJeff Kirsher return 0; 71047ac6653aSJeff Kirsher } 7105b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_remove); 71067ac6653aSJeff Kirsher 7107732fdf0eSGiuseppe CAVALLARO /** 7108732fdf0eSGiuseppe CAVALLARO * stmmac_suspend - suspend callback 7109f4e7bd81SJoachim Eastwood * @dev: device pointer 7110732fdf0eSGiuseppe CAVALLARO * Description: this is the function to suspend the device and it is called 7111732fdf0eSGiuseppe CAVALLARO * by the platform driver to stop the network queue, release the resources, 7112732fdf0eSGiuseppe CAVALLARO * program the PMT register (for WoL), clean and release driver resources. 
7113732fdf0eSGiuseppe CAVALLARO */ 7114f4e7bd81SJoachim Eastwood int stmmac_suspend(struct device *dev) 71157ac6653aSJeff Kirsher { 7116f4e7bd81SJoachim Eastwood struct net_device *ndev = dev_get_drvdata(dev); 71177ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(ndev); 711814b41a29SNicolin Chen u32 chan; 71195ec55823SJoakim Zhang int ret; 71207ac6653aSJeff Kirsher 71217ac6653aSJeff Kirsher if (!ndev || !netif_running(ndev)) 71227ac6653aSJeff Kirsher return 0; 71237ac6653aSJeff Kirsher 71243e2bf04fSJose Abreu phylink_mac_change(priv->phylink, false); 71257ac6653aSJeff Kirsher 7126134cc4ceSThierry Reding mutex_lock(&priv->lock); 712719e13cb2SJose Abreu 71287ac6653aSJeff Kirsher netif_device_detach(ndev); 71297ac6653aSJeff Kirsher 7130c22a3f48SJoao Pinto stmmac_disable_all_queues(priv); 71317ac6653aSJeff Kirsher 713214b41a29SNicolin Chen for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 7133d5a05e69SVincent Whitchurch hrtimer_cancel(&priv->tx_queue[chan].txtimer); 713414b41a29SNicolin Chen 71355f585913SFugang Duan if (priv->eee_enabled) { 71365f585913SFugang Duan priv->tx_path_in_lpi_mode = false; 71375f585913SFugang Duan del_timer_sync(&priv->eee_ctrl_timer); 71385f585913SFugang Duan } 71395f585913SFugang Duan 71407ac6653aSJeff Kirsher /* Stop TX/RX DMA */ 7141ae4f0d46SJoao Pinto stmmac_stop_all_dma(priv); 7142c24602efSGiuseppe CAVALLARO 7143b9663b7cSVoon Weifeng if (priv->plat->serdes_powerdown) 7144b9663b7cSVoon Weifeng priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 7145b9663b7cSVoon Weifeng 71467ac6653aSJeff Kirsher /* Enable Power down mode by programming the PMT regs */ 7147e8377e7aSJisheng Zhang if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7148c10d4c82SJose Abreu stmmac_pmt(priv, priv->hw, priv->wolopts); 714989f7f2cfSSrinivas Kandagatla priv->irq_wake = 1; 715089f7f2cfSSrinivas Kandagatla } else { 7151134cc4ceSThierry Reding mutex_unlock(&priv->lock); 71523e2bf04fSJose Abreu rtnl_lock(); 715377b28983SJisheng Zhang 
if (device_may_wakeup(priv->device)) 715477b28983SJisheng Zhang phylink_speed_down(priv->phylink, false); 71553e2bf04fSJose Abreu phylink_stop(priv->phylink); 71563e2bf04fSJose Abreu rtnl_unlock(); 7157134cc4ceSThierry Reding mutex_lock(&priv->lock); 71583e2bf04fSJose Abreu 7159c10d4c82SJose Abreu stmmac_mac_set(priv, priv->ioaddr, false); 7160db88f10aSSrinivas Kandagatla pinctrl_pm_select_sleep_state(priv->device); 7161ba1377ffSGiuseppe CAVALLARO /* Disable clock in case of PWM is off */ 7162e497c20eSBiao Huang clk_disable_unprepare(priv->plat->clk_ptp_ref); 71635ec55823SJoakim Zhang ret = pm_runtime_force_suspend(dev); 716430f347aeSYang Yingliang if (ret) { 716530f347aeSYang Yingliang mutex_unlock(&priv->lock); 71665ec55823SJoakim Zhang return ret; 7167ba1377ffSGiuseppe CAVALLARO } 716830f347aeSYang Yingliang } 71695a558611SOng Boon Leong 717029555fa3SThierry Reding mutex_unlock(&priv->lock); 71712d871aa0SVince Bridgers 71725a558611SOng Boon Leong if (priv->dma_cap.fpesel) { 71735a558611SOng Boon Leong /* Disable FPE */ 71745a558611SOng Boon Leong stmmac_fpe_configure(priv, priv->ioaddr, 71755a558611SOng Boon Leong priv->plat->tx_queues_to_use, 71765a558611SOng Boon Leong priv->plat->rx_queues_to_use, false); 71775a558611SOng Boon Leong 71785a558611SOng Boon Leong stmmac_fpe_handshake(priv, false); 71796b28a86dSMohammad Athari Bin Ismail stmmac_fpe_stop_wq(priv); 71805a558611SOng Boon Leong } 71815a558611SOng Boon Leong 7182bd00632cSLABBE Corentin priv->speed = SPEED_UNKNOWN; 71837ac6653aSJeff Kirsher return 0; 71847ac6653aSJeff Kirsher } 7185b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_suspend); 71867ac6653aSJeff Kirsher 7187732fdf0eSGiuseppe CAVALLARO /** 718854139cf3SJoao Pinto * stmmac_reset_queues_param - reset queue parameters 7189d0ea5cbdSJesse Brandeburg * @priv: device pointer 719054139cf3SJoao Pinto */ 719154139cf3SJoao Pinto static void stmmac_reset_queues_param(struct stmmac_priv *priv) 719254139cf3SJoao Pinto { 719354139cf3SJoao Pinto u32 rx_cnt 
= priv->plat->rx_queues_to_use; 7194ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 719554139cf3SJoao Pinto u32 queue; 719654139cf3SJoao Pinto 719754139cf3SJoao Pinto for (queue = 0; queue < rx_cnt; queue++) { 719854139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 719954139cf3SJoao Pinto 720054139cf3SJoao Pinto rx_q->cur_rx = 0; 720154139cf3SJoao Pinto rx_q->dirty_rx = 0; 720254139cf3SJoao Pinto } 720354139cf3SJoao Pinto 7204ce736788SJoao Pinto for (queue = 0; queue < tx_cnt; queue++) { 7205ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 7206ce736788SJoao Pinto 7207ce736788SJoao Pinto tx_q->cur_tx = 0; 7208ce736788SJoao Pinto tx_q->dirty_tx = 0; 72098d212a9eSNiklas Cassel tx_q->mss = 0; 7210c511819dSJoakim Zhang 7211c511819dSJoakim Zhang netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); 7212ce736788SJoao Pinto } 721354139cf3SJoao Pinto } 721454139cf3SJoao Pinto 721554139cf3SJoao Pinto /** 7216732fdf0eSGiuseppe CAVALLARO * stmmac_resume - resume callback 7217f4e7bd81SJoachim Eastwood * @dev: device pointer 7218732fdf0eSGiuseppe CAVALLARO * Description: when resume this function is invoked to setup the DMA and CORE 7219732fdf0eSGiuseppe CAVALLARO * in a usable state. 7220732fdf0eSGiuseppe CAVALLARO */ 7221f4e7bd81SJoachim Eastwood int stmmac_resume(struct device *dev) 72227ac6653aSJeff Kirsher { 7223f4e7bd81SJoachim Eastwood struct net_device *ndev = dev_get_drvdata(dev); 72247ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(ndev); 7225b9663b7cSVoon Weifeng int ret; 72267ac6653aSJeff Kirsher 72277ac6653aSJeff Kirsher if (!netif_running(ndev)) 72287ac6653aSJeff Kirsher return 0; 72297ac6653aSJeff Kirsher 72307ac6653aSJeff Kirsher /* Power Down bit, into the PM register, is cleared 72317ac6653aSJeff Kirsher * automatically as soon as a magic packet or a Wake-up frame 72327ac6653aSJeff Kirsher * is received. 
Anyway, it's better to manually clear 72337ac6653aSJeff Kirsher * this bit because it can generate problems while resuming 7234ceb69499SGiuseppe CAVALLARO * from another devices (e.g. serial console). 7235ceb69499SGiuseppe CAVALLARO */ 7236e8377e7aSJisheng Zhang if (device_may_wakeup(priv->device) && priv->plat->pmt) { 723729555fa3SThierry Reding mutex_lock(&priv->lock); 7238c10d4c82SJose Abreu stmmac_pmt(priv, priv->hw, 0); 723929555fa3SThierry Reding mutex_unlock(&priv->lock); 724089f7f2cfSSrinivas Kandagatla priv->irq_wake = 0; 7241623997fbSSrinivas Kandagatla } else { 7242db88f10aSSrinivas Kandagatla pinctrl_pm_select_default_state(priv->device); 72438d45e42bSLABBE Corentin /* enable the clk previously disabled */ 72445ec55823SJoakim Zhang ret = pm_runtime_force_resume(dev); 72455ec55823SJoakim Zhang if (ret) 72465ec55823SJoakim Zhang return ret; 7247e497c20eSBiao Huang if (priv->plat->clk_ptp_ref) 7248e497c20eSBiao Huang clk_prepare_enable(priv->plat->clk_ptp_ref); 7249623997fbSSrinivas Kandagatla /* reset the phy so that it's ready */ 7250623997fbSSrinivas Kandagatla if (priv->mii) 7251623997fbSSrinivas Kandagatla stmmac_mdio_reset(priv->mii); 7252623997fbSSrinivas Kandagatla } 72537ac6653aSJeff Kirsher 7254b9663b7cSVoon Weifeng if (priv->plat->serdes_powerup) { 7255b9663b7cSVoon Weifeng ret = priv->plat->serdes_powerup(ndev, 7256b9663b7cSVoon Weifeng priv->plat->bsp_priv); 7257b9663b7cSVoon Weifeng 7258b9663b7cSVoon Weifeng if (ret < 0) 7259b9663b7cSVoon Weifeng return ret; 7260b9663b7cSVoon Weifeng } 7261b9663b7cSVoon Weifeng 726236d18b56SFugang Duan if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { 726336d18b56SFugang Duan rtnl_lock(); 726436d18b56SFugang Duan phylink_start(priv->phylink); 726536d18b56SFugang Duan /* We may have called phylink_speed_down before */ 726636d18b56SFugang Duan phylink_speed_up(priv->phylink); 726736d18b56SFugang Duan rtnl_unlock(); 726836d18b56SFugang Duan } 726936d18b56SFugang Duan 72708e5debedSWong Vee Khee 
rtnl_lock(); 727129555fa3SThierry Reding mutex_lock(&priv->lock); 7272f55d84b0SVincent Palatin 727354139cf3SJoao Pinto stmmac_reset_queues_param(priv); 727400423969SThierry Reding 72754ec236c7SFugang Duan stmmac_free_tx_skbufs(priv); 7276ae79a639SGiuseppe CAVALLARO stmmac_clear_descriptors(priv); 7277ae79a639SGiuseppe CAVALLARO 7278fe131929SHuacai Chen stmmac_hw_setup(ndev, false); 7279d429b66eSJose Abreu stmmac_init_coalesce(priv); 7280ac316c78SGiuseppe CAVALLARO stmmac_set_rx_mode(ndev); 72817ac6653aSJeff Kirsher 7282ed64639bSWong Vee Khee stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); 7283ed64639bSWong Vee Khee 7284c22a3f48SJoao Pinto stmmac_enable_all_queues(priv); 72857ac6653aSJeff Kirsher 7286134cc4ceSThierry Reding mutex_unlock(&priv->lock); 72878e5debedSWong Vee Khee rtnl_unlock(); 7288134cc4ceSThierry Reding 72893e2bf04fSJose Abreu phylink_mac_change(priv->phylink, true); 7290102463b1SFrancesco Virlinzi 729131096c3eSLeon Yu netif_device_attach(ndev); 729231096c3eSLeon Yu 72937ac6653aSJeff Kirsher return 0; 72947ac6653aSJeff Kirsher } 7295b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_resume); 7296ba27ec66SGiuseppe CAVALLARO 72977ac6653aSJeff Kirsher #ifndef MODULE 72987ac6653aSJeff Kirsher static int __init stmmac_cmdline_opt(char *str) 72997ac6653aSJeff Kirsher { 73007ac6653aSJeff Kirsher char *opt; 73017ac6653aSJeff Kirsher 73027ac6653aSJeff Kirsher if (!str || !*str) 73037ac6653aSJeff Kirsher return -EINVAL; 73047ac6653aSJeff Kirsher while ((opt = strsep(&str, ",")) != NULL) { 73057ac6653aSJeff Kirsher if (!strncmp(opt, "debug:", 6)) { 7306ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 6, 0, &debug)) 73077ac6653aSJeff Kirsher goto err; 73087ac6653aSJeff Kirsher } else if (!strncmp(opt, "phyaddr:", 8)) { 7309ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 8, 0, &phyaddr)) 73107ac6653aSJeff Kirsher goto err; 73117ac6653aSJeff Kirsher } else if (!strncmp(opt, "buf_sz:", 7)) { 7312ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 7, 0, &buf_sz)) 
73137ac6653aSJeff Kirsher goto err; 73147ac6653aSJeff Kirsher } else if (!strncmp(opt, "tc:", 3)) { 7315ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 3, 0, &tc)) 73167ac6653aSJeff Kirsher goto err; 73177ac6653aSJeff Kirsher } else if (!strncmp(opt, "watchdog:", 9)) { 7318ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 9, 0, &watchdog)) 73197ac6653aSJeff Kirsher goto err; 73207ac6653aSJeff Kirsher } else if (!strncmp(opt, "flow_ctrl:", 10)) { 7321ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 10, 0, &flow_ctrl)) 73227ac6653aSJeff Kirsher goto err; 73237ac6653aSJeff Kirsher } else if (!strncmp(opt, "pause:", 6)) { 7324ea2ab871SGiuseppe CAVALLARO if (kstrtoint(opt + 6, 0, &pause)) 73257ac6653aSJeff Kirsher goto err; 7326506f669cSGiuseppe CAVALLARO } else if (!strncmp(opt, "eee_timer:", 10)) { 7327d765955dSGiuseppe CAVALLARO if (kstrtoint(opt + 10, 0, &eee_timer)) 7328d765955dSGiuseppe CAVALLARO goto err; 73294a7d666aSGiuseppe CAVALLARO } else if (!strncmp(opt, "chain_mode:", 11)) { 73304a7d666aSGiuseppe CAVALLARO if (kstrtoint(opt + 11, 0, &chain_mode)) 73314a7d666aSGiuseppe CAVALLARO goto err; 73327ac6653aSJeff Kirsher } 73337ac6653aSJeff Kirsher } 73347ac6653aSJeff Kirsher return 0; 73357ac6653aSJeff Kirsher 73367ac6653aSJeff Kirsher err: 73377ac6653aSJeff Kirsher pr_err("%s: ERROR broken module parameter conversion", __func__); 73387ac6653aSJeff Kirsher return -EINVAL; 73397ac6653aSJeff Kirsher } 73407ac6653aSJeff Kirsher 73417ac6653aSJeff Kirsher __setup("stmmaceth=", stmmac_cmdline_opt); 7342ceb69499SGiuseppe CAVALLARO #endif /* MODULE */ 73436fc0d0f2SGiuseppe Cavallaro 7344466c5ac8SMathieu Olivari static int __init stmmac_init(void) 7345466c5ac8SMathieu Olivari { 7346466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS 7347466c5ac8SMathieu Olivari /* Create debugfs main directory if it doesn't exist yet */ 73488d72ab11SGreg Kroah-Hartman if (!stmmac_fs_dir) 7349466c5ac8SMathieu Olivari stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 
7350474a31e1SAaro Koskinen register_netdevice_notifier(&stmmac_notifier); 7351466c5ac8SMathieu Olivari #endif 7352466c5ac8SMathieu Olivari 7353466c5ac8SMathieu Olivari return 0; 7354466c5ac8SMathieu Olivari } 7355466c5ac8SMathieu Olivari 7356466c5ac8SMathieu Olivari static void __exit stmmac_exit(void) 7357466c5ac8SMathieu Olivari { 7358466c5ac8SMathieu Olivari #ifdef CONFIG_DEBUG_FS 7359474a31e1SAaro Koskinen unregister_netdevice_notifier(&stmmac_notifier); 7360466c5ac8SMathieu Olivari debugfs_remove_recursive(stmmac_fs_dir); 7361466c5ac8SMathieu Olivari #endif 7362466c5ac8SMathieu Olivari } 7363466c5ac8SMathieu Olivari 7364466c5ac8SMathieu Olivari module_init(stmmac_init) 7365466c5ac8SMathieu Olivari module_exit(stmmac_exit) 7366466c5ac8SMathieu Olivari 73676fc0d0f2SGiuseppe Cavallaro MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 73686fc0d0f2SGiuseppe Cavallaro MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 73696fc0d0f2SGiuseppe Cavallaro MODULE_LICENSE("GPL"); 7370