14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 27ac6653aSJeff Kirsher /******************************************************************************* 37ac6653aSJeff Kirsher This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. 47ac6653aSJeff Kirsher ST Ethernet IPs are built around a Synopsys IP Core. 57ac6653aSJeff Kirsher 6286a8372SGiuseppe CAVALLARO Copyright(C) 2007-2011 STMicroelectronics Ltd 77ac6653aSJeff Kirsher 87ac6653aSJeff Kirsher 97ac6653aSJeff Kirsher Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 107ac6653aSJeff Kirsher 117ac6653aSJeff Kirsher Documentation available at: 127ac6653aSJeff Kirsher http://www.stlinux.com 137ac6653aSJeff Kirsher Support available at: 147ac6653aSJeff Kirsher https://bugzilla.stlinux.com/ 157ac6653aSJeff Kirsher *******************************************************************************/ 167ac6653aSJeff Kirsher 176a81c26fSViresh Kumar #include <linux/clk.h> 187ac6653aSJeff Kirsher #include <linux/kernel.h> 197ac6653aSJeff Kirsher #include <linux/interrupt.h> 207ac6653aSJeff Kirsher #include <linux/ip.h> 217ac6653aSJeff Kirsher #include <linux/tcp.h> 227ac6653aSJeff Kirsher #include <linux/skbuff.h> 237ac6653aSJeff Kirsher #include <linux/ethtool.h> 247ac6653aSJeff Kirsher #include <linux/if_ether.h> 257ac6653aSJeff Kirsher #include <linux/crc32.h> 267ac6653aSJeff Kirsher #include <linux/mii.h> 2701789349SJiri Pirko #include <linux/if.h> 287ac6653aSJeff Kirsher #include <linux/if_vlan.h> 297ac6653aSJeff Kirsher #include <linux/dma-mapping.h> 307ac6653aSJeff Kirsher #include <linux/slab.h> 315ec55823SJoakim Zhang #include <linux/pm_runtime.h> 327ac6653aSJeff Kirsher #include <linux/prefetch.h> 33db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h> 3450fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 357ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h> 367ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h> 3750fb4f74SGiuseppe CAVALLARO #endif /* 
CONFIG_DEBUG_FS */ 38891434b1SRayagond Kokatanur #include <linux/net_tstamp.h> 39eeef2f6bSJose Abreu #include <linux/phylink.h> 40b7766206SJose Abreu #include <linux/udp.h> 415fabb012SOng Boon Leong #include <linux/bpf_trace.h> 424dbbe8ddSJose Abreu #include <net/pkt_cls.h> 43bba2556eSOng Boon Leong #include <net/xdp_sock_drv.h> 44891434b1SRayagond Kokatanur #include "stmmac_ptp.h" 45286a8372SGiuseppe CAVALLARO #include "stmmac.h" 465fabb012SOng Boon Leong #include "stmmac_xdp.h" 47c5e4ddbdSChen-Yu Tsai #include <linux/reset.h> 485790cf3cSMathieu Olivari #include <linux/of_mdio.h> 4919d857c9SPhil Reid #include "dwmac1000.h" 507d9e6c5aSJose Abreu #include "dwxgmac2.h" 5142de047dSJose Abreu #include "hwif.h" 527ac6653aSJeff Kirsher 538d558f02SJose Abreu #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) 54f748be53SAlexandre TORGUE #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) 557ac6653aSJeff Kirsher 567ac6653aSJeff Kirsher /* Module parameters */ 5732ceabcaSGiuseppe CAVALLARO #define TX_TIMEO 5000 587ac6653aSJeff Kirsher static int watchdog = TX_TIMEO; 59d3757ba4SJoe Perches module_param(watchdog, int, 0644); 6032ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); 617ac6653aSJeff Kirsher 6232ceabcaSGiuseppe CAVALLARO static int debug = -1; 63d3757ba4SJoe Perches module_param(debug, int, 0644); 6432ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); 657ac6653aSJeff Kirsher 6647d1f71fSstephen hemminger static int phyaddr = -1; 67d3757ba4SJoe Perches module_param(phyaddr, int, 0444); 687ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address"); 697ac6653aSJeff Kirsher 70aa042f60SSong, Yoong Siang #define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) 71aa042f60SSong, Yoong Siang #define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) 727ac6653aSJeff Kirsher 73132c32eeSOng Boon Leong /* Limit to make sure XDP TX and slow path can coexist */ 74132c32eeSOng 
Boon Leong #define STMMAC_XSK_TX_BUDGET_MAX 256 75132c32eeSOng Boon Leong #define STMMAC_TX_XSK_AVAIL 16 76bba2556eSOng Boon Leong #define STMMAC_RX_FILL_BATCH 16 77bba2556eSOng Boon Leong 785fabb012SOng Boon Leong #define STMMAC_XDP_PASS 0 795fabb012SOng Boon Leong #define STMMAC_XDP_CONSUMED BIT(0) 80be8b38a7SOng Boon Leong #define STMMAC_XDP_TX BIT(1) 818b278a5bSOng Boon Leong #define STMMAC_XDP_REDIRECT BIT(2) 825fabb012SOng Boon Leong 83e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO; 84d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644); 857ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); 867ac6653aSJeff Kirsher 877ac6653aSJeff Kirsher static int pause = PAUSE_TIME; 88d3757ba4SJoe Perches module_param(pause, int, 0644); 897ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time"); 907ac6653aSJeff Kirsher 917ac6653aSJeff Kirsher #define TC_DEFAULT 64 927ac6653aSJeff Kirsher static int tc = TC_DEFAULT; 93d3757ba4SJoe Perches module_param(tc, int, 0644); 947ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value"); 957ac6653aSJeff Kirsher 96d916701cSGiuseppe CAVALLARO #define DEFAULT_BUFSIZE 1536 97d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE; 98d3757ba4SJoe Perches module_param(buf_sz, int, 0644); 997ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size"); 1007ac6653aSJeff Kirsher 10122ad3838SGiuseppe Cavallaro #define STMMAC_RX_COPYBREAK 256 10222ad3838SGiuseppe Cavallaro 1037ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 1047ac6653aSJeff Kirsher NETIF_MSG_LINK | NETIF_MSG_IFUP | 1057ac6653aSJeff Kirsher NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 1067ac6653aSJeff Kirsher 107d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER 1000 108d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; 109d3757ba4SJoe Perches module_param(eee_timer, int, 0644); 110d765955dSGiuseppe CAVALLARO 
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 111388e201dSVineetha G. Jaya Kumaran #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) 112d765955dSGiuseppe CAVALLARO 11322d3efe5SPavel Machek /* By default the driver will use the ring mode to manage tx and rx descriptors, 11422d3efe5SPavel Machek * but allow user to force to use the chain instead of the ring 1154a7d666aSGiuseppe CAVALLARO */ 1164a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode; 117d3757ba4SJoe Perches module_param(chain_mode, int, 0444); 1184a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); 1194a7d666aSGiuseppe CAVALLARO 1207ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 1218532f613SOng Boon Leong /* For MSI interrupts handling */ 1228532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); 1238532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); 1248532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); 1258532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); 126132c32eeSOng Boon Leong static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); 127132c32eeSOng Boon Leong static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); 1287ac6653aSJeff Kirsher 12950fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 130481a7d15SJiping Ma static const struct net_device_ops stmmac_netdev_ops; 1318d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev); 132466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev); 133bfab27a1SGiuseppe CAVALLARO #endif 134bfab27a1SGiuseppe CAVALLARO 135d5a05e69SVincent Whitchurch #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) 1369125cdd1SGiuseppe CAVALLARO 1375ec55823SJoakim Zhang int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) 1385ec55823SJoakim 
Zhang { 1395ec55823SJoakim Zhang int ret = 0; 1405ec55823SJoakim Zhang 1415ec55823SJoakim Zhang if (enabled) { 1425ec55823SJoakim Zhang ret = clk_prepare_enable(priv->plat->stmmac_clk); 1435ec55823SJoakim Zhang if (ret) 1445ec55823SJoakim Zhang return ret; 1455ec55823SJoakim Zhang ret = clk_prepare_enable(priv->plat->pclk); 1465ec55823SJoakim Zhang if (ret) { 1475ec55823SJoakim Zhang clk_disable_unprepare(priv->plat->stmmac_clk); 1485ec55823SJoakim Zhang return ret; 1495ec55823SJoakim Zhang } 150b4d45aeeSJoakim Zhang if (priv->plat->clks_config) { 151b4d45aeeSJoakim Zhang ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); 152b4d45aeeSJoakim Zhang if (ret) { 153b4d45aeeSJoakim Zhang clk_disable_unprepare(priv->plat->stmmac_clk); 154b4d45aeeSJoakim Zhang clk_disable_unprepare(priv->plat->pclk); 155b4d45aeeSJoakim Zhang return ret; 156b4d45aeeSJoakim Zhang } 157b4d45aeeSJoakim Zhang } 1585ec55823SJoakim Zhang } else { 1595ec55823SJoakim Zhang clk_disable_unprepare(priv->plat->stmmac_clk); 1605ec55823SJoakim Zhang clk_disable_unprepare(priv->plat->pclk); 161b4d45aeeSJoakim Zhang if (priv->plat->clks_config) 162b4d45aeeSJoakim Zhang priv->plat->clks_config(priv->plat->bsp_priv, enabled); 1635ec55823SJoakim Zhang } 1645ec55823SJoakim Zhang 1655ec55823SJoakim Zhang return ret; 1665ec55823SJoakim Zhang } 1675ec55823SJoakim Zhang EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); 1685ec55823SJoakim Zhang 1697ac6653aSJeff Kirsher /** 1707ac6653aSJeff Kirsher * stmmac_verify_args - verify the driver parameters. 171732fdf0eSGiuseppe CAVALLARO * Description: it checks the driver parameters and set a default in case of 172732fdf0eSGiuseppe CAVALLARO * errors. 
1737ac6653aSJeff Kirsher */ 1747ac6653aSJeff Kirsher static void stmmac_verify_args(void) 1757ac6653aSJeff Kirsher { 1767ac6653aSJeff Kirsher if (unlikely(watchdog < 0)) 1777ac6653aSJeff Kirsher watchdog = TX_TIMEO; 178d916701cSGiuseppe CAVALLARO if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) 179d916701cSGiuseppe CAVALLARO buf_sz = DEFAULT_BUFSIZE; 1807ac6653aSJeff Kirsher if (unlikely(flow_ctrl > 1)) 1817ac6653aSJeff Kirsher flow_ctrl = FLOW_AUTO; 1827ac6653aSJeff Kirsher else if (likely(flow_ctrl < 0)) 1837ac6653aSJeff Kirsher flow_ctrl = FLOW_OFF; 1847ac6653aSJeff Kirsher if (unlikely((pause < 0) || (pause > 0xffff))) 1857ac6653aSJeff Kirsher pause = PAUSE_TIME; 186d765955dSGiuseppe CAVALLARO if (eee_timer < 0) 187d765955dSGiuseppe CAVALLARO eee_timer = STMMAC_DEFAULT_LPI_TIMER; 1887ac6653aSJeff Kirsher } 1897ac6653aSJeff Kirsher 190bba2556eSOng Boon Leong static void __stmmac_disable_all_queues(struct stmmac_priv *priv) 191c22a3f48SJoao Pinto { 192c22a3f48SJoao Pinto u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 1938fce3331SJose Abreu u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 1948fce3331SJose Abreu u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 195c22a3f48SJoao Pinto u32 queue; 196c22a3f48SJoao Pinto 1978fce3331SJose Abreu for (queue = 0; queue < maxq; queue++) { 1988fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 199c22a3f48SJoao Pinto 200132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 201132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) { 202132c32eeSOng Boon Leong napi_disable(&ch->rxtx_napi); 203132c32eeSOng Boon Leong continue; 204132c32eeSOng Boon Leong } 205132c32eeSOng Boon Leong 2064ccb4585SJose Abreu if (queue < rx_queues_cnt) 2074ccb4585SJose Abreu napi_disable(&ch->rx_napi); 2084ccb4585SJose Abreu if (queue < tx_queues_cnt) 2094ccb4585SJose Abreu napi_disable(&ch->tx_napi); 210c22a3f48SJoao Pinto } 211c22a3f48SJoao Pinto } 212c22a3f48SJoao Pinto 213c22a3f48SJoao Pinto 
/** 214bba2556eSOng Boon Leong * stmmac_disable_all_queues - Disable all queues 215bba2556eSOng Boon Leong * @priv: driver private structure 216bba2556eSOng Boon Leong */ 217bba2556eSOng Boon Leong static void stmmac_disable_all_queues(struct stmmac_priv *priv) 218bba2556eSOng Boon Leong { 219bba2556eSOng Boon Leong u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 220bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q; 221bba2556eSOng Boon Leong u32 queue; 222bba2556eSOng Boon Leong 223bba2556eSOng Boon Leong /* synchronize_rcu() needed for pending XDP buffers to drain */ 224bba2556eSOng Boon Leong for (queue = 0; queue < rx_queues_cnt; queue++) { 225bba2556eSOng Boon Leong rx_q = &priv->rx_queue[queue]; 226bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 227bba2556eSOng Boon Leong synchronize_rcu(); 228bba2556eSOng Boon Leong break; 229bba2556eSOng Boon Leong } 230bba2556eSOng Boon Leong } 231bba2556eSOng Boon Leong 232bba2556eSOng Boon Leong __stmmac_disable_all_queues(priv); 233bba2556eSOng Boon Leong } 234bba2556eSOng Boon Leong 235bba2556eSOng Boon Leong /** 236c22a3f48SJoao Pinto * stmmac_enable_all_queues - Enable all queues 237c22a3f48SJoao Pinto * @priv: driver private structure 238c22a3f48SJoao Pinto */ 239c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv) 240c22a3f48SJoao Pinto { 241c22a3f48SJoao Pinto u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 2428fce3331SJose Abreu u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 2438fce3331SJose Abreu u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 244c22a3f48SJoao Pinto u32 queue; 245c22a3f48SJoao Pinto 2468fce3331SJose Abreu for (queue = 0; queue < maxq; queue++) { 2478fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 248c22a3f48SJoao Pinto 249132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 250132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) { 251132c32eeSOng Boon Leong napi_enable(&ch->rxtx_napi); 252132c32eeSOng Boon Leong continue; 
253132c32eeSOng Boon Leong } 254132c32eeSOng Boon Leong 2554ccb4585SJose Abreu if (queue < rx_queues_cnt) 2564ccb4585SJose Abreu napi_enable(&ch->rx_napi); 2574ccb4585SJose Abreu if (queue < tx_queues_cnt) 2584ccb4585SJose Abreu napi_enable(&ch->tx_napi); 259c22a3f48SJoao Pinto } 260c22a3f48SJoao Pinto } 261c22a3f48SJoao Pinto 26234877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv) 26334877a15SJose Abreu { 26434877a15SJose Abreu if (!test_bit(STMMAC_DOWN, &priv->state) && 26534877a15SJose Abreu !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) 26634877a15SJose Abreu queue_work(priv->wq, &priv->service_task); 26734877a15SJose Abreu } 26834877a15SJose Abreu 26934877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv) 27034877a15SJose Abreu { 27134877a15SJose Abreu netif_carrier_off(priv->dev); 27234877a15SJose Abreu set_bit(STMMAC_RESET_REQUESTED, &priv->state); 27334877a15SJose Abreu stmmac_service_event_schedule(priv); 27434877a15SJose Abreu } 27534877a15SJose Abreu 276c22a3f48SJoao Pinto /** 27732ceabcaSGiuseppe CAVALLARO * stmmac_clk_csr_set - dynamically set the MDC clock 27832ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 27932ceabcaSGiuseppe CAVALLARO * Description: this is to dynamically set the MDC clock according to the csr 28032ceabcaSGiuseppe CAVALLARO * clock input. 28132ceabcaSGiuseppe CAVALLARO * Note: 28232ceabcaSGiuseppe CAVALLARO * If a specific clk_csr value is passed from the platform 28332ceabcaSGiuseppe CAVALLARO * this means that the CSR Clock Range selection cannot be 28432ceabcaSGiuseppe CAVALLARO * changed at run-time and it is fixed (as reported in the driver 28532ceabcaSGiuseppe CAVALLARO * documentation). Viceversa the driver will try to set the MDC 28632ceabcaSGiuseppe CAVALLARO * clock dynamically according to the actual clock input. 
28732ceabcaSGiuseppe CAVALLARO */ 288cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv) 289cd7201f4SGiuseppe CAVALLARO { 290cd7201f4SGiuseppe CAVALLARO u32 clk_rate; 291cd7201f4SGiuseppe CAVALLARO 292f573c0b9Sjpinto clk_rate = clk_get_rate(priv->plat->stmmac_clk); 293cd7201f4SGiuseppe CAVALLARO 294cd7201f4SGiuseppe CAVALLARO /* Platform provided default clk_csr would be assumed valid 295ceb69499SGiuseppe CAVALLARO * for all other cases except for the below mentioned ones. 296ceb69499SGiuseppe CAVALLARO * For values higher than the IEEE 802.3 specified frequency 297ceb69499SGiuseppe CAVALLARO * we can not estimate the proper divider as it is not known 298ceb69499SGiuseppe CAVALLARO * the frequency of clk_csr_i. So we do not change the default 299ceb69499SGiuseppe CAVALLARO * divider. 300ceb69499SGiuseppe CAVALLARO */ 301cd7201f4SGiuseppe CAVALLARO if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { 302cd7201f4SGiuseppe CAVALLARO if (clk_rate < CSR_F_35M) 303cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_20_35M; 304cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) 305cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_35_60M; 306cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) 307cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_60_100M; 308cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) 309cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_100_150M; 310cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) 311cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_150_250M; 31208dad2f4SJesper Nilsson else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) 313cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_250_300M; 314ceb69499SGiuseppe CAVALLARO } 3159f93ac8dSLABBE Corentin 3169f93ac8dSLABBE Corentin if (priv->plat->has_sun8i) { 3179f93ac8dSLABBE Corentin 
if (clk_rate > 160000000) 3189f93ac8dSLABBE Corentin priv->clk_csr = 0x03; 3199f93ac8dSLABBE Corentin else if (clk_rate > 80000000) 3209f93ac8dSLABBE Corentin priv->clk_csr = 0x02; 3219f93ac8dSLABBE Corentin else if (clk_rate > 40000000) 3229f93ac8dSLABBE Corentin priv->clk_csr = 0x01; 3239f93ac8dSLABBE Corentin else 3249f93ac8dSLABBE Corentin priv->clk_csr = 0; 3259f93ac8dSLABBE Corentin } 3267d9e6c5aSJose Abreu 3277d9e6c5aSJose Abreu if (priv->plat->has_xgmac) { 3287d9e6c5aSJose Abreu if (clk_rate > 400000000) 3297d9e6c5aSJose Abreu priv->clk_csr = 0x5; 3307d9e6c5aSJose Abreu else if (clk_rate > 350000000) 3317d9e6c5aSJose Abreu priv->clk_csr = 0x4; 3327d9e6c5aSJose Abreu else if (clk_rate > 300000000) 3337d9e6c5aSJose Abreu priv->clk_csr = 0x3; 3347d9e6c5aSJose Abreu else if (clk_rate > 250000000) 3357d9e6c5aSJose Abreu priv->clk_csr = 0x2; 3367d9e6c5aSJose Abreu else if (clk_rate > 150000000) 3377d9e6c5aSJose Abreu priv->clk_csr = 0x1; 3387d9e6c5aSJose Abreu else 3397d9e6c5aSJose Abreu priv->clk_csr = 0x0; 3407d9e6c5aSJose Abreu } 341cd7201f4SGiuseppe CAVALLARO } 342cd7201f4SGiuseppe CAVALLARO 3437ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len) 3447ac6653aSJeff Kirsher { 345424c4f78SAndy Shevchenko pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); 346424c4f78SAndy Shevchenko print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); 3477ac6653aSJeff Kirsher } 3487ac6653aSJeff Kirsher 349ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) 3507ac6653aSJeff Kirsher { 351ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 352a6a3e026SLABBE Corentin u32 avail; 353e3ad57c9SGiuseppe Cavallaro 354ce736788SJoao Pinto if (tx_q->dirty_tx > tx_q->cur_tx) 355ce736788SJoao Pinto avail = tx_q->dirty_tx - tx_q->cur_tx - 1; 356e3ad57c9SGiuseppe Cavallaro else 357aa042f60SSong, Yoong Siang avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; 358e3ad57c9SGiuseppe Cavallaro 
359e3ad57c9SGiuseppe Cavallaro return avail; 360e3ad57c9SGiuseppe Cavallaro } 361e3ad57c9SGiuseppe Cavallaro 36254139cf3SJoao Pinto /** 36354139cf3SJoao Pinto * stmmac_rx_dirty - Get RX queue dirty 36454139cf3SJoao Pinto * @priv: driver private structure 36554139cf3SJoao Pinto * @queue: RX queue index 36654139cf3SJoao Pinto */ 36754139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) 368e3ad57c9SGiuseppe Cavallaro { 36954139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 370a6a3e026SLABBE Corentin u32 dirty; 371e3ad57c9SGiuseppe Cavallaro 37254139cf3SJoao Pinto if (rx_q->dirty_rx <= rx_q->cur_rx) 37354139cf3SJoao Pinto dirty = rx_q->cur_rx - rx_q->dirty_rx; 374e3ad57c9SGiuseppe Cavallaro else 375aa042f60SSong, Yoong Siang dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; 376e3ad57c9SGiuseppe Cavallaro 377e3ad57c9SGiuseppe Cavallaro return dirty; 3787ac6653aSJeff Kirsher } 3797ac6653aSJeff Kirsher 380be1c7eaeSVineetha G. Jaya Kumaran static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) 381be1c7eaeSVineetha G. Jaya Kumaran { 382be1c7eaeSVineetha G. Jaya Kumaran int tx_lpi_timer; 383be1c7eaeSVineetha G. Jaya Kumaran 384be1c7eaeSVineetha G. Jaya Kumaran /* Clear/set the SW EEE timer flag based on LPI ET enablement */ 385be1c7eaeSVineetha G. Jaya Kumaran priv->eee_sw_timer_en = en ? 0 : 1; 386be1c7eaeSVineetha G. Jaya Kumaran tx_lpi_timer = en ? priv->tx_lpi_timer : 0; 387be1c7eaeSVineetha G. Jaya Kumaran stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); 388be1c7eaeSVineetha G. Jaya Kumaran } 389be1c7eaeSVineetha G. Jaya Kumaran 39032ceabcaSGiuseppe CAVALLARO /** 391732fdf0eSGiuseppe CAVALLARO * stmmac_enable_eee_mode - check and enter in LPI mode 39232ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 393732fdf0eSGiuseppe CAVALLARO * Description: this function is to verify and enter in LPI mode in case of 394732fdf0eSGiuseppe CAVALLARO * EEE. 
39532ceabcaSGiuseppe CAVALLARO */ 396d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv) 397d765955dSGiuseppe CAVALLARO { 398ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 399ce736788SJoao Pinto u32 queue; 400ce736788SJoao Pinto 401ce736788SJoao Pinto /* check if all TX queues have the work finished */ 402ce736788SJoao Pinto for (queue = 0; queue < tx_cnt; queue++) { 403ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 404ce736788SJoao Pinto 405ce736788SJoao Pinto if (tx_q->dirty_tx != tx_q->cur_tx) 406ce736788SJoao Pinto return; /* still unfinished work */ 407ce736788SJoao Pinto } 408ce736788SJoao Pinto 409d765955dSGiuseppe CAVALLARO /* Check and enter in LPI mode */ 410ce736788SJoao Pinto if (!priv->tx_path_in_lpi_mode) 411c10d4c82SJose Abreu stmmac_set_eee_mode(priv, priv->hw, 412b4b7b772Sjpinto priv->plat->en_tx_lpi_clockgating); 413d765955dSGiuseppe CAVALLARO } 414d765955dSGiuseppe CAVALLARO 41532ceabcaSGiuseppe CAVALLARO /** 416732fdf0eSGiuseppe CAVALLARO * stmmac_disable_eee_mode - disable and exit from LPI mode 41732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 41832ceabcaSGiuseppe CAVALLARO * Description: this function is to exit and disable EEE in case of 41932ceabcaSGiuseppe CAVALLARO * LPI state is true. This is called by the xmit. 42032ceabcaSGiuseppe CAVALLARO */ 421d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv) 422d765955dSGiuseppe CAVALLARO { 423be1c7eaeSVineetha G. Jaya Kumaran if (!priv->eee_sw_timer_en) { 424be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 425be1c7eaeSVineetha G. Jaya Kumaran return; 426be1c7eaeSVineetha G. Jaya Kumaran } 427be1c7eaeSVineetha G. 
Jaya Kumaran 428c10d4c82SJose Abreu stmmac_reset_eee_mode(priv, priv->hw); 429d765955dSGiuseppe CAVALLARO del_timer_sync(&priv->eee_ctrl_timer); 430d765955dSGiuseppe CAVALLARO priv->tx_path_in_lpi_mode = false; 431d765955dSGiuseppe CAVALLARO } 432d765955dSGiuseppe CAVALLARO 433d765955dSGiuseppe CAVALLARO /** 434732fdf0eSGiuseppe CAVALLARO * stmmac_eee_ctrl_timer - EEE TX SW timer. 435d0ea5cbdSJesse Brandeburg * @t: timer_list struct containing private info 436d765955dSGiuseppe CAVALLARO * Description: 43732ceabcaSGiuseppe CAVALLARO * if there is no data transfer and if we are not in LPI state, 438d765955dSGiuseppe CAVALLARO * then MAC Transmitter can be moved to LPI state. 439d765955dSGiuseppe CAVALLARO */ 440e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t) 441d765955dSGiuseppe CAVALLARO { 442e99e88a9SKees Cook struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); 443d765955dSGiuseppe CAVALLARO 444d765955dSGiuseppe CAVALLARO stmmac_enable_eee_mode(priv); 445388e201dSVineetha G. Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 446d765955dSGiuseppe CAVALLARO } 447d765955dSGiuseppe CAVALLARO 448d765955dSGiuseppe CAVALLARO /** 449732fdf0eSGiuseppe CAVALLARO * stmmac_eee_init - init EEE 45032ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 451d765955dSGiuseppe CAVALLARO * Description: 452732fdf0eSGiuseppe CAVALLARO * if the GMAC supports the EEE (from the HW cap reg) and the phy device 453732fdf0eSGiuseppe CAVALLARO * can also manage EEE, this function enable the LPI state and start related 454732fdf0eSGiuseppe CAVALLARO * timer. 455d765955dSGiuseppe CAVALLARO */ 456d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv) 457d765955dSGiuseppe CAVALLARO { 458388e201dSVineetha G. 
Jaya Kumaran int eee_tw_timer = priv->eee_tw_timer; 459879626e3SJerome Brunet 460f5351ef7SGiuseppe CAVALLARO /* Using PCS we cannot dial with the phy registers at this stage 461f5351ef7SGiuseppe CAVALLARO * so we do not support extra feature like EEE. 462f5351ef7SGiuseppe CAVALLARO */ 463a47b9e15SDejin Zheng if (priv->hw->pcs == STMMAC_PCS_TBI || 464a47b9e15SDejin Zheng priv->hw->pcs == STMMAC_PCS_RTBI) 46574371272SJose Abreu return false; 466f5351ef7SGiuseppe CAVALLARO 46774371272SJose Abreu /* Check if MAC core supports the EEE feature. */ 46874371272SJose Abreu if (!priv->dma_cap.eee) 46974371272SJose Abreu return false; 470d765955dSGiuseppe CAVALLARO 47129555fa3SThierry Reding mutex_lock(&priv->lock); 47274371272SJose Abreu 47374371272SJose Abreu /* Check if it needs to be deactivated */ 474177d935aSJon Hunter if (!priv->eee_active) { 475177d935aSJon Hunter if (priv->eee_enabled) { 47638ddc59dSLABBE Corentin netdev_dbg(priv->dev, "disable EEE\n"); 477be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 47883bf79b6SGiuseppe CAVALLARO del_timer_sync(&priv->eee_ctrl_timer); 479388e201dSVineetha G. Jaya Kumaran stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); 480d4aeaed8SWong Vee Khee if (priv->hw->xpcs) 481d4aeaed8SWong Vee Khee xpcs_config_eee(priv->hw->xpcs, 482d4aeaed8SWong Vee Khee priv->plat->mult_fact_100ns, 483d4aeaed8SWong Vee Khee false); 484177d935aSJon Hunter } 4850867bb97SJon Hunter mutex_unlock(&priv->lock); 48674371272SJose Abreu return false; 48774371272SJose Abreu } 48874371272SJose Abreu 48974371272SJose Abreu if (priv->eee_active && !priv->eee_enabled) { 49074371272SJose Abreu timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); 49174371272SJose Abreu stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, 492388e201dSVineetha G. 
Jaya Kumaran eee_tw_timer); 493656ed8b0SWong Vee Khee if (priv->hw->xpcs) 494656ed8b0SWong Vee Khee xpcs_config_eee(priv->hw->xpcs, 495656ed8b0SWong Vee Khee priv->plat->mult_fact_100ns, 496656ed8b0SWong Vee Khee true); 49783bf79b6SGiuseppe CAVALLARO } 49874371272SJose Abreu 499be1c7eaeSVineetha G. Jaya Kumaran if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { 500be1c7eaeSVineetha G. Jaya Kumaran del_timer_sync(&priv->eee_ctrl_timer); 501be1c7eaeSVineetha G. Jaya Kumaran priv->tx_path_in_lpi_mode = false; 502be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 1); 503be1c7eaeSVineetha G. Jaya Kumaran } else { 504be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 505be1c7eaeSVineetha G. Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, 506be1c7eaeSVineetha G. Jaya Kumaran STMMAC_LPI_T(priv->tx_lpi_timer)); 507be1c7eaeSVineetha G. Jaya Kumaran } 508388e201dSVineetha G. Jaya Kumaran 50929555fa3SThierry Reding mutex_unlock(&priv->lock); 51038ddc59dSLABBE Corentin netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); 51174371272SJose Abreu return true; 512d765955dSGiuseppe CAVALLARO } 513d765955dSGiuseppe CAVALLARO 514*3751c3d3SThomas Gleixner static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv) 515*3751c3d3SThomas Gleixner { 516*3751c3d3SThomas Gleixner /* Correct the clk domain crossing(CDC) error */ 517*3751c3d3SThomas Gleixner if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) 518*3751c3d3SThomas Gleixner return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; 519*3751c3d3SThomas Gleixner return 0; 520*3751c3d3SThomas Gleixner } 521*3751c3d3SThomas Gleixner 522732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps 52332ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 524ba1ffd74SGiuseppe CAVALLARO * @p : descriptor pointer 525891434b1SRayagond Kokatanur * @skb : the socket buffer 526891434b1SRayagond Kokatanur * Description : 527891434b1SRayagond Kokatanur * 
This function will read timestamp from the descriptor & pass it to stack. 528891434b1SRayagond Kokatanur * and also perform some sanity checks. 529891434b1SRayagond Kokatanur */ 530891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, 531ba1ffd74SGiuseppe CAVALLARO struct dma_desc *p, struct sk_buff *skb) 532891434b1SRayagond Kokatanur { 533891434b1SRayagond Kokatanur struct skb_shared_hwtstamps shhwtstamp; 53425e80cd0SJose Abreu bool found = false; 535df103170SNathan Chancellor u64 ns = 0; 536891434b1SRayagond Kokatanur 537891434b1SRayagond Kokatanur if (!priv->hwts_tx_en) 538891434b1SRayagond Kokatanur return; 539891434b1SRayagond Kokatanur 540ceb69499SGiuseppe CAVALLARO /* exit if skb doesn't support hw tstamp */ 54175e4364fSdamuzi000 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) 542891434b1SRayagond Kokatanur return; 543891434b1SRayagond Kokatanur 544891434b1SRayagond Kokatanur /* check tx tstamp status */ 54542de047dSJose Abreu if (stmmac_get_tx_timestamp_status(priv, p)) { 54642de047dSJose Abreu stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); 54725e80cd0SJose Abreu found = true; 54825e80cd0SJose Abreu } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { 54925e80cd0SJose Abreu found = true; 55025e80cd0SJose Abreu } 551891434b1SRayagond Kokatanur 55225e80cd0SJose Abreu if (found) { 553*3751c3d3SThomas Gleixner ns -= stmmac_cdc_adjust(priv); 5543600be5fSVoon Weifeng 555891434b1SRayagond Kokatanur memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 556891434b1SRayagond Kokatanur shhwtstamp.hwtstamp = ns_to_ktime(ns); 557ba1ffd74SGiuseppe CAVALLARO 55833d4c482SMario Molitor netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); 559891434b1SRayagond Kokatanur /* pass tstamp to stack */ 560891434b1SRayagond Kokatanur skb_tstamp_tx(skb, &shhwtstamp); 561ba1ffd74SGiuseppe CAVALLARO } 562891434b1SRayagond Kokatanur } 563891434b1SRayagond Kokatanur 564732fdf0eSGiuseppe CAVALLARO /* 
 * stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to stack. It also perform some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	/* Nothing to do when RX timestamping is globally disabled */
	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		/* Compensate for the clock-domain-crossing latency */
		ns -= stmmac_cdc_adjust(priv);

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	/* GMAC4 and XGMAC share the newer timestamping register layout */
	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			/* Older cores need TSEVNTENA in addition; newer ones
			 * (>= 4.10) take all events via SNAPTYPSEL alone.
			 */
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				    (u32)now.tv_sec, now.tv_nsec);
	}

	/* Remember the accepted config so stmmac_hwtstamp_get() can report it */
	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtain the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	/* Return the config cached by the last stmmac_hwtstamp_set() */
	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	/* Timestamping starts disabled; enabled later via the hwtstamp ioctl */
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

/* phylink .validate callback: build the set of link modes the MAC supports
 * and intersect it with @supported / @state->advertising.  @mask collects
 * modes to *remove* (speed caps, half-duplex with multiple TX queues).
 */
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	if (priv->hw->xpcs)
		xpcs_validate(priv->hw->xpcs, supported, state);
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}

/* Drive the Frame Preemption (FPE) handshake state on link transitions:
 * on link-up (with handshake enabled) kick off verification; on link-down
 * reset both local and link-partner FPE states.
 */
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	/* Disable the MAC and tear down EEE/LPI state */
	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

/* phylink .mac_link_up callback: program the MAC control register for the
 * negotiated speed/duplex, apply flow control, enable the MAC and bring up
 * EEE and FPE state as supported.
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			/* Unsupported speed: leave the MAC untouched */
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	/* Only advertise a PCS mode when the DMA capability register
	 * reports PCS support; otherwise priv->hw->pcs is left untouched.
	 */
	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	/* Without hardware PMT, Wake-on-LAN capability (if any) comes from
	 * the PHY, so query it via phylink and expose it on the device.
	 */
	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
	}

	return ret;
}

/* stmmac_phy_setup - create and configure the phylink instance.
 * Falls back to the device fwnode when the platform did not provide a
 * phylink_node; attaches the XPCS as phylink PCS when present.
 * Returns 0 on success or a negative errno from phylink_create().
 */
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	if (priv->hw->xpcs)
		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);

	priv->phylink = phylink;
	return 0;
}

/* Dump every RX ring (extended or basic descriptors) for debugging. */
static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

/* Dump every TX ring for debugging; TX additionally supports the
 * enhanced (TBS) descriptor layout.
 */
static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		/* NOTE(review): queue is u32 but printed with %d (RX side
		 * uses %u) — harmless for sane queue counts, worth unifying.
		 */
		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

/* Convenience wrapper: dump both RX and TX rings. */
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

/* stmmac_set_bfsize - pick a DMA buffer size bucket for a given MTU.
 * NOTE(review): every branch (including the final else) overwrites ret,
 * so the @bufsize argument is effectively unused here.
 */
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors: re-init each one so it is owned by the
	 * DMA again; the last descriptor is flagged to close the ring.
	 */
	for (i = 0; i < priv->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors; the descriptor layout depends on
	 * whether extended descriptors or TBS (enhanced) descriptors
	 * are in use for this queue.
	 */
	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * in case of both basic and extended descriptors are used.
139671fedb01SJoao Pinto */ 139771fedb01SJoao Pinto static void stmmac_clear_descriptors(struct stmmac_priv *priv) 139871fedb01SJoao Pinto { 139954139cf3SJoao Pinto u32 rx_queue_cnt = priv->plat->rx_queues_to_use; 1400ce736788SJoao Pinto u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 140154139cf3SJoao Pinto u32 queue; 140254139cf3SJoao Pinto 140371fedb01SJoao Pinto /* Clear the RX descriptors */ 140454139cf3SJoao Pinto for (queue = 0; queue < rx_queue_cnt; queue++) 140554139cf3SJoao Pinto stmmac_clear_rx_descriptors(priv, queue); 140671fedb01SJoao Pinto 140771fedb01SJoao Pinto /* Clear the TX descriptors */ 1408ce736788SJoao Pinto for (queue = 0; queue < tx_queue_cnt; queue++) 1409ce736788SJoao Pinto stmmac_clear_tx_descriptors(priv, queue); 141071fedb01SJoao Pinto } 141171fedb01SJoao Pinto 141271fedb01SJoao Pinto /** 1413732fdf0eSGiuseppe CAVALLARO * stmmac_init_rx_buffers - init the RX descriptor buffer. 1414732fdf0eSGiuseppe CAVALLARO * @priv: driver private structure 1415732fdf0eSGiuseppe CAVALLARO * @p: descriptor pointer 1416732fdf0eSGiuseppe CAVALLARO * @i: descriptor index 141754139cf3SJoao Pinto * @flags: gfp flag 141854139cf3SJoao Pinto * @queue: RX queue index 1419732fdf0eSGiuseppe CAVALLARO * Description: this function is called to allocate a receive buffer, perform 1420732fdf0eSGiuseppe CAVALLARO * the DMA mapping and init the descriptor. 
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	/* NOTE(review): @flags is unused here — allocation goes through the
	 * queue's page_pool rather than a direct gfp allocation.
	 */
	if (!buf->page) {
		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	/* Split Header: allocate a second page for the header part and
	 * program it into the descriptor's secondary address.
	 */
	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	/* 16KiB jumbo buffers need desc3 set up for the second half. */
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	/* Return both the data page and the (optional) split-header page
	 * to the page pool; clearing the pointers guards against double
	 * free on a later pass.
	 */
	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	/* XDP_TX buffers come straight from the RX page pool and were never
	 * DMA-mapped by the TX path, so skip the unmap for them.
	 */
	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* XDP frames (both locally forwarded and ndo_xdp_xmit) are
	 * released through the XDP core.
	 */
	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	/* AF_XDP zero-copy TX: just count the completion; the caller
	 * reports the batch to the xsk pool.
	 */
	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/* stmmac_alloc_rx_buffers - populate every RX descriptor of @queue with a
 * page_pool buffer. Returns 0 or the first allocation error; on failure
 * buf_alloc_num reflects how many entries were successfully filled.
 */
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
				   gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}

/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

		if (!buf->xdp)
			continue;

		xsk_buff_free(buf->xdp);
		buf->xdp = NULL;
	}
}

/* stmmac_alloc_rx_buffers_zc - AF_XDP zero-copy variant: fill the RX ring
 * with buffers from the queue's xsk_pool and program their DMA addresses
 * into the descriptors. Returns -ENOMEM when the pool runs dry.
 */
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);

		rx_q->buf_alloc_num++;
	}

	return 0;
}

/* stmmac_get_xsk_pool - return the AF_XDP buffer pool bound to @queue, or
 * NULL when XDP is disabled or the queue has no zero-copy socket attached.
 */
static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(priv->dev, queue);
}

/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, queue);

	/* Drop any previously registered memory model before re-registering
	 * as either XSK or page_pool below.
	 */
	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 0);
	}

	return 0;
}

/* init_dma_rx_desc_rings - initialize all RX rings; on failure, unwind the
 * buffers of every queue initialized so far (including the failing one).
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	/* queue is u32, so "queue >= 0" is always true; the explicit
	 * "queue == 0" break below is what terminates this loop.
	 */
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, queue);
		else
			dma_free_rx_skbufs(priv, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		if (queue == 0)
			break;

		queue--;
	}

	return ret;
}

/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue : TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 priv->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 priv->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	/* Clear every descriptor and reset its bookkeeping entry. */
	for (i = 0; i < priv->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));

	return 0;
}

/* init_dma_tx_desc_rings - initialize every TX ring; always returns 0
 * (the per-queue helper cannot fail).
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt;
	u32 queue;

	tx_queue_cnt = priv->plat->tx_queues_to_use;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		__init_dma_tx_desc_rings(priv, queue);

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
180771fedb01SJoao Pinto */ 180871fedb01SJoao Pinto static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) 180971fedb01SJoao Pinto { 181071fedb01SJoao Pinto struct stmmac_priv *priv = netdev_priv(dev); 181171fedb01SJoao Pinto int ret; 181271fedb01SJoao Pinto 181371fedb01SJoao Pinto ret = init_dma_rx_desc_rings(dev, flags); 181471fedb01SJoao Pinto if (ret) 181571fedb01SJoao Pinto return ret; 181671fedb01SJoao Pinto 181771fedb01SJoao Pinto ret = init_dma_tx_desc_rings(dev); 181871fedb01SJoao Pinto 18195bacd778SLABBE Corentin stmmac_clear_descriptors(priv); 18207ac6653aSJeff Kirsher 1821c24602efSGiuseppe CAVALLARO if (netif_msg_hw(priv)) 1822c24602efSGiuseppe CAVALLARO stmmac_display_rings(priv); 182356329137SBartlomiej Zolnierkiewicz 182456329137SBartlomiej Zolnierkiewicz return ret; 18257ac6653aSJeff Kirsher } 18267ac6653aSJeff Kirsher 182771fedb01SJoao Pinto /** 182871fedb01SJoao Pinto * dma_free_tx_skbufs - free TX dma buffers 182971fedb01SJoao Pinto * @priv: private structure 1830ce736788SJoao Pinto * @queue: TX queue index 183171fedb01SJoao Pinto */ 1832ce736788SJoao Pinto static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) 18337ac6653aSJeff Kirsher { 1834132c32eeSOng Boon Leong struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 18357ac6653aSJeff Kirsher int i; 18367ac6653aSJeff Kirsher 1837132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 1838132c32eeSOng Boon Leong 1839aa042f60SSong, Yoong Siang for (i = 0; i < priv->dma_tx_size; i++) 1840ce736788SJoao Pinto stmmac_free_tx_buffer(priv, queue, i); 1841132c32eeSOng Boon Leong 1842132c32eeSOng Boon Leong if (tx_q->xsk_pool && tx_q->xsk_frames_done) { 1843132c32eeSOng Boon Leong xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 1844132c32eeSOng Boon Leong tx_q->xsk_frames_done = 0; 1845132c32eeSOng Boon Leong tx_q->xsk_pool = NULL; 1846132c32eeSOng Boon Leong } 18477ac6653aSJeff Kirsher } 18487ac6653aSJeff Kirsher 1849732fdf0eSGiuseppe CAVALLARO /** 18504ec236c7SFugang Duan 
* stmmac_free_tx_skbufs - free TX skb buffers 18514ec236c7SFugang Duan * @priv: private structure 18524ec236c7SFugang Duan */ 18534ec236c7SFugang Duan static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 18544ec236c7SFugang Duan { 18554ec236c7SFugang Duan u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 18564ec236c7SFugang Duan u32 queue; 18574ec236c7SFugang Duan 18584ec236c7SFugang Duan for (queue = 0; queue < tx_queue_cnt; queue++) 18594ec236c7SFugang Duan dma_free_tx_skbufs(priv, queue); 18604ec236c7SFugang Duan } 18614ec236c7SFugang Duan 18624ec236c7SFugang Duan /** 1863da5ec7f2SOng Boon Leong * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) 186454139cf3SJoao Pinto * @priv: private structure 1865da5ec7f2SOng Boon Leong * @queue: RX queue index 186654139cf3SJoao Pinto */ 1867da5ec7f2SOng Boon Leong static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) 186854139cf3SJoao Pinto { 186954139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 187054139cf3SJoao Pinto 187154139cf3SJoao Pinto /* Release the DMA RX socket buffers */ 1872bba2556eSOng Boon Leong if (rx_q->xsk_pool) 1873bba2556eSOng Boon Leong dma_free_rx_xskbufs(priv, queue); 1874bba2556eSOng Boon Leong else 187554139cf3SJoao Pinto dma_free_rx_skbufs(priv, queue); 187654139cf3SJoao Pinto 1877bba2556eSOng Boon Leong rx_q->buf_alloc_num = 0; 1878bba2556eSOng Boon Leong rx_q->xsk_pool = NULL; 1879bba2556eSOng Boon Leong 188054139cf3SJoao Pinto /* Free DMA regions of consistent memory previously allocated */ 188154139cf3SJoao Pinto if (!priv->extend_desc) 1882aa042f60SSong, Yoong Siang dma_free_coherent(priv->device, priv->dma_rx_size * 1883aa042f60SSong, Yoong Siang sizeof(struct dma_desc), 188454139cf3SJoao Pinto rx_q->dma_rx, rx_q->dma_rx_phy); 188554139cf3SJoao Pinto else 1886aa042f60SSong, Yoong Siang dma_free_coherent(priv->device, priv->dma_rx_size * 188754139cf3SJoao Pinto sizeof(struct dma_extended_desc), 188854139cf3SJoao Pinto 
rx_q->dma_erx, rx_q->dma_rx_phy); 188954139cf3SJoao Pinto 1890be8b38a7SOng Boon Leong if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) 1891be8b38a7SOng Boon Leong xdp_rxq_info_unreg(&rx_q->xdp_rxq); 1892be8b38a7SOng Boon Leong 18932af6106aSJose Abreu kfree(rx_q->buf_pool); 1894c3f812ceSJonathan Lemon if (rx_q->page_pool) 18952af6106aSJose Abreu page_pool_destroy(rx_q->page_pool); 18962af6106aSJose Abreu } 1897da5ec7f2SOng Boon Leong 1898da5ec7f2SOng Boon Leong static void free_dma_rx_desc_resources(struct stmmac_priv *priv) 1899da5ec7f2SOng Boon Leong { 1900da5ec7f2SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 1901da5ec7f2SOng Boon Leong u32 queue; 1902da5ec7f2SOng Boon Leong 1903da5ec7f2SOng Boon Leong /* Free RX queue resources */ 1904da5ec7f2SOng Boon Leong for (queue = 0; queue < rx_count; queue++) 1905da5ec7f2SOng Boon Leong __free_dma_rx_desc_resources(priv, queue); 190654139cf3SJoao Pinto } 190754139cf3SJoao Pinto 190854139cf3SJoao Pinto /** 1909da5ec7f2SOng Boon Leong * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) 1910ce736788SJoao Pinto * @priv: private structure 1911da5ec7f2SOng Boon Leong * @queue: TX queue index 1912ce736788SJoao Pinto */ 1913da5ec7f2SOng Boon Leong static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) 1914ce736788SJoao Pinto { 1915ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1916579a25a8SJose Abreu size_t size; 1917579a25a8SJose Abreu void *addr; 1918ce736788SJoao Pinto 1919ce736788SJoao Pinto /* Release the DMA TX socket buffers */ 1920ce736788SJoao Pinto dma_free_tx_skbufs(priv, queue); 1921ce736788SJoao Pinto 1922579a25a8SJose Abreu if (priv->extend_desc) { 1923579a25a8SJose Abreu size = sizeof(struct dma_extended_desc); 1924579a25a8SJose Abreu addr = tx_q->dma_etx; 1925579a25a8SJose Abreu } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1926579a25a8SJose Abreu size = sizeof(struct dma_edesc); 1927579a25a8SJose Abreu addr = tx_q->dma_entx; 
1928579a25a8SJose Abreu } else { 1929579a25a8SJose Abreu size = sizeof(struct dma_desc); 1930579a25a8SJose Abreu addr = tx_q->dma_tx; 1931579a25a8SJose Abreu } 1932579a25a8SJose Abreu 1933aa042f60SSong, Yoong Siang size *= priv->dma_tx_size; 1934579a25a8SJose Abreu 1935579a25a8SJose Abreu dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); 1936ce736788SJoao Pinto 1937ce736788SJoao Pinto kfree(tx_q->tx_skbuff_dma); 1938ce736788SJoao Pinto kfree(tx_q->tx_skbuff); 1939ce736788SJoao Pinto } 1940da5ec7f2SOng Boon Leong 1941da5ec7f2SOng Boon Leong static void free_dma_tx_desc_resources(struct stmmac_priv *priv) 1942da5ec7f2SOng Boon Leong { 1943da5ec7f2SOng Boon Leong u32 tx_count = priv->plat->tx_queues_to_use; 1944da5ec7f2SOng Boon Leong u32 queue; 1945da5ec7f2SOng Boon Leong 1946da5ec7f2SOng Boon Leong /* Free TX queue resources */ 1947da5ec7f2SOng Boon Leong for (queue = 0; queue < tx_count; queue++) 1948da5ec7f2SOng Boon Leong __free_dma_tx_desc_resources(priv, queue); 1949ce736788SJoao Pinto } 1950ce736788SJoao Pinto 1951ce736788SJoao Pinto /** 1952da5ec7f2SOng Boon Leong * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). 1953732fdf0eSGiuseppe CAVALLARO * @priv: private structure 1954da5ec7f2SOng Boon Leong * @queue: RX queue index 1955732fdf0eSGiuseppe CAVALLARO * Description: according to which descriptor can be used (extend or basic) 1956732fdf0eSGiuseppe CAVALLARO * this function allocates the resources for TX and RX paths. In case of 1957732fdf0eSGiuseppe CAVALLARO * reception, for example, it pre-allocated the RX socket buffer in order to 1958732fdf0eSGiuseppe CAVALLARO * allow zero-copy mechanism. 
1959732fdf0eSGiuseppe CAVALLARO */ 1960da5ec7f2SOng Boon Leong static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) 196109f8d696SSrinivas Kandagatla { 196254139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1963be8b38a7SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 1964da5ec7f2SOng Boon Leong bool xdp_prog = stmmac_xdp_is_enabled(priv); 19652af6106aSJose Abreu struct page_pool_params pp_params = { 0 }; 19664f28bd95SThierry Reding unsigned int num_pages; 1967132c32eeSOng Boon Leong unsigned int napi_id; 1968be8b38a7SOng Boon Leong int ret; 196954139cf3SJoao Pinto 197054139cf3SJoao Pinto rx_q->queue_index = queue; 197154139cf3SJoao Pinto rx_q->priv_data = priv; 197254139cf3SJoao Pinto 19735fabb012SOng Boon Leong pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 1974aa042f60SSong, Yoong Siang pp_params.pool_size = priv->dma_rx_size; 19754f28bd95SThierry Reding num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); 19764f28bd95SThierry Reding pp_params.order = ilog2(num_pages); 19772af6106aSJose Abreu pp_params.nid = dev_to_node(priv->device); 19782af6106aSJose Abreu pp_params.dev = priv->device; 19795fabb012SOng Boon Leong pp_params.dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 19805fabb012SOng Boon Leong pp_params.offset = stmmac_rx_offset(priv); 19815fabb012SOng Boon Leong pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); 19825bacd778SLABBE Corentin 19832af6106aSJose Abreu rx_q->page_pool = page_pool_create(&pp_params); 19842af6106aSJose Abreu if (IS_ERR(rx_q->page_pool)) { 19852af6106aSJose Abreu ret = PTR_ERR(rx_q->page_pool); 19862af6106aSJose Abreu rx_q->page_pool = NULL; 1987da5ec7f2SOng Boon Leong return ret; 19882af6106aSJose Abreu } 19892af6106aSJose Abreu 1990aa042f60SSong, Yoong Siang rx_q->buf_pool = kcalloc(priv->dma_rx_size, 1991aa042f60SSong, Yoong Siang sizeof(*rx_q->buf_pool), 19925bacd778SLABBE Corentin GFP_KERNEL); 19932af6106aSJose Abreu if (!rx_q->buf_pool) 1994da5ec7f2SOng Boon Leong return -ENOMEM; 19955bacd778SLABBE Corentin 19965bacd778SLABBE Corentin if (priv->extend_desc) { 1997750afb08SLuis Chamberlain rx_q->dma_erx = dma_alloc_coherent(priv->device, 1998aa042f60SSong, Yoong Siang priv->dma_rx_size * 1999aa042f60SSong, Yoong Siang sizeof(struct dma_extended_desc), 200054139cf3SJoao Pinto &rx_q->dma_rx_phy, 20015bacd778SLABBE Corentin GFP_KERNEL); 200254139cf3SJoao Pinto if (!rx_q->dma_erx) 2003da5ec7f2SOng Boon Leong return -ENOMEM; 20045bacd778SLABBE Corentin 200571fedb01SJoao Pinto } else { 2006750afb08SLuis Chamberlain rx_q->dma_rx = dma_alloc_coherent(priv->device, 2007aa042f60SSong, Yoong Siang priv->dma_rx_size * 2008aa042f60SSong, Yoong Siang sizeof(struct dma_desc), 200954139cf3SJoao Pinto &rx_q->dma_rx_phy, 201071fedb01SJoao Pinto GFP_KERNEL); 201154139cf3SJoao Pinto if (!rx_q->dma_rx) 2012da5ec7f2SOng Boon Leong return -ENOMEM; 201371fedb01SJoao Pinto } 2014be8b38a7SOng Boon Leong 2015132c32eeSOng Boon Leong if (stmmac_xdp_is_enabled(priv) && 2016132c32eeSOng Boon Leong test_bit(queue, priv->af_xdp_zc_qps)) 2017132c32eeSOng Boon Leong napi_id = ch->rxtx_napi.napi_id; 2018132c32eeSOng Boon Leong else 2019132c32eeSOng Boon Leong napi_id = ch->rx_napi.napi_id; 
2020132c32eeSOng Boon Leong 2021be8b38a7SOng Boon Leong ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, 2022be8b38a7SOng Boon Leong rx_q->queue_index, 2023132c32eeSOng Boon Leong napi_id); 2024be8b38a7SOng Boon Leong if (ret) { 2025be8b38a7SOng Boon Leong netdev_err(priv->dev, "Failed to register xdp rxq info\n"); 2026da5ec7f2SOng Boon Leong return -EINVAL; 2027be8b38a7SOng Boon Leong } 2028da5ec7f2SOng Boon Leong 2029da5ec7f2SOng Boon Leong return 0; 2030da5ec7f2SOng Boon Leong } 2031da5ec7f2SOng Boon Leong 2032da5ec7f2SOng Boon Leong static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) 2033da5ec7f2SOng Boon Leong { 2034da5ec7f2SOng Boon Leong u32 rx_count = priv->plat->rx_queues_to_use; 2035da5ec7f2SOng Boon Leong u32 queue; 2036da5ec7f2SOng Boon Leong int ret; 2037da5ec7f2SOng Boon Leong 2038da5ec7f2SOng Boon Leong /* RX queues buffers and DMA */ 2039da5ec7f2SOng Boon Leong for (queue = 0; queue < rx_count; queue++) { 2040da5ec7f2SOng Boon Leong ret = __alloc_dma_rx_desc_resources(priv, queue); 2041da5ec7f2SOng Boon Leong if (ret) 2042da5ec7f2SOng Boon Leong goto err_dma; 204354139cf3SJoao Pinto } 204471fedb01SJoao Pinto 204571fedb01SJoao Pinto return 0; 204671fedb01SJoao Pinto 204771fedb01SJoao Pinto err_dma: 204854139cf3SJoao Pinto free_dma_rx_desc_resources(priv); 204954139cf3SJoao Pinto 205071fedb01SJoao Pinto return ret; 205171fedb01SJoao Pinto } 205271fedb01SJoao Pinto 205371fedb01SJoao Pinto /** 2054da5ec7f2SOng Boon Leong * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). 205571fedb01SJoao Pinto * @priv: private structure 2056da5ec7f2SOng Boon Leong * @queue: TX queue index 205771fedb01SJoao Pinto * Description: according to which descriptor can be used (extend or basic) 205871fedb01SJoao Pinto * this function allocates the resources for TX and RX paths. In case of 205971fedb01SJoao Pinto * reception, for example, it pre-allocated the RX socket buffer in order to 206071fedb01SJoao Pinto * allow zero-copy mechanism. 
206171fedb01SJoao Pinto */ 2062da5ec7f2SOng Boon Leong static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) 206371fedb01SJoao Pinto { 2064ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2065579a25a8SJose Abreu size_t size; 2066579a25a8SJose Abreu void *addr; 2067ce736788SJoao Pinto 2068ce736788SJoao Pinto tx_q->queue_index = queue; 2069ce736788SJoao Pinto tx_q->priv_data = priv; 2070ce736788SJoao Pinto 2071aa042f60SSong, Yoong Siang tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, 2072ce736788SJoao Pinto sizeof(*tx_q->tx_skbuff_dma), 207371fedb01SJoao Pinto GFP_KERNEL); 2074ce736788SJoao Pinto if (!tx_q->tx_skbuff_dma) 2075da5ec7f2SOng Boon Leong return -ENOMEM; 207671fedb01SJoao Pinto 2077aa042f60SSong, Yoong Siang tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, 2078ce736788SJoao Pinto sizeof(struct sk_buff *), 207971fedb01SJoao Pinto GFP_KERNEL); 2080ce736788SJoao Pinto if (!tx_q->tx_skbuff) 2081da5ec7f2SOng Boon Leong return -ENOMEM; 208271fedb01SJoao Pinto 2083579a25a8SJose Abreu if (priv->extend_desc) 2084579a25a8SJose Abreu size = sizeof(struct dma_extended_desc); 2085579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2086579a25a8SJose Abreu size = sizeof(struct dma_edesc); 2087579a25a8SJose Abreu else 2088579a25a8SJose Abreu size = sizeof(struct dma_desc); 2089579a25a8SJose Abreu 2090aa042f60SSong, Yoong Siang size *= priv->dma_tx_size; 2091579a25a8SJose Abreu 2092579a25a8SJose Abreu addr = dma_alloc_coherent(priv->device, size, 2093579a25a8SJose Abreu &tx_q->dma_tx_phy, GFP_KERNEL); 2094579a25a8SJose Abreu if (!addr) 2095da5ec7f2SOng Boon Leong return -ENOMEM; 2096579a25a8SJose Abreu 2097579a25a8SJose Abreu if (priv->extend_desc) 2098579a25a8SJose Abreu tx_q->dma_etx = addr; 2099579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2100579a25a8SJose Abreu tx_q->dma_entx = addr; 2101579a25a8SJose Abreu else 2102579a25a8SJose Abreu tx_q->dma_tx = addr; 2103da5ec7f2SOng Boon Leong 2104da5ec7f2SOng 
Boon Leong return 0; 2105da5ec7f2SOng Boon Leong } 2106da5ec7f2SOng Boon Leong 2107da5ec7f2SOng Boon Leong static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) 2108da5ec7f2SOng Boon Leong { 2109da5ec7f2SOng Boon Leong u32 tx_count = priv->plat->tx_queues_to_use; 2110da5ec7f2SOng Boon Leong u32 queue; 2111da5ec7f2SOng Boon Leong int ret; 2112da5ec7f2SOng Boon Leong 2113da5ec7f2SOng Boon Leong /* TX queues buffers and DMA */ 2114da5ec7f2SOng Boon Leong for (queue = 0; queue < tx_count; queue++) { 2115da5ec7f2SOng Boon Leong ret = __alloc_dma_tx_desc_resources(priv, queue); 2116da5ec7f2SOng Boon Leong if (ret) 2117da5ec7f2SOng Boon Leong goto err_dma; 21185bacd778SLABBE Corentin } 21195bacd778SLABBE Corentin 21205bacd778SLABBE Corentin return 0; 21215bacd778SLABBE Corentin 212262242260SChristophe Jaillet err_dma: 2123ce736788SJoao Pinto free_dma_tx_desc_resources(priv); 212409f8d696SSrinivas Kandagatla return ret; 21255bacd778SLABBE Corentin } 212609f8d696SSrinivas Kandagatla 212771fedb01SJoao Pinto /** 212871fedb01SJoao Pinto * alloc_dma_desc_resources - alloc TX/RX resources. 212971fedb01SJoao Pinto * @priv: private structure 213071fedb01SJoao Pinto * Description: according to which descriptor can be used (extend or basic) 213171fedb01SJoao Pinto * this function allocates the resources for TX and RX paths. In case of 213271fedb01SJoao Pinto * reception, for example, it pre-allocated the RX socket buffer in order to 213371fedb01SJoao Pinto * allow zero-copy mechanism. 
213471fedb01SJoao Pinto */ 213571fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv) 21365bacd778SLABBE Corentin { 213754139cf3SJoao Pinto /* RX Allocation */ 213871fedb01SJoao Pinto int ret = alloc_dma_rx_desc_resources(priv); 213971fedb01SJoao Pinto 214071fedb01SJoao Pinto if (ret) 214171fedb01SJoao Pinto return ret; 214271fedb01SJoao Pinto 214371fedb01SJoao Pinto ret = alloc_dma_tx_desc_resources(priv); 214471fedb01SJoao Pinto 214571fedb01SJoao Pinto return ret; 214671fedb01SJoao Pinto } 214771fedb01SJoao Pinto 214871fedb01SJoao Pinto /** 214971fedb01SJoao Pinto * free_dma_desc_resources - free dma desc resources 215071fedb01SJoao Pinto * @priv: private structure 215171fedb01SJoao Pinto */ 215271fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv) 215371fedb01SJoao Pinto { 215471fedb01SJoao Pinto /* Release the DMA TX socket buffers */ 215571fedb01SJoao Pinto free_dma_tx_desc_resources(priv); 2156be8b38a7SOng Boon Leong 2157be8b38a7SOng Boon Leong /* Release the DMA RX socket buffers later 2158be8b38a7SOng Boon Leong * to ensure all pending XDP_TX buffers are returned. 
2159be8b38a7SOng Boon Leong */ 2160be8b38a7SOng Boon Leong free_dma_rx_desc_resources(priv); 216171fedb01SJoao Pinto } 216271fedb01SJoao Pinto 216371fedb01SJoao Pinto /** 21649eb12474Sjpinto * stmmac_mac_enable_rx_queues - Enable MAC rx queues 21659eb12474Sjpinto * @priv: driver private structure 21669eb12474Sjpinto * Description: It is used for enabling the rx queues in the MAC 21679eb12474Sjpinto */ 21689eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 21699eb12474Sjpinto { 21704f6046f5SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 21714f6046f5SJoao Pinto int queue; 21724f6046f5SJoao Pinto u8 mode; 21739eb12474Sjpinto 21744f6046f5SJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 21754f6046f5SJoao Pinto mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 2176c10d4c82SJose Abreu stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 21774f6046f5SJoao Pinto } 21789eb12474Sjpinto } 21799eb12474Sjpinto 21809eb12474Sjpinto /** 2181ae4f0d46SJoao Pinto * stmmac_start_rx_dma - start RX DMA channel 2182ae4f0d46SJoao Pinto * @priv: driver private structure 2183ae4f0d46SJoao Pinto * @chan: RX channel index 2184ae4f0d46SJoao Pinto * Description: 2185ae4f0d46SJoao Pinto * This starts a RX DMA channel 2186ae4f0d46SJoao Pinto */ 2187ae4f0d46SJoao Pinto static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2188ae4f0d46SJoao Pinto { 2189ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2190a4e887faSJose Abreu stmmac_start_rx(priv, priv->ioaddr, chan); 2191ae4f0d46SJoao Pinto } 2192ae4f0d46SJoao Pinto 2193ae4f0d46SJoao Pinto /** 2194ae4f0d46SJoao Pinto * stmmac_start_tx_dma - start TX DMA channel 2195ae4f0d46SJoao Pinto * @priv: driver private structure 2196ae4f0d46SJoao Pinto * @chan: TX channel index 2197ae4f0d46SJoao Pinto * Description: 2198ae4f0d46SJoao Pinto * This starts a TX DMA channel 2199ae4f0d46SJoao Pinto */ 2200ae4f0d46SJoao Pinto static void 
stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2201ae4f0d46SJoao Pinto { 2202ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2203a4e887faSJose Abreu stmmac_start_tx(priv, priv->ioaddr, chan); 2204ae4f0d46SJoao Pinto } 2205ae4f0d46SJoao Pinto 2206ae4f0d46SJoao Pinto /** 2207ae4f0d46SJoao Pinto * stmmac_stop_rx_dma - stop RX DMA channel 2208ae4f0d46SJoao Pinto * @priv: driver private structure 2209ae4f0d46SJoao Pinto * @chan: RX channel index 2210ae4f0d46SJoao Pinto * Description: 2211ae4f0d46SJoao Pinto * This stops a RX DMA channel 2212ae4f0d46SJoao Pinto */ 2213ae4f0d46SJoao Pinto static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2214ae4f0d46SJoao Pinto { 2215ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2216a4e887faSJose Abreu stmmac_stop_rx(priv, priv->ioaddr, chan); 2217ae4f0d46SJoao Pinto } 2218ae4f0d46SJoao Pinto 2219ae4f0d46SJoao Pinto /** 2220ae4f0d46SJoao Pinto * stmmac_stop_tx_dma - stop TX DMA channel 2221ae4f0d46SJoao Pinto * @priv: driver private structure 2222ae4f0d46SJoao Pinto * @chan: TX channel index 2223ae4f0d46SJoao Pinto * Description: 2224ae4f0d46SJoao Pinto * This stops a TX DMA channel 2225ae4f0d46SJoao Pinto */ 2226ae4f0d46SJoao Pinto static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2227ae4f0d46SJoao Pinto { 2228ae4f0d46SJoao Pinto netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2229a4e887faSJose Abreu stmmac_stop_tx(priv, priv->ioaddr, chan); 2230ae4f0d46SJoao Pinto } 2231ae4f0d46SJoao Pinto 2232ae4f0d46SJoao Pinto /** 2233ae4f0d46SJoao Pinto * stmmac_start_all_dma - start all RX and TX DMA channels 2234ae4f0d46SJoao Pinto * @priv: driver private structure 2235ae4f0d46SJoao Pinto * Description: 2236ae4f0d46SJoao Pinto * This starts all the RX and TX DMA channels 2237ae4f0d46SJoao Pinto */ 2238ae4f0d46SJoao Pinto static void stmmac_start_all_dma(struct stmmac_priv *priv) 
2239ae4f0d46SJoao Pinto { 2240ae4f0d46SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 2241ae4f0d46SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2242ae4f0d46SJoao Pinto u32 chan = 0; 2243ae4f0d46SJoao Pinto 2244ae4f0d46SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 2245ae4f0d46SJoao Pinto stmmac_start_rx_dma(priv, chan); 2246ae4f0d46SJoao Pinto 2247ae4f0d46SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 2248ae4f0d46SJoao Pinto stmmac_start_tx_dma(priv, chan); 2249ae4f0d46SJoao Pinto } 2250ae4f0d46SJoao Pinto 2251ae4f0d46SJoao Pinto /** 2252ae4f0d46SJoao Pinto * stmmac_stop_all_dma - stop all RX and TX DMA channels 2253ae4f0d46SJoao Pinto * @priv: driver private structure 2254ae4f0d46SJoao Pinto * Description: 2255ae4f0d46SJoao Pinto * This stops the RX and TX DMA channels 2256ae4f0d46SJoao Pinto */ 2257ae4f0d46SJoao Pinto static void stmmac_stop_all_dma(struct stmmac_priv *priv) 2258ae4f0d46SJoao Pinto { 2259ae4f0d46SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 2260ae4f0d46SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2261ae4f0d46SJoao Pinto u32 chan = 0; 2262ae4f0d46SJoao Pinto 2263ae4f0d46SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 2264ae4f0d46SJoao Pinto stmmac_stop_rx_dma(priv, chan); 2265ae4f0d46SJoao Pinto 2266ae4f0d46SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 2267ae4f0d46SJoao Pinto stmmac_stop_tx_dma(priv, chan); 2268ae4f0d46SJoao Pinto } 2269ae4f0d46SJoao Pinto 2270ae4f0d46SJoao Pinto /** 22717ac6653aSJeff Kirsher * stmmac_dma_operation_mode - HW DMA operation mode 227232ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 2273732fdf0eSGiuseppe CAVALLARO * Description: it is used for configuring the DMA operation mode register in 2274732fdf0eSGiuseppe CAVALLARO * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 
22757ac6653aSJeff Kirsher */ 22767ac6653aSJeff Kirsher static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 22777ac6653aSJeff Kirsher { 22786deee222SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 22796deee222SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 2280f88203a2SVince Bridgers int rxfifosz = priv->plat->rx_fifo_size; 228152a76235SJose Abreu int txfifosz = priv->plat->tx_fifo_size; 22826deee222SJoao Pinto u32 txmode = 0; 22836deee222SJoao Pinto u32 rxmode = 0; 22846deee222SJoao Pinto u32 chan = 0; 2285a0daae13SJose Abreu u8 qmode = 0; 2286f88203a2SVince Bridgers 228711fbf811SThierry Reding if (rxfifosz == 0) 228811fbf811SThierry Reding rxfifosz = priv->dma_cap.rx_fifo_size; 228952a76235SJose Abreu if (txfifosz == 0) 229052a76235SJose Abreu txfifosz = priv->dma_cap.tx_fifo_size; 229152a76235SJose Abreu 229252a76235SJose Abreu /* Adjust for real per queue fifo size */ 229352a76235SJose Abreu rxfifosz /= rx_channels_count; 229452a76235SJose Abreu txfifosz /= tx_channels_count; 229511fbf811SThierry Reding 22966deee222SJoao Pinto if (priv->plat->force_thresh_dma_mode) { 22976deee222SJoao Pinto txmode = tc; 22986deee222SJoao Pinto rxmode = tc; 22996deee222SJoao Pinto } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 23007ac6653aSJeff Kirsher /* 23017ac6653aSJeff Kirsher * In case of GMAC, SF mode can be enabled 23027ac6653aSJeff Kirsher * to perform the TX COE in HW. This depends on: 23037ac6653aSJeff Kirsher * 1) TX COE if actually supported 23047ac6653aSJeff Kirsher * 2) There is no bugged Jumbo frame support 23057ac6653aSJeff Kirsher * that needs to not insert csum in the TDES. 
23067ac6653aSJeff Kirsher */ 23076deee222SJoao Pinto txmode = SF_DMA_MODE; 23086deee222SJoao Pinto rxmode = SF_DMA_MODE; 2309b2dec116SSonic Zhang priv->xstats.threshold = SF_DMA_MODE; 23106deee222SJoao Pinto } else { 23116deee222SJoao Pinto txmode = tc; 23126deee222SJoao Pinto rxmode = SF_DMA_MODE; 23136deee222SJoao Pinto } 23146deee222SJoao Pinto 23156deee222SJoao Pinto /* configure all channels */ 2316a0daae13SJose Abreu for (chan = 0; chan < rx_channels_count; chan++) { 2317bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; 2318bba2556eSOng Boon Leong u32 buf_size; 2319bba2556eSOng Boon Leong 2320a0daae13SJose Abreu qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 23216deee222SJoao Pinto 2322a4e887faSJose Abreu stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2323a0daae13SJose Abreu rxfifosz, qmode); 2324bba2556eSOng Boon Leong 2325bba2556eSOng Boon Leong if (rx_q->xsk_pool) { 2326bba2556eSOng Boon Leong buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 2327bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 2328bba2556eSOng Boon Leong buf_size, 23294205c88eSJose Abreu chan); 2330bba2556eSOng Boon Leong } else { 2331bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 2332bba2556eSOng Boon Leong priv->dma_buf_sz, 2333bba2556eSOng Boon Leong chan); 2334bba2556eSOng Boon Leong } 2335a0daae13SJose Abreu } 2336a0daae13SJose Abreu 2337a0daae13SJose Abreu for (chan = 0; chan < tx_channels_count; chan++) { 2338a0daae13SJose Abreu qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2339a0daae13SJose Abreu 2340a4e887faSJose Abreu stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2341a0daae13SJose Abreu txfifosz, qmode); 2342a0daae13SJose Abreu } 23437ac6653aSJeff Kirsher } 23447ac6653aSJeff Kirsher 2345132c32eeSOng Boon Leong static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 2346132c32eeSOng Boon Leong { 2347132c32eeSOng Boon Leong struct netdev_queue *nq = 
netdev_get_tx_queue(priv->dev, queue); 2348132c32eeSOng Boon Leong struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2349132c32eeSOng Boon Leong struct xsk_buff_pool *pool = tx_q->xsk_pool; 2350132c32eeSOng Boon Leong unsigned int entry = tx_q->cur_tx; 2351132c32eeSOng Boon Leong struct dma_desc *tx_desc = NULL; 2352132c32eeSOng Boon Leong struct xdp_desc xdp_desc; 2353132c32eeSOng Boon Leong bool work_done = true; 2354132c32eeSOng Boon Leong 2355132c32eeSOng Boon Leong /* Avoids TX time-out as we are sharing with slow path */ 2356132c32eeSOng Boon Leong nq->trans_start = jiffies; 2357132c32eeSOng Boon Leong 2358132c32eeSOng Boon Leong budget = min(budget, stmmac_tx_avail(priv, queue)); 2359132c32eeSOng Boon Leong 2360132c32eeSOng Boon Leong while (budget-- > 0) { 2361132c32eeSOng Boon Leong dma_addr_t dma_addr; 2362132c32eeSOng Boon Leong bool set_ic; 2363132c32eeSOng Boon Leong 2364132c32eeSOng Boon Leong /* We are sharing with slow path and stop XSK TX desc submission when 2365132c32eeSOng Boon Leong * available TX ring is less than threshold. 
2366132c32eeSOng Boon Leong */ 2367132c32eeSOng Boon Leong if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || 2368132c32eeSOng Boon Leong !netif_carrier_ok(priv->dev)) { 2369132c32eeSOng Boon Leong work_done = false; 2370132c32eeSOng Boon Leong break; 2371132c32eeSOng Boon Leong } 2372132c32eeSOng Boon Leong 2373132c32eeSOng Boon Leong if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2374132c32eeSOng Boon Leong break; 2375132c32eeSOng Boon Leong 2376132c32eeSOng Boon Leong if (likely(priv->extend_desc)) 2377132c32eeSOng Boon Leong tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2378132c32eeSOng Boon Leong else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2379132c32eeSOng Boon Leong tx_desc = &tx_q->dma_entx[entry].basic; 2380132c32eeSOng Boon Leong else 2381132c32eeSOng Boon Leong tx_desc = tx_q->dma_tx + entry; 2382132c32eeSOng Boon Leong 2383132c32eeSOng Boon Leong dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2384132c32eeSOng Boon Leong xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); 2385132c32eeSOng Boon Leong 2386132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; 2387132c32eeSOng Boon Leong 2388132c32eeSOng Boon Leong /* To return XDP buffer to XSK pool, we simple call 2389132c32eeSOng Boon Leong * xsk_tx_completed(), so we don't need to fill up 2390132c32eeSOng Boon Leong * 'buf' and 'xdpf'. 
2391132c32eeSOng Boon Leong */ 2392132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].buf = 0; 2393132c32eeSOng Boon Leong tx_q->xdpf[entry] = NULL; 2394132c32eeSOng Boon Leong 2395132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].map_as_page = false; 2396132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; 2397132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].last_segment = true; 2398132c32eeSOng Boon Leong tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2399132c32eeSOng Boon Leong 2400132c32eeSOng Boon Leong stmmac_set_desc_addr(priv, tx_desc, dma_addr); 2401132c32eeSOng Boon Leong 2402132c32eeSOng Boon Leong tx_q->tx_count_frames++; 2403132c32eeSOng Boon Leong 2404132c32eeSOng Boon Leong if (!priv->tx_coal_frames[queue]) 2405132c32eeSOng Boon Leong set_ic = false; 2406132c32eeSOng Boon Leong else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 2407132c32eeSOng Boon Leong set_ic = true; 2408132c32eeSOng Boon Leong else 2409132c32eeSOng Boon Leong set_ic = false; 2410132c32eeSOng Boon Leong 2411132c32eeSOng Boon Leong if (set_ic) { 2412132c32eeSOng Boon Leong tx_q->tx_count_frames = 0; 2413132c32eeSOng Boon Leong stmmac_set_tx_ic(priv, tx_desc); 2414132c32eeSOng Boon Leong priv->xstats.tx_set_ic_bit++; 2415132c32eeSOng Boon Leong } 2416132c32eeSOng Boon Leong 2417132c32eeSOng Boon Leong stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len, 2418132c32eeSOng Boon Leong true, priv->mode, true, true, 2419132c32eeSOng Boon Leong xdp_desc.len); 2420132c32eeSOng Boon Leong 2421132c32eeSOng Boon Leong stmmac_enable_dma_transmission(priv, priv->ioaddr); 2422132c32eeSOng Boon Leong 2423132c32eeSOng Boon Leong tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); 2424132c32eeSOng Boon Leong entry = tx_q->cur_tx; 2425132c32eeSOng Boon Leong } 2426132c32eeSOng Boon Leong 2427132c32eeSOng Boon Leong if (tx_desc) { 2428132c32eeSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 2429132c32eeSOng Boon Leong xsk_tx_release(pool); 
	}

	/* Return true only when both conditions are met:
	 * a) TX Budget is still available
	 * b) work_done = true when XSK TX desc peek is empty (no more
	 *    pending XSK TX for transmission)
	 */
	return !!budget && work_done;
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 * Walks the ring from dirty_tx towards cur_tx, stopping at the first
 * descriptor still owned by the DMA, and releases the SKB/XDP/XSK buffer
 * attached to each completed descriptor. The return value combines the
 * number of cleaned descriptors with the XSK zero-copy transmit decision
 * (see the bottom of the function).
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, xmits = 0, count = 0;

	/* Serialize against the xmit path for this queue */
	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	tx_q->xsk_frames_done = 0;

	entry = tx_q->dirty_tx;

	/* Try to clean all TX complete frame in 1 shot */
	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		struct dma_desc *p;
		int status;

		/* Pick up the bookkeeping that matches how this slot was
		 * filled: XDP frame, SKB, or neither (e.g. XSK zero-copy).
		 */
		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdpf = tx_q->xdpf[entry];
			skb = NULL;
		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			xdpf = NULL;
			skb = tx_q->tx_skbuff[entry];
		} else {
			xdpf = NULL;
			skb = NULL;
		}

		/* Locate the descriptor for the ring layout in use */
		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[entry].basic;
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
					  &priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
				priv->xstats.txq_stats[queue].tx_pkt_n++;
			}
			if (skb)
				stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		/* Unmap the buffer, except for XDP_TX frames whose pages are
		 * recycled rather than unmapped here.
		 */
		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		/* Return XDP frames to their owner */
		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdp_return_frame(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		/* XSK completions are reported to the pool in one batch below */
		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
			tx_q->xsk_frames_done++;

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			if (likely(skb)) {
				pkts_compl++;
				bytes_compl += skb->len;
				dev_consume_skb_any(skb);
				tx_q->tx_skbuff[entry] = NULL;
			}
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	/* Wake the queue once enough descriptors have been freed */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if (tx_q->xsk_pool) {
		bool work_done;

		if (tx_q->xsk_frames_done)
			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);

		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
			xsk_set_tx_need_wakeup(tx_q->xsk_pool);

		/* For XSK TX, we try to send as many as possible.
		 * If XSK work done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to reenable TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
		 */
		work_done = stmmac_xdp_xmit_zc(priv, queue,
					       STMMAC_XSK_TX_BUDGET_MAX);
		if (work_done)
			xmits = budget - 1;
		else
			xmits = budget;
	}

	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
	    priv->eee_sw_timer_en) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		hrtimer_start(&tx_q->txtimer,
			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
			      HRTIMER_MODE_REL);

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	/* Combine decisions from TX clean and XSK TX */
	return max(count, xmits);
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

	/* Keep the stack away from this queue while it is rebuilt */
	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	/* Stop the DMA, drop every pending buffer, reset the ring indexes,
	 * then re-initialize and restart the channel. The order matters:
	 * the DMA must be stopped before buffers are freed.
	 */
	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	stmmac_clear_tx_descriptors(priv, chan);
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}

/**
 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 * @priv: driver private structure
 * @txmode: TX operating mode
 * @rxmode: RX operating mode
 * @chan: channel index
 * Description: it is used for configuring of the DMA operation mode in
 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 * mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	/* Fall back to the sizes reported by the HW capability register
	 * when the platform does not provide them.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}

/* Check and service the safety-feature interrupt. Returns true when the
 * status op reported an error (any non-zero value other than -EINVAL)
 * after triggering the driver's global error handling.
 */
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}

/**
 * stmmac_napi_check - check DMA status and schedule NAPI for a channel
 * @priv: driver private structure
 * @chan: DMA channel index
 * @dir: direction(s) to check, forwarded to the DMA interrupt-status op
 * Description: reads the per-channel DMA interrupt status and, for each
 * direction that fired, masks the corresponding DMA interrupt (under the
 * channel lock) before scheduling the channel's NAPI context. Queues with
 * an XSK pool attached use the combined rx/tx NAPI instead of the split
 * ones. Returns the raw status so the caller can act on error bits.
 */
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan, dir);
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
	struct stmmac_channel *ch = &priv->channel[chan];
	struct napi_struct *rx_napi;
	struct napi_struct *tx_napi;
	unsigned long flags;

	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(rx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(rx_napi);
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		if (napi_schedule_prep(tx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(tx_napi);
		}
	}

	return status;
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedule poll method in case of some
 * work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan,
						 DMA_DIR_RXTX);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      SF_DMA_MODE,
								      chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
278032ceabcaSGiuseppe CAVALLARO */ 27811c901a46SGiuseppe CAVALLARO static void stmmac_mmc_setup(struct stmmac_priv *priv) 27821c901a46SGiuseppe CAVALLARO { 27831c901a46SGiuseppe CAVALLARO unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 27841c901a46SGiuseppe CAVALLARO MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 27851c901a46SGiuseppe CAVALLARO 27863b1dd2c5SJose Abreu stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 27874f795b25SGiuseppe CAVALLARO 27884f795b25SGiuseppe CAVALLARO if (priv->dma_cap.rmon) { 27893b1dd2c5SJose Abreu stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 27901c901a46SGiuseppe CAVALLARO memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 27914f795b25SGiuseppe CAVALLARO } else 279238ddc59dSLABBE Corentin netdev_info(priv->dev, "No MAC Management Counters available\n"); 27931c901a46SGiuseppe CAVALLARO } 27941c901a46SGiuseppe CAVALLARO 2795732fdf0eSGiuseppe CAVALLARO /** 2796732fdf0eSGiuseppe CAVALLARO * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 279732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 279819e30c14SGiuseppe CAVALLARO * Description: 279919e30c14SGiuseppe CAVALLARO * new GMAC chip generations have a new register to indicate the 2800e7434821SGiuseppe CAVALLARO * presence of the optional feature/functions. 280119e30c14SGiuseppe CAVALLARO * This can be also used to override the value passed through the 280219e30c14SGiuseppe CAVALLARO * platform and necessary for old MAC10/100 and GMAC chips. 
2803e7434821SGiuseppe CAVALLARO */ 2804e7434821SGiuseppe CAVALLARO static int stmmac_get_hw_features(struct stmmac_priv *priv) 2805e7434821SGiuseppe CAVALLARO { 2806a4e887faSJose Abreu return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2807e7434821SGiuseppe CAVALLARO } 2808e7434821SGiuseppe CAVALLARO 280932ceabcaSGiuseppe CAVALLARO /** 2810732fdf0eSGiuseppe CAVALLARO * stmmac_check_ether_addr - check if the MAC addr is valid 281132ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 281232ceabcaSGiuseppe CAVALLARO * Description: 281332ceabcaSGiuseppe CAVALLARO * it is to verify if the MAC address is valid, in case of failures it 281432ceabcaSGiuseppe CAVALLARO * generates a random MAC address 281532ceabcaSGiuseppe CAVALLARO */ 2816bfab27a1SGiuseppe CAVALLARO static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2817bfab27a1SGiuseppe CAVALLARO { 28187f9b8fe5SJakub Kicinski u8 addr[ETH_ALEN]; 28197f9b8fe5SJakub Kicinski 2820bfab27a1SGiuseppe CAVALLARO if (!is_valid_ether_addr(priv->dev->dev_addr)) { 28217f9b8fe5SJakub Kicinski stmmac_get_umac_addr(priv, priv->hw, addr, 0); 28227f9b8fe5SJakub Kicinski if (is_valid_ether_addr(addr)) 28237f9b8fe5SJakub Kicinski eth_hw_addr_set(priv->dev, addr); 28247f9b8fe5SJakub Kicinski else 2825f2cedb63SDanny Kukawka eth_hw_addr_random(priv->dev); 2826af649352SJisheng Zhang dev_info(priv->device, "device MAC address %pM\n", 2827bfab27a1SGiuseppe CAVALLARO priv->dev->dev_addr); 2828bfab27a1SGiuseppe CAVALLARO } 2829c88460b7SHans de Goede } 2830bfab27a1SGiuseppe CAVALLARO 283132ceabcaSGiuseppe CAVALLARO /** 2832732fdf0eSGiuseppe CAVALLARO * stmmac_init_dma_engine - DMA init. 283332ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 283432ceabcaSGiuseppe CAVALLARO * Description: 283532ceabcaSGiuseppe CAVALLARO * It inits the DMA invoking the specific MAC/GMAC callback. 
 * Some DMA parameters can be passed from the platform;
 * in case of these are not passed a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	/* Alternate (extended) descriptor size, only in ring mode */
	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	/* The DMA must be reset before it can be programmed */
	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* Tail pointer sits past the pre-allocated descriptors */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}

/* Arm the per-queue TX coalescing hrtimer with the configured timeout */
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	hrtimer_start(&tx_q->txtimer,
		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
		      HRTIMER_MODE_REL);
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: the hrtimer embedded in the TX queue (tx_q->txtimer)
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 * It masks the channel's TX DMA interrupt and schedules the matching
 * NAPI context, which performs the actual cleaning.
 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	ch = &priv->channel[tx_q->queue_index];
	/* XSK-enabled queues are polled by the combined rx/tx NAPI */
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}

/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 chan;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	/* RX coalescing only needs a frame threshold, no timer */
	for (chan = 0; chan < rx_channel_count; chan++)
		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
}

/* Program the TX and RX descriptor ring lengths into the hardware */
static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_set_tx_ring_len(priv, priv->ioaddr,
				       (priv->dma_tx_size - 1), chan);

	/* set RX ring length */
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_set_rx_ring_len(priv, priv->ioaddr,
				       (priv->dma_rx_size - 1), chan);
}

/**
 * stmmac_set_tx_queue_weight - Set TX queue weight
 * @priv: driver private structure
 * Description: It is used for setting TX queues weight
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
	}
}

/**
 * stmmac_configure_cbs - Configure CBS in TX queue
 * @priv: driver private structure
 * Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	/* queue 0 is reserved for legacy traffic */
	for (queue = 1; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
		/* CBS parameters only apply to AVB queues; skip DCB ones */
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	}
}

/**
 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 * @priv: driver private structure
 * Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* Target channel comes from the platform configuration */
		chan = priv->plat->rx_queues_cfg[queue].chan;
		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
	}
}

/**
 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 * @priv: driver private structure
 * Description: It is used for configuring the RX Queue Priority
 */
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* Only queues explicitly marked use_prio get a priority */
		if (!priv->plat->rx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->rx_queues_cfg[queue].prio;
		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 * @priv: driver private structure
 * Description: It is used for configuring the TX Queue Priority
 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < tx_queues_count; queue++) {
		/* Only queues explicitly marked use_prio get a priority */
		if (!priv->plat->tx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->tx_queues_cfg[queue].prio;
		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 * @priv: driver private structure
 * Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue
< rx_queues_count; queue++) { 3097abe80fdcSJoao Pinto /* no specific packet type routing specified for the queue */ 3098abe80fdcSJoao Pinto if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 3099abe80fdcSJoao Pinto continue; 3100abe80fdcSJoao Pinto 3101abe80fdcSJoao Pinto packet = priv->plat->rx_queues_cfg[queue].pkt_route; 3102c10d4c82SJose Abreu stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 3103abe80fdcSJoao Pinto } 3104abe80fdcSJoao Pinto } 3105abe80fdcSJoao Pinto 310676067459SJose Abreu static void stmmac_mac_config_rss(struct stmmac_priv *priv) 310776067459SJose Abreu { 310876067459SJose Abreu if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 310976067459SJose Abreu priv->rss.enable = false; 311076067459SJose Abreu return; 311176067459SJose Abreu } 311276067459SJose Abreu 311376067459SJose Abreu if (priv->dev->features & NETIF_F_RXHASH) 311476067459SJose Abreu priv->rss.enable = true; 311576067459SJose Abreu else 311676067459SJose Abreu priv->rss.enable = false; 311776067459SJose Abreu 311876067459SJose Abreu stmmac_rss_configure(priv, priv->hw, &priv->rss, 311976067459SJose Abreu priv->plat->rx_queues_to_use); 312076067459SJose Abreu } 312176067459SJose Abreu 3122abe80fdcSJoao Pinto /** 3123d0a9c9f9SJoao Pinto * stmmac_mtl_configuration - Configure MTL 3124d0a9c9f9SJoao Pinto * @priv: driver private structure 3125d0a9c9f9SJoao Pinto * Description: It is used for configurring MTL 3126d0a9c9f9SJoao Pinto */ 3127d0a9c9f9SJoao Pinto static void stmmac_mtl_configuration(struct stmmac_priv *priv) 3128d0a9c9f9SJoao Pinto { 3129d0a9c9f9SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 3130d0a9c9f9SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 3131d0a9c9f9SJoao Pinto 3132c10d4c82SJose Abreu if (tx_queues_count > 1) 31336a3a7193SJoao Pinto stmmac_set_tx_queue_weight(priv); 31346a3a7193SJoao Pinto 3135d0a9c9f9SJoao Pinto /* Configure MTL RX algorithms */ 3136c10d4c82SJose Abreu if (rx_queues_count > 1) 3137c10d4c82SJose 
Abreu stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 3138d0a9c9f9SJoao Pinto priv->plat->rx_sched_algorithm); 3139d0a9c9f9SJoao Pinto 3140d0a9c9f9SJoao Pinto /* Configure MTL TX algorithms */ 3141c10d4c82SJose Abreu if (tx_queues_count > 1) 3142c10d4c82SJose Abreu stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 3143d0a9c9f9SJoao Pinto priv->plat->tx_sched_algorithm); 3144d0a9c9f9SJoao Pinto 314519d91873SJoao Pinto /* Configure CBS in AVB TX queues */ 3146c10d4c82SJose Abreu if (tx_queues_count > 1) 314719d91873SJoao Pinto stmmac_configure_cbs(priv); 314819d91873SJoao Pinto 3149d43042f4SJoao Pinto /* Map RX MTL to DMA channels */ 3150d43042f4SJoao Pinto stmmac_rx_queue_dma_chan_map(priv); 3151d43042f4SJoao Pinto 3152d0a9c9f9SJoao Pinto /* Enable MAC RX Queues */ 3153d0a9c9f9SJoao Pinto stmmac_mac_enable_rx_queues(priv); 31546deee222SJoao Pinto 3155a8f5102aSJoao Pinto /* Set RX priorities */ 3156c10d4c82SJose Abreu if (rx_queues_count > 1) 3157a8f5102aSJoao Pinto stmmac_mac_config_rx_queues_prio(priv); 3158a8f5102aSJoao Pinto 3159a8f5102aSJoao Pinto /* Set TX priorities */ 3160c10d4c82SJose Abreu if (tx_queues_count > 1) 3161a8f5102aSJoao Pinto stmmac_mac_config_tx_queues_prio(priv); 3162abe80fdcSJoao Pinto 3163abe80fdcSJoao Pinto /* Set RX routing */ 3164c10d4c82SJose Abreu if (rx_queues_count > 1) 3165abe80fdcSJoao Pinto stmmac_mac_config_rx_queues_routing(priv); 316676067459SJose Abreu 316776067459SJose Abreu /* Receive Side Scaling */ 316876067459SJose Abreu if (rx_queues_count > 1) 316976067459SJose Abreu stmmac_mac_config_rss(priv); 3170d0a9c9f9SJoao Pinto } 3171d0a9c9f9SJoao Pinto 31728bf993a5SJose Abreu static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 31738bf993a5SJose Abreu { 3174c10d4c82SJose Abreu if (priv->dma_cap.asp) { 31758bf993a5SJose Abreu netdev_info(priv->dev, "Enabling Safety Features\n"); 31765ac712dcSWong Vee Khee stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, 31775ac712dcSWong Vee Khee 
priv->plat->safety_feat_cfg); 31788bf993a5SJose Abreu } else { 31798bf993a5SJose Abreu netdev_info(priv->dev, "No Safety Features support found\n"); 31808bf993a5SJose Abreu } 31818bf993a5SJose Abreu } 31828bf993a5SJose Abreu 31835a558611SOng Boon Leong static int stmmac_fpe_start_wq(struct stmmac_priv *priv) 31845a558611SOng Boon Leong { 31855a558611SOng Boon Leong char *name; 31865a558611SOng Boon Leong 31875a558611SOng Boon Leong clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 3188db7c691dSMohammad Athari Bin Ismail clear_bit(__FPE_REMOVING, &priv->fpe_task_state); 31895a558611SOng Boon Leong 31905a558611SOng Boon Leong name = priv->wq_name; 31915a558611SOng Boon Leong sprintf(name, "%s-fpe", priv->dev->name); 31925a558611SOng Boon Leong 31935a558611SOng Boon Leong priv->fpe_wq = create_singlethread_workqueue(name); 31945a558611SOng Boon Leong if (!priv->fpe_wq) { 31955a558611SOng Boon Leong netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); 31965a558611SOng Boon Leong 31975a558611SOng Boon Leong return -ENOMEM; 31985a558611SOng Boon Leong } 31995a558611SOng Boon Leong netdev_info(priv->dev, "FPE workqueue start"); 32005a558611SOng Boon Leong 32015a558611SOng Boon Leong return 0; 32025a558611SOng Boon Leong } 32035a558611SOng Boon Leong 3204d0a9c9f9SJoao Pinto /** 3205732fdf0eSGiuseppe CAVALLARO * stmmac_hw_setup - setup mac in a usable state. 3206523f11b5SSrinivas Kandagatla * @dev : pointer to the device structure. 3207d0ea5cbdSJesse Brandeburg * @init_ptp: initialize PTP if set 3208523f11b5SSrinivas Kandagatla * Description: 3209732fdf0eSGiuseppe CAVALLARO * this is the main function to setup the HW in a usable state because the 3210732fdf0eSGiuseppe CAVALLARO * dma engine is reset, the core registers are configured (e.g. AXI, 3211732fdf0eSGiuseppe CAVALLARO * Checksum features, timers). The DMA is ready to start receiving and 3212732fdf0eSGiuseppe CAVALLARO * transmitting. 
3213523f11b5SSrinivas Kandagatla * Return value: 3214523f11b5SSrinivas Kandagatla * 0 on success and an appropriate (-)ve integer as defined in errno.h 3215523f11b5SSrinivas Kandagatla * file on failure. 3216523f11b5SSrinivas Kandagatla */ 3217fe131929SHuacai Chen static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 3218523f11b5SSrinivas Kandagatla { 3219523f11b5SSrinivas Kandagatla struct stmmac_priv *priv = netdev_priv(dev); 32203c55d4d0SJoao Pinto u32 rx_cnt = priv->plat->rx_queues_to_use; 3221146617b8SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 3222d08d32d1SOng Boon Leong bool sph_en; 3223146617b8SJoao Pinto u32 chan; 3224523f11b5SSrinivas Kandagatla int ret; 3225523f11b5SSrinivas Kandagatla 3226523f11b5SSrinivas Kandagatla /* DMA initialization and SW reset */ 3227523f11b5SSrinivas Kandagatla ret = stmmac_init_dma_engine(priv); 3228523f11b5SSrinivas Kandagatla if (ret < 0) { 322938ddc59dSLABBE Corentin netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 323038ddc59dSLABBE Corentin __func__); 3231523f11b5SSrinivas Kandagatla return ret; 3232523f11b5SSrinivas Kandagatla } 3233523f11b5SSrinivas Kandagatla 3234523f11b5SSrinivas Kandagatla /* Copy the MAC addr into the HW */ 3235c10d4c82SJose Abreu stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 3236523f11b5SSrinivas Kandagatla 323702e57b9dSGiuseppe CAVALLARO /* PS and related bits will be programmed according to the speed */ 323802e57b9dSGiuseppe CAVALLARO if (priv->hw->pcs) { 323902e57b9dSGiuseppe CAVALLARO int speed = priv->plat->mac_port_sel_speed; 324002e57b9dSGiuseppe CAVALLARO 324102e57b9dSGiuseppe CAVALLARO if ((speed == SPEED_10) || (speed == SPEED_100) || 324202e57b9dSGiuseppe CAVALLARO (speed == SPEED_1000)) { 324302e57b9dSGiuseppe CAVALLARO priv->hw->ps = speed; 324402e57b9dSGiuseppe CAVALLARO } else { 324502e57b9dSGiuseppe CAVALLARO dev_warn(priv->device, "invalid port speed\n"); 324602e57b9dSGiuseppe CAVALLARO priv->hw->ps = 0; 324702e57b9dSGiuseppe CAVALLARO 
} 324802e57b9dSGiuseppe CAVALLARO } 324902e57b9dSGiuseppe CAVALLARO 3250523f11b5SSrinivas Kandagatla /* Initialize the MAC Core */ 3251c10d4c82SJose Abreu stmmac_core_init(priv, priv->hw, dev); 3252523f11b5SSrinivas Kandagatla 3253d0a9c9f9SJoao Pinto /* Initialize MTL*/ 3254d0a9c9f9SJoao Pinto stmmac_mtl_configuration(priv); 32559eb12474Sjpinto 32568bf993a5SJose Abreu /* Initialize Safety Features */ 32578bf993a5SJose Abreu stmmac_safety_feat_configuration(priv); 32588bf993a5SJose Abreu 3259c10d4c82SJose Abreu ret = stmmac_rx_ipc(priv, priv->hw); 3260978aded4SGiuseppe CAVALLARO if (!ret) { 326138ddc59dSLABBE Corentin netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 3262978aded4SGiuseppe CAVALLARO priv->plat->rx_coe = STMMAC_RX_COE_NONE; 3263d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = 0; 3264978aded4SGiuseppe CAVALLARO } 3265978aded4SGiuseppe CAVALLARO 3266523f11b5SSrinivas Kandagatla /* Enable the MAC Rx/Tx */ 3267c10d4c82SJose Abreu stmmac_mac_set(priv, priv->ioaddr, true); 3268523f11b5SSrinivas Kandagatla 3269b4f0a661SJoao Pinto /* Set the HW DMA mode and the COE */ 3270b4f0a661SJoao Pinto stmmac_dma_operation_mode(priv); 3271b4f0a661SJoao Pinto 3272523f11b5SSrinivas Kandagatla stmmac_mmc_setup(priv); 3273523f11b5SSrinivas Kandagatla 3274fe131929SHuacai Chen if (init_ptp) { 32750ad2be79SThierry Reding ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 32760ad2be79SThierry Reding if (ret < 0) 32770ad2be79SThierry Reding netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); 32780ad2be79SThierry Reding 3279523f11b5SSrinivas Kandagatla ret = stmmac_init_ptp(priv); 3280722eef28SHeiner Kallweit if (ret == -EOPNOTSUPP) 3281722eef28SHeiner Kallweit netdev_warn(priv->dev, "PTP not supported by HW\n"); 3282722eef28SHeiner Kallweit else if (ret) 3283722eef28SHeiner Kallweit netdev_warn(priv->dev, "PTP init failed\n"); 3284fe131929SHuacai Chen } 3285523f11b5SSrinivas Kandagatla 3286388e201dSVineetha G. 
Jaya Kumaran priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 3287388e201dSVineetha G. Jaya Kumaran 3288388e201dSVineetha G. Jaya Kumaran /* Convert the timer from msec to usec */ 3289388e201dSVineetha G. Jaya Kumaran if (!priv->tx_lpi_timer) 3290388e201dSVineetha G. Jaya Kumaran priv->tx_lpi_timer = eee_timer * 1000; 3291523f11b5SSrinivas Kandagatla 3292a4e887faSJose Abreu if (priv->use_riwt) { 3293db2f2842SOng Boon Leong u32 queue; 32944e4337ccSJose Abreu 3295db2f2842SOng Boon Leong for (queue = 0; queue < rx_cnt; queue++) { 3296db2f2842SOng Boon Leong if (!priv->rx_riwt[queue]) 3297db2f2842SOng Boon Leong priv->rx_riwt[queue] = DEF_DMA_RIWT; 3298db2f2842SOng Boon Leong 3299db2f2842SOng Boon Leong stmmac_rx_watchdog(priv, priv->ioaddr, 3300db2f2842SOng Boon Leong priv->rx_riwt[queue], queue); 3301db2f2842SOng Boon Leong } 3302523f11b5SSrinivas Kandagatla } 3303523f11b5SSrinivas Kandagatla 3304c10d4c82SJose Abreu if (priv->hw->pcs) 3305c9ad4c10SBen Dooks (Codethink) stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 3306523f11b5SSrinivas Kandagatla 33074854ab99SJoao Pinto /* set TX and RX rings length */ 33084854ab99SJoao Pinto stmmac_set_rings_length(priv); 33094854ab99SJoao Pinto 3310f748be53SAlexandre TORGUE /* Enable TSO */ 3311146617b8SJoao Pinto if (priv->tso) { 33125e6038b8SOng Boon Leong for (chan = 0; chan < tx_cnt; chan++) { 33135e6038b8SOng Boon Leong struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 33145e6038b8SOng Boon Leong 33155e6038b8SOng Boon Leong /* TSO and TBS cannot co-exist */ 33165e6038b8SOng Boon Leong if (tx_q->tbs & STMMAC_TBS_AVAIL) 33175e6038b8SOng Boon Leong continue; 33185e6038b8SOng Boon Leong 3319a4e887faSJose Abreu stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 3320146617b8SJoao Pinto } 33215e6038b8SOng Boon Leong } 3322f748be53SAlexandre TORGUE 332367afd6d1SJose Abreu /* Enable Split Header */ 3324d08d32d1SOng Boon Leong sph_en = (priv->hw->rx_csum > 0) && priv->sph; 332567afd6d1SJose Abreu for (chan = 0; chan < rx_cnt; 
chan++) 3326d08d32d1SOng Boon Leong stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 3327d08d32d1SOng Boon Leong 332867afd6d1SJose Abreu 332930d93227SJose Abreu /* VLAN Tag Insertion */ 333030d93227SJose Abreu if (priv->dma_cap.vlins) 333130d93227SJose Abreu stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 333230d93227SJose Abreu 3333579a25a8SJose Abreu /* TBS */ 3334579a25a8SJose Abreu for (chan = 0; chan < tx_cnt; chan++) { 3335579a25a8SJose Abreu struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 3336579a25a8SJose Abreu int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 3337579a25a8SJose Abreu 3338579a25a8SJose Abreu stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 3339579a25a8SJose Abreu } 3340579a25a8SJose Abreu 3341686cff3dSAashish Verma /* Configure real RX and TX queues */ 3342686cff3dSAashish Verma netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 3343686cff3dSAashish Verma netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 3344686cff3dSAashish Verma 33457d9e6c5aSJose Abreu /* Start the ball rolling... 
*/ 33467d9e6c5aSJose Abreu stmmac_start_all_dma(priv); 33477d9e6c5aSJose Abreu 33485a558611SOng Boon Leong if (priv->dma_cap.fpesel) { 33495a558611SOng Boon Leong stmmac_fpe_start_wq(priv); 33505a558611SOng Boon Leong 33515a558611SOng Boon Leong if (priv->plat->fpe_cfg->enable) 33525a558611SOng Boon Leong stmmac_fpe_handshake(priv, true); 33535a558611SOng Boon Leong } 33545a558611SOng Boon Leong 3355523f11b5SSrinivas Kandagatla return 0; 3356523f11b5SSrinivas Kandagatla } 3357523f11b5SSrinivas Kandagatla 3358c66f6c37SThierry Reding static void stmmac_hw_teardown(struct net_device *dev) 3359c66f6c37SThierry Reding { 3360c66f6c37SThierry Reding struct stmmac_priv *priv = netdev_priv(dev); 3361c66f6c37SThierry Reding 3362c66f6c37SThierry Reding clk_disable_unprepare(priv->plat->clk_ptp_ref); 3363c66f6c37SThierry Reding } 3364c66f6c37SThierry Reding 33658532f613SOng Boon Leong static void stmmac_free_irq(struct net_device *dev, 33668532f613SOng Boon Leong enum request_irq_err irq_err, int irq_idx) 33678532f613SOng Boon Leong { 33688532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 33698532f613SOng Boon Leong int j; 33708532f613SOng Boon Leong 33718532f613SOng Boon Leong switch (irq_err) { 33728532f613SOng Boon Leong case REQ_IRQ_ERR_ALL: 33738532f613SOng Boon Leong irq_idx = priv->plat->tx_queues_to_use; 33748532f613SOng Boon Leong fallthrough; 33758532f613SOng Boon Leong case REQ_IRQ_ERR_TX: 33768532f613SOng Boon Leong for (j = irq_idx - 1; j >= 0; j--) { 33778deec94cSOng Boon Leong if (priv->tx_irq[j] > 0) { 33788deec94cSOng Boon Leong irq_set_affinity_hint(priv->tx_irq[j], NULL); 33798532f613SOng Boon Leong free_irq(priv->tx_irq[j], &priv->tx_queue[j]); 33808532f613SOng Boon Leong } 33818deec94cSOng Boon Leong } 33828532f613SOng Boon Leong irq_idx = priv->plat->rx_queues_to_use; 33838532f613SOng Boon Leong fallthrough; 33848532f613SOng Boon Leong case REQ_IRQ_ERR_RX: 33858532f613SOng Boon Leong for (j = irq_idx - 1; j >= 0; j--) { 33868deec94cSOng 
Boon Leong if (priv->rx_irq[j] > 0) { 33878deec94cSOng Boon Leong irq_set_affinity_hint(priv->rx_irq[j], NULL); 33888532f613SOng Boon Leong free_irq(priv->rx_irq[j], &priv->rx_queue[j]); 33898532f613SOng Boon Leong } 33908deec94cSOng Boon Leong } 33918532f613SOng Boon Leong 33928532f613SOng Boon Leong if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) 33938532f613SOng Boon Leong free_irq(priv->sfty_ue_irq, dev); 33948532f613SOng Boon Leong fallthrough; 33958532f613SOng Boon Leong case REQ_IRQ_ERR_SFTY_UE: 33968532f613SOng Boon Leong if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) 33978532f613SOng Boon Leong free_irq(priv->sfty_ce_irq, dev); 33988532f613SOng Boon Leong fallthrough; 33998532f613SOng Boon Leong case REQ_IRQ_ERR_SFTY_CE: 34008532f613SOng Boon Leong if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) 34018532f613SOng Boon Leong free_irq(priv->lpi_irq, dev); 34028532f613SOng Boon Leong fallthrough; 34038532f613SOng Boon Leong case REQ_IRQ_ERR_LPI: 34048532f613SOng Boon Leong if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) 34058532f613SOng Boon Leong free_irq(priv->wol_irq, dev); 34068532f613SOng Boon Leong fallthrough; 34078532f613SOng Boon Leong case REQ_IRQ_ERR_WOL: 34088532f613SOng Boon Leong free_irq(dev->irq, dev); 34098532f613SOng Boon Leong fallthrough; 34108532f613SOng Boon Leong case REQ_IRQ_ERR_MAC: 34118532f613SOng Boon Leong case REQ_IRQ_ERR_NO: 34128532f613SOng Boon Leong /* If MAC IRQ request error, no more IRQ to free */ 34138532f613SOng Boon Leong break; 34148532f613SOng Boon Leong } 34158532f613SOng Boon Leong } 34168532f613SOng Boon Leong 34178532f613SOng Boon Leong static int stmmac_request_irq_multi_msi(struct net_device *dev) 34188532f613SOng Boon Leong { 34198532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 34203e6dc7b6SWong Vee Khee enum request_irq_err irq_err; 34218deec94cSOng Boon Leong cpumask_t cpu_mask; 34228532f613SOng Boon Leong int irq_idx = 0; 34238532f613SOng Boon Leong char 
*int_name; 34248532f613SOng Boon Leong int ret; 34258532f613SOng Boon Leong int i; 34268532f613SOng Boon Leong 34278532f613SOng Boon Leong /* For common interrupt */ 34288532f613SOng Boon Leong int_name = priv->int_name_mac; 34298532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "mac"); 34308532f613SOng Boon Leong ret = request_irq(dev->irq, stmmac_mac_interrupt, 34318532f613SOng Boon Leong 0, int_name, dev); 34328532f613SOng Boon Leong if (unlikely(ret < 0)) { 34338532f613SOng Boon Leong netdev_err(priv->dev, 34348532f613SOng Boon Leong "%s: alloc mac MSI %d (error: %d)\n", 34358532f613SOng Boon Leong __func__, dev->irq, ret); 34368532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_MAC; 34378532f613SOng Boon Leong goto irq_error; 34388532f613SOng Boon Leong } 34398532f613SOng Boon Leong 34408532f613SOng Boon Leong /* Request the Wake IRQ in case of another line 34418532f613SOng Boon Leong * is used for WoL 34428532f613SOng Boon Leong */ 34438532f613SOng Boon Leong if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 34448532f613SOng Boon Leong int_name = priv->int_name_wol; 34458532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "wol"); 34468532f613SOng Boon Leong ret = request_irq(priv->wol_irq, 34478532f613SOng Boon Leong stmmac_mac_interrupt, 34488532f613SOng Boon Leong 0, int_name, dev); 34498532f613SOng Boon Leong if (unlikely(ret < 0)) { 34508532f613SOng Boon Leong netdev_err(priv->dev, 34518532f613SOng Boon Leong "%s: alloc wol MSI %d (error: %d)\n", 34528532f613SOng Boon Leong __func__, priv->wol_irq, ret); 34538532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_WOL; 34548532f613SOng Boon Leong goto irq_error; 34558532f613SOng Boon Leong } 34568532f613SOng Boon Leong } 34578532f613SOng Boon Leong 34588532f613SOng Boon Leong /* Request the LPI IRQ in case of another line 34598532f613SOng Boon Leong * is used for LPI 34608532f613SOng Boon Leong */ 34618532f613SOng Boon Leong if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 34628532f613SOng 
Boon Leong int_name = priv->int_name_lpi; 34638532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "lpi"); 34648532f613SOng Boon Leong ret = request_irq(priv->lpi_irq, 34658532f613SOng Boon Leong stmmac_mac_interrupt, 34668532f613SOng Boon Leong 0, int_name, dev); 34678532f613SOng Boon Leong if (unlikely(ret < 0)) { 34688532f613SOng Boon Leong netdev_err(priv->dev, 34698532f613SOng Boon Leong "%s: alloc lpi MSI %d (error: %d)\n", 34708532f613SOng Boon Leong __func__, priv->lpi_irq, ret); 34718532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_LPI; 34728532f613SOng Boon Leong goto irq_error; 34738532f613SOng Boon Leong } 34748532f613SOng Boon Leong } 34758532f613SOng Boon Leong 34768532f613SOng Boon Leong /* Request the Safety Feature Correctible Error line in 34778532f613SOng Boon Leong * case of another line is used 34788532f613SOng Boon Leong */ 34798532f613SOng Boon Leong if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { 34808532f613SOng Boon Leong int_name = priv->int_name_sfty_ce; 34818532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "safety-ce"); 34828532f613SOng Boon Leong ret = request_irq(priv->sfty_ce_irq, 34838532f613SOng Boon Leong stmmac_safety_interrupt, 34848532f613SOng Boon Leong 0, int_name, dev); 34858532f613SOng Boon Leong if (unlikely(ret < 0)) { 34868532f613SOng Boon Leong netdev_err(priv->dev, 34878532f613SOng Boon Leong "%s: alloc sfty ce MSI %d (error: %d)\n", 34888532f613SOng Boon Leong __func__, priv->sfty_ce_irq, ret); 34898532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_SFTY_CE; 34908532f613SOng Boon Leong goto irq_error; 34918532f613SOng Boon Leong } 34928532f613SOng Boon Leong } 34938532f613SOng Boon Leong 34948532f613SOng Boon Leong /* Request the Safety Feature Uncorrectible Error line in 34958532f613SOng Boon Leong * case of another line is used 34968532f613SOng Boon Leong */ 34978532f613SOng Boon Leong if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { 34988532f613SOng Boon Leong int_name = 
priv->int_name_sfty_ue; 34998532f613SOng Boon Leong sprintf(int_name, "%s:%s", dev->name, "safety-ue"); 35008532f613SOng Boon Leong ret = request_irq(priv->sfty_ue_irq, 35018532f613SOng Boon Leong stmmac_safety_interrupt, 35028532f613SOng Boon Leong 0, int_name, dev); 35038532f613SOng Boon Leong if (unlikely(ret < 0)) { 35048532f613SOng Boon Leong netdev_err(priv->dev, 35058532f613SOng Boon Leong "%s: alloc sfty ue MSI %d (error: %d)\n", 35068532f613SOng Boon Leong __func__, priv->sfty_ue_irq, ret); 35078532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_SFTY_UE; 35088532f613SOng Boon Leong goto irq_error; 35098532f613SOng Boon Leong } 35108532f613SOng Boon Leong } 35118532f613SOng Boon Leong 35128532f613SOng Boon Leong /* Request Rx MSI irq */ 35138532f613SOng Boon Leong for (i = 0; i < priv->plat->rx_queues_to_use; i++) { 3514d68c2e1dSArnd Bergmann if (i >= MTL_MAX_RX_QUEUES) 35153e0d5699SArnd Bergmann break; 35168532f613SOng Boon Leong if (priv->rx_irq[i] == 0) 35178532f613SOng Boon Leong continue; 35188532f613SOng Boon Leong 35198532f613SOng Boon Leong int_name = priv->int_name_rx_irq[i]; 35208532f613SOng Boon Leong sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); 35218532f613SOng Boon Leong ret = request_irq(priv->rx_irq[i], 35228532f613SOng Boon Leong stmmac_msi_intr_rx, 35238532f613SOng Boon Leong 0, int_name, &priv->rx_queue[i]); 35248532f613SOng Boon Leong if (unlikely(ret < 0)) { 35258532f613SOng Boon Leong netdev_err(priv->dev, 35268532f613SOng Boon Leong "%s: alloc rx-%d MSI %d (error: %d)\n", 35278532f613SOng Boon Leong __func__, i, priv->rx_irq[i], ret); 35288532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_RX; 35298532f613SOng Boon Leong irq_idx = i; 35308532f613SOng Boon Leong goto irq_error; 35318532f613SOng Boon Leong } 35328deec94cSOng Boon Leong cpumask_clear(&cpu_mask); 35338deec94cSOng Boon Leong cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 35348deec94cSOng Boon Leong irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); 35358532f613SOng Boon Leong 
} 35368532f613SOng Boon Leong 35378532f613SOng Boon Leong /* Request Tx MSI irq */ 35388532f613SOng Boon Leong for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 3539d68c2e1dSArnd Bergmann if (i >= MTL_MAX_TX_QUEUES) 35403e0d5699SArnd Bergmann break; 35418532f613SOng Boon Leong if (priv->tx_irq[i] == 0) 35428532f613SOng Boon Leong continue; 35438532f613SOng Boon Leong 35448532f613SOng Boon Leong int_name = priv->int_name_tx_irq[i]; 35458532f613SOng Boon Leong sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); 35468532f613SOng Boon Leong ret = request_irq(priv->tx_irq[i], 35478532f613SOng Boon Leong stmmac_msi_intr_tx, 35488532f613SOng Boon Leong 0, int_name, &priv->tx_queue[i]); 35498532f613SOng Boon Leong if (unlikely(ret < 0)) { 35508532f613SOng Boon Leong netdev_err(priv->dev, 35518532f613SOng Boon Leong "%s: alloc tx-%d MSI %d (error: %d)\n", 35528532f613SOng Boon Leong __func__, i, priv->tx_irq[i], ret); 35538532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_TX; 35548532f613SOng Boon Leong irq_idx = i; 35558532f613SOng Boon Leong goto irq_error; 35568532f613SOng Boon Leong } 35578deec94cSOng Boon Leong cpumask_clear(&cpu_mask); 35588deec94cSOng Boon Leong cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 35598deec94cSOng Boon Leong irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); 35608532f613SOng Boon Leong } 35618532f613SOng Boon Leong 35628532f613SOng Boon Leong return 0; 35638532f613SOng Boon Leong 35648532f613SOng Boon Leong irq_error: 35658532f613SOng Boon Leong stmmac_free_irq(dev, irq_err, irq_idx); 35668532f613SOng Boon Leong return ret; 35678532f613SOng Boon Leong } 35688532f613SOng Boon Leong 35698532f613SOng Boon Leong static int stmmac_request_irq_single(struct net_device *dev) 35708532f613SOng Boon Leong { 35718532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 35723e6dc7b6SWong Vee Khee enum request_irq_err irq_err; 35738532f613SOng Boon Leong int ret; 35748532f613SOng Boon Leong 35758532f613SOng Boon Leong ret = 
request_irq(dev->irq, stmmac_interrupt, 35768532f613SOng Boon Leong IRQF_SHARED, dev->name, dev); 35778532f613SOng Boon Leong if (unlikely(ret < 0)) { 35788532f613SOng Boon Leong netdev_err(priv->dev, 35798532f613SOng Boon Leong "%s: ERROR: allocating the IRQ %d (error: %d)\n", 35808532f613SOng Boon Leong __func__, dev->irq, ret); 35818532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_MAC; 35823e6dc7b6SWong Vee Khee goto irq_error; 35838532f613SOng Boon Leong } 35848532f613SOng Boon Leong 35858532f613SOng Boon Leong /* Request the Wake IRQ in case of another line 35868532f613SOng Boon Leong * is used for WoL 35878532f613SOng Boon Leong */ 35888532f613SOng Boon Leong if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 35898532f613SOng Boon Leong ret = request_irq(priv->wol_irq, stmmac_interrupt, 35908532f613SOng Boon Leong IRQF_SHARED, dev->name, dev); 35918532f613SOng Boon Leong if (unlikely(ret < 0)) { 35928532f613SOng Boon Leong netdev_err(priv->dev, 35938532f613SOng Boon Leong "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 35948532f613SOng Boon Leong __func__, priv->wol_irq, ret); 35958532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_WOL; 35963e6dc7b6SWong Vee Khee goto irq_error; 35978532f613SOng Boon Leong } 35988532f613SOng Boon Leong } 35998532f613SOng Boon Leong 36008532f613SOng Boon Leong /* Request the IRQ lines */ 36018532f613SOng Boon Leong if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 36028532f613SOng Boon Leong ret = request_irq(priv->lpi_irq, stmmac_interrupt, 36038532f613SOng Boon Leong IRQF_SHARED, dev->name, dev); 36048532f613SOng Boon Leong if (unlikely(ret < 0)) { 36058532f613SOng Boon Leong netdev_err(priv->dev, 36068532f613SOng Boon Leong "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 36078532f613SOng Boon Leong __func__, priv->lpi_irq, ret); 36088532f613SOng Boon Leong irq_err = REQ_IRQ_ERR_LPI; 36098532f613SOng Boon Leong goto irq_error; 36108532f613SOng Boon Leong } 36118532f613SOng Boon Leong } 36128532f613SOng Boon Leong 36138532f613SOng 
	return 0;

/* Error-unwind tail of stmmac_request_irq_multi_msi(): release every IRQ
 * requested so far up to and including the failing index recorded in
 * irq_err (function head is above this excerpt — see the full file).
 */
irq_error:
	stmmac_free_irq(dev, irq_err, 0);
	return ret;
}

/**
 *  stmmac_request_irq - request the device IRQ line(s)
 *  @dev: network device pointer
 *  Description: dispatches to the per-vector MSI request path when the
 *  platform enables multiple MSI vectors, otherwise requests the single
 *  shared IRQ line.
 *  Return value:
 *  0 on success, a negative errno otherwise.
 */
static int stmmac_request_irq(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* Request the IRQ lines */
	if (priv->plat->multi_msi_en)
		ret = stmmac_request_irq_multi_msi(dev);
	else
		ret = stmmac_request_irq_single(dev);

	return ret;
}

/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  It brings the device to runtime-resumed state, attaches the PHY (unless
 *  the PCS/XPCS handles the link), sizes and allocates the DMA descriptor
 *  rings, programs the hardware, starts phylink and requests the IRQs.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int mode = priv->plat->phy_interface;
	int bfsize = 0;
	u32 chan;
	int ret;

	/* Take a runtime-PM reference; drop it again on any failure path
	 * (pm_runtime_put_noidle here, pm_runtime_put at init_phy_error).
	 */
	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	/* Skip PHY attach when a TBI/RTBI PCS or a C73-autoneg XPCS owns
	 * the link instead of an MDIO PHY.
	 */
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI &&
	    (!priv->hw->xpcs ||
	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			goto init_phy_error;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	/* Choose the RX buffer size from the MTU: a chip-specific 16KiB
	 * probe first, then the generic MTU-based sizing as fallback.
	 */
	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	/* Fall back to default ring sizes if none were configured yet */
	if (!priv->dma_tx_size)
		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
	if (!priv->dma_rx_size)
		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;

	/* Earlier check for TBS */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;

		/* Setup per-TXQ tbs flag before TX descriptor alloc */
		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
	}

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);
	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);

	return 0;

	/* Unwind strictly in reverse order of the setup above */
irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	phylink_disconnect_phy(priv->phylink);
init_phy_error:
	pm_runtime_put(priv->device);
	return ret;
}

/**
 *  stmmac_fpe_stop_wq - tear down the Frame Preemption workqueue
 *  @priv: driver private structure
 *  Description: marks the FPE task as being removed so its worker can
 *  observe the flag, then destroys the workqueue (flushing pending work).
 */
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq)
		destroy_workqueue(priv->fpe_wq);

	netdev_info(priv->dev, "FPE workqueue stop");
}

/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 *  It undoes stmmac_open(): stops phylink and the NAPI queues, cancels
 *  the TX coalescing timers, frees the IRQs, stops DMA, releases the
 *  descriptor rings and disables the MAC.
 *  Return value:
 *  Always 0.
 */
int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Undo the phylink_speed_down() possibly done on suspend with WoL */
	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	pm_runtime_put(priv->device);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_stop_wq(priv);

	return 0;
}

/**
 *  stmmac_vlan_insert - program a HW VLAN-insertion context descriptor
 *  @priv: driver private structure
 *  @skb: socket buffer carrying the VLAN tag
 *  @tx_q: TX queue whose ring receives the context descriptor
 *  Description: if the hardware supports VLAN insertion and @skb carries a
 *  tag, writes a VLAN context descriptor at cur_tx, hands it to the DMA
 *  (own bit) and advances cur_tx.
 *  Return value:
 *  true when a context descriptor was consumed, false otherwise.
 */
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	/* For 802.1ad the skb tag becomes the inner tag */
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
	return true;
}

/**
 *  stmmac_tso_allocator - fill TX descriptors for a TSO payload buffer
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills one or more descriptors for @total_len bytes of
 *  payload starting at DMA address @des, consuming a fresh ring entry per
 *  TSO_MAX_BUFF_SIZE chunk.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		/* TBS-capable queues use the enhanced descriptor layout */
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		/* Advance into the buffer by the amount already consumed */
		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		/* Mark last only on the final chunk of the last segment */
		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
			0, 1,
			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
			0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

/**
 *  stmmac_flush_tx_descriptors - publish prepared TX descriptors to the DMA
 *  @priv: driver private structure
 *  @queue: TX queue index
 *  Description: computes the tail address from cur_tx (descriptor size
 *  depends on the descriptor flavor in use) and writes the tail pointer so
 *  the DMA engine starts fetching the newly prepared descriptors.
 */
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int desc_size;

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and then a barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
}

/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: this is the transmit function that is called on TSO frames
 *  (support available on GMAC4 and newer chips).
 *  Diagram below show the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, tx_packets;
	int tmp_pay_len = 0, first_tx;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	u8 proto_hdr_len, hdr;
	u32 pay_len, mss;
	dma_addr_t des;
	int i;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths: UDP GSO (USO) vs TCP TSO */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
		hdr = sizeof(struct udphdr);
	} else {
		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr = tcp_hdrlen(skb);
	}

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed: consumes a context descriptor whose
	 * own bit is deliberately set last, after the first data descriptor
	 * is complete (see the mss_desc handling at the end).
	 */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;

	if (priv->dma_cap.addr64 <= 32) {
		/* 32-bit addressing: header in buf1, payload starts in buf2
		 * of the same descriptor.
		 */
		first->des0 = cpu_to_le32(des);

		/* Fill start of payload in buff2 of first descriptor */
		if (pay_len)
			first->des1 = cpu_to_le32(des + proto_hdr_len);

		/* If needed take extra descriptors to fill the remaining payload */
		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
	} else {
		/* >32-bit addressing: whole payload goes through extra
		 * descriptors allocated below.
		 */
		stmmac_set_desc_addr(priv, first, des);
		tmp_pay_len = pay_len;
		des += proto_hdr_len;
		pay_len = 0;
	}

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;

	/* Manage tx mitigation: decide whether this packet should raise a
	 * TX-complete interrupt, based on HW timestamping and the per-queue
	 * coalescing frame count.
	 */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
			proto_hdr_len,
			pay_len,
			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
			hdr / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	/* Publish the descriptors (wmb + tail pointer) and (re)arm the
	 * TX coalescing timer.
	 */
	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	/* Drop the frame: previously-mapped pieces are reclaimed later by
	 * the regular TX clean path — NOTE(review): confirm against
	 * stmmac_tx_clean; they are not unmapped here.
	 */
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
417932ceabcaSGiuseppe CAVALLARO * It programs the chain or the ring and supports oversized frames 418032ceabcaSGiuseppe CAVALLARO * and SG feature. 41817ac6653aSJeff Kirsher */ 41827ac6653aSJeff Kirsher static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 41837ac6653aSJeff Kirsher { 4184c2837423SJose Abreu unsigned int first_entry, tx_packets, enh_desc; 41857ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 41860e80bdc9SGiuseppe Cavallaro unsigned int nopaged_len = skb_headlen(skb); 41874a7d666aSGiuseppe CAVALLARO int i, csum_insertion = 0, is_jumbo = 0; 4188ce736788SJoao Pinto u32 queue = skb_get_queue_mapping(skb); 41897ac6653aSJeff Kirsher int nfrags = skb_shinfo(skb)->nr_frags; 4190b7766206SJose Abreu int gso = skb_shinfo(skb)->gso_type; 4191579a25a8SJose Abreu struct dma_edesc *tbs_desc = NULL; 41927ac6653aSJeff Kirsher struct dma_desc *desc, *first; 4193ce736788SJoao Pinto struct stmmac_tx_queue *tx_q; 4194c2837423SJose Abreu bool has_vlan, set_ic; 4195d96febedSOng Boon Leong int entry, first_tx; 4196a993db88SJose Abreu dma_addr_t des; 4197f748be53SAlexandre TORGUE 4198ce736788SJoao Pinto tx_q = &priv->tx_queue[queue]; 4199c2837423SJose Abreu first_tx = tx_q->cur_tx; 4200ce736788SJoao Pinto 4201be1c7eaeSVineetha G. 
Jaya Kumaran if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 4202e2cd682dSJose Abreu stmmac_disable_eee_mode(priv); 4203e2cd682dSJose Abreu 4204f748be53SAlexandre TORGUE /* Manage oversized TCP frames for GMAC4 device */ 4205f748be53SAlexandre TORGUE if (skb_is_gso(skb) && priv->tso) { 4206b7766206SJose Abreu if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 4207b7766206SJose Abreu return stmmac_tso_xmit(skb, dev); 4208b7766206SJose Abreu if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 4209f748be53SAlexandre TORGUE return stmmac_tso_xmit(skb, dev); 4210f748be53SAlexandre TORGUE } 42117ac6653aSJeff Kirsher 4212ce736788SJoao Pinto if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 4213c22a3f48SJoao Pinto if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4214c22a3f48SJoao Pinto netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4215c22a3f48SJoao Pinto queue)); 42167ac6653aSJeff Kirsher /* This is a hard error, log it. */ 421738ddc59dSLABBE Corentin netdev_err(priv->dev, 421838ddc59dSLABBE Corentin "%s: Tx Ring full when queue awake\n", 421938ddc59dSLABBE Corentin __func__); 42207ac6653aSJeff Kirsher } 42217ac6653aSJeff Kirsher return NETDEV_TX_BUSY; 42227ac6653aSJeff Kirsher } 42237ac6653aSJeff Kirsher 422430d93227SJose Abreu /* Check if VLAN can be inserted by HW */ 422530d93227SJose Abreu has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 422630d93227SJose Abreu 4227ce736788SJoao Pinto entry = tx_q->cur_tx; 42280e80bdc9SGiuseppe Cavallaro first_entry = entry; 4229b4c9784cSNiklas Cassel WARN_ON(tx_q->tx_skbuff[first_entry]); 42307ac6653aSJeff Kirsher 42317ac6653aSJeff Kirsher csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 42327ac6653aSJeff Kirsher 42330e80bdc9SGiuseppe Cavallaro if (likely(priv->extend_desc)) 4234ce736788SJoao Pinto desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4235579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4236579a25a8SJose Abreu desc = &tx_q->dma_entx[entry].basic; 4237c24602efSGiuseppe 
CAVALLARO else 4238ce736788SJoao Pinto desc = tx_q->dma_tx + entry; 4239c24602efSGiuseppe CAVALLARO 42407ac6653aSJeff Kirsher first = desc; 42417ac6653aSJeff Kirsher 424230d93227SJose Abreu if (has_vlan) 424330d93227SJose Abreu stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 424430d93227SJose Abreu 42450e80bdc9SGiuseppe Cavallaro enh_desc = priv->plat->enh_desc; 42464a7d666aSGiuseppe CAVALLARO /* To program the descriptors according to the size of the frame */ 424729896a67SGiuseppe CAVALLARO if (enh_desc) 42482c520b1cSJose Abreu is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 424929896a67SGiuseppe CAVALLARO 425063a550fcSJose Abreu if (unlikely(is_jumbo)) { 42512c520b1cSJose Abreu entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 425263a550fcSJose Abreu if (unlikely(entry < 0) && (entry != -EINVAL)) 4253362b37beSGiuseppe CAVALLARO goto dma_map_err; 425429896a67SGiuseppe CAVALLARO } 42557ac6653aSJeff Kirsher 42567ac6653aSJeff Kirsher for (i = 0; i < nfrags; i++) { 42579e903e08SEric Dumazet const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 42589e903e08SEric Dumazet int len = skb_frag_size(frag); 4259be434d50SGiuseppe Cavallaro bool last_segment = (i == (nfrags - 1)); 42607ac6653aSJeff Kirsher 4261aa042f60SSong, Yoong Siang entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 4262b4c9784cSNiklas Cassel WARN_ON(tx_q->tx_skbuff[entry]); 4263e3ad57c9SGiuseppe Cavallaro 42640e80bdc9SGiuseppe Cavallaro if (likely(priv->extend_desc)) 4265ce736788SJoao Pinto desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4266579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4267579a25a8SJose Abreu desc = &tx_q->dma_entx[entry].basic; 4268c24602efSGiuseppe CAVALLARO else 4269ce736788SJoao Pinto desc = tx_q->dma_tx + entry; 42707ac6653aSJeff Kirsher 4271f748be53SAlexandre TORGUE des = skb_frag_dma_map(priv->device, frag, 0, len, 4272f722380dSIan Campbell DMA_TO_DEVICE); 4273f748be53SAlexandre TORGUE if (dma_mapping_error(priv->device, des)) 
4274362b37beSGiuseppe CAVALLARO goto dma_map_err; /* should reuse desc w/o issues */ 4275362b37beSGiuseppe CAVALLARO 4276ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].buf = des; 42776844171dSJose Abreu 42786844171dSJose Abreu stmmac_set_desc_addr(priv, desc, des); 4279f748be53SAlexandre TORGUE 4280ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].map_as_page = true; 4281ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].len = len; 4282ce736788SJoao Pinto tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 4283be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 42840e80bdc9SGiuseppe Cavallaro 42850e80bdc9SGiuseppe Cavallaro /* Prepare the descriptor and set the own bit too */ 428642de047dSJose Abreu stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 428742de047dSJose Abreu priv->mode, 1, last_segment, skb->len); 42887ac6653aSJeff Kirsher } 42897ac6653aSJeff Kirsher 429005cf0d1bSNiklas Cassel /* Only the last descriptor gets to point to the skb. */ 429105cf0d1bSNiklas Cassel tx_q->tx_skbuff[entry] = skb; 4292be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4293e3ad57c9SGiuseppe Cavallaro 42947df4a3a7SJose Abreu /* According to the coalesce parameter the IC bit for the latest 42957df4a3a7SJose Abreu * segment is reset and the timer re-started to clean the tx status. 42967df4a3a7SJose Abreu * This approach takes care about the fragments: desc is the first 42977df4a3a7SJose Abreu * element in case of no SG. 
42987df4a3a7SJose Abreu */ 4299c2837423SJose Abreu tx_packets = (entry + 1) - first_tx; 4300c2837423SJose Abreu tx_q->tx_count_frames += tx_packets; 4301c2837423SJose Abreu 4302c2837423SJose Abreu if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4303c2837423SJose Abreu set_ic = true; 4304db2f2842SOng Boon Leong else if (!priv->tx_coal_frames[queue]) 4305c2837423SJose Abreu set_ic = false; 4306db2f2842SOng Boon Leong else if (tx_packets > priv->tx_coal_frames[queue]) 4307c2837423SJose Abreu set_ic = true; 4308db2f2842SOng Boon Leong else if ((tx_q->tx_count_frames % 4309db2f2842SOng Boon Leong priv->tx_coal_frames[queue]) < tx_packets) 4310c2837423SJose Abreu set_ic = true; 4311c2837423SJose Abreu else 4312c2837423SJose Abreu set_ic = false; 4313c2837423SJose Abreu 4314c2837423SJose Abreu if (set_ic) { 43157df4a3a7SJose Abreu if (likely(priv->extend_desc)) 43167df4a3a7SJose Abreu desc = &tx_q->dma_etx[entry].basic; 4317579a25a8SJose Abreu else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4318579a25a8SJose Abreu desc = &tx_q->dma_entx[entry].basic; 43197df4a3a7SJose Abreu else 43207df4a3a7SJose Abreu desc = &tx_q->dma_tx[entry]; 43217df4a3a7SJose Abreu 43227df4a3a7SJose Abreu tx_q->tx_count_frames = 0; 43237df4a3a7SJose Abreu stmmac_set_tx_ic(priv, desc); 43247df4a3a7SJose Abreu priv->xstats.tx_set_ic_bit++; 43257df4a3a7SJose Abreu } 43267df4a3a7SJose Abreu 432705cf0d1bSNiklas Cassel /* We've used all descriptors we need for this skb, however, 432805cf0d1bSNiklas Cassel * advance cur_tx so that it references a fresh descriptor. 432905cf0d1bSNiklas Cassel * ndo_start_xmit will fill this descriptor the next time it's 433005cf0d1bSNiklas Cassel * called and stmmac_tx_clean may clean up to this descriptor. 
433105cf0d1bSNiklas Cassel */ 4332aa042f60SSong, Yoong Siang entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 4333ce736788SJoao Pinto tx_q->cur_tx = entry; 43347ac6653aSJeff Kirsher 43357ac6653aSJeff Kirsher if (netif_msg_pktdata(priv)) { 433638ddc59dSLABBE Corentin netdev_dbg(priv->dev, 433738ddc59dSLABBE Corentin "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 4338ce736788SJoao Pinto __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 43390e80bdc9SGiuseppe Cavallaro entry, first, nfrags); 434083d7af64SGiuseppe CAVALLARO 434138ddc59dSLABBE Corentin netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 43427ac6653aSJeff Kirsher print_pkt(skb->data, skb->len); 43437ac6653aSJeff Kirsher } 43440e80bdc9SGiuseppe Cavallaro 4345ce736788SJoao Pinto if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4346b3e51069SLABBE Corentin netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4347b3e51069SLABBE Corentin __func__); 4348c22a3f48SJoao Pinto netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 43497ac6653aSJeff Kirsher } 43507ac6653aSJeff Kirsher 43517ac6653aSJeff Kirsher dev->stats.tx_bytes += skb->len; 43527ac6653aSJeff Kirsher 43538000ddc0SJose Abreu if (priv->sarc_type) 43548000ddc0SJose Abreu stmmac_set_desc_sarc(priv, first, priv->sarc_type); 43558000ddc0SJose Abreu 43560e80bdc9SGiuseppe Cavallaro skb_tx_timestamp(skb); 43570e80bdc9SGiuseppe Cavallaro 43580e80bdc9SGiuseppe Cavallaro /* Ready to fill the first descriptor and set the OWN bit w/o any 43590e80bdc9SGiuseppe Cavallaro * problems because all the descriptors are actually ready to be 43600e80bdc9SGiuseppe Cavallaro * passed to the DMA engine. 
43610e80bdc9SGiuseppe Cavallaro */ 43620e80bdc9SGiuseppe Cavallaro if (likely(!is_jumbo)) { 43630e80bdc9SGiuseppe Cavallaro bool last_segment = (nfrags == 0); 43640e80bdc9SGiuseppe Cavallaro 4365f748be53SAlexandre TORGUE des = dma_map_single(priv->device, skb->data, 43660e80bdc9SGiuseppe Cavallaro nopaged_len, DMA_TO_DEVICE); 4367f748be53SAlexandre TORGUE if (dma_mapping_error(priv->device, des)) 43680e80bdc9SGiuseppe Cavallaro goto dma_map_err; 43690e80bdc9SGiuseppe Cavallaro 4370ce736788SJoao Pinto tx_q->tx_skbuff_dma[first_entry].buf = des; 4371be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4372be8b38a7SOng Boon Leong tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 43736844171dSJose Abreu 43746844171dSJose Abreu stmmac_set_desc_addr(priv, first, des); 4375f748be53SAlexandre TORGUE 4376ce736788SJoao Pinto tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 4377ce736788SJoao Pinto tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 43780e80bdc9SGiuseppe Cavallaro 4379891434b1SRayagond Kokatanur if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4380891434b1SRayagond Kokatanur priv->hwts_tx_en)) { 4381891434b1SRayagond Kokatanur /* declare that device is doing timestamping */ 4382891434b1SRayagond Kokatanur skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 438342de047dSJose Abreu stmmac_enable_tx_timestamp(priv, first); 4384891434b1SRayagond Kokatanur } 4385891434b1SRayagond Kokatanur 43860e80bdc9SGiuseppe Cavallaro /* Prepare the first descriptor setting the OWN bit too */ 438742de047dSJose Abreu stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 4388579a25a8SJose Abreu csum_insertion, priv->mode, 0, last_segment, 438942de047dSJose Abreu skb->len); 439080acbed9SAaro Koskinen } 43910e80bdc9SGiuseppe Cavallaro 4392579a25a8SJose Abreu if (tx_q->tbs & STMMAC_TBS_EN) { 4393579a25a8SJose Abreu struct timespec64 ts = ns_to_timespec64(skb->tstamp); 4394579a25a8SJose Abreu 4395579a25a8SJose Abreu tbs_desc = 
&tx_q->dma_entx[first_entry]; 4396579a25a8SJose Abreu stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 4397579a25a8SJose Abreu } 4398579a25a8SJose Abreu 4399579a25a8SJose Abreu stmmac_set_tx_owner(priv, first); 4400579a25a8SJose Abreu 4401c22a3f48SJoao Pinto netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4402f748be53SAlexandre TORGUE 4403a4e887faSJose Abreu stmmac_enable_dma_transmission(priv, priv->ioaddr); 44048fce3331SJose Abreu 4405d96febedSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 44064772f26dSJose Abreu stmmac_tx_timer_arm(priv, queue); 44077ac6653aSJeff Kirsher 4408362b37beSGiuseppe CAVALLARO return NETDEV_TX_OK; 4409a9097a96SGiuseppe CAVALLARO 4410362b37beSGiuseppe CAVALLARO dma_map_err: 441138ddc59dSLABBE Corentin netdev_err(priv->dev, "Tx DMA map failed\n"); 4412362b37beSGiuseppe CAVALLARO dev_kfree_skb(skb); 4413362b37beSGiuseppe CAVALLARO priv->dev->stats.tx_dropped++; 44147ac6653aSJeff Kirsher return NETDEV_TX_OK; 44157ac6653aSJeff Kirsher } 44167ac6653aSJeff Kirsher 4417b9381985SVince Bridgers static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 4418b9381985SVince Bridgers { 4419ab188e8fSElad Nachman struct vlan_ethhdr *veth; 4420ab188e8fSElad Nachman __be16 vlan_proto; 4421b9381985SVince Bridgers u16 vlanid; 4422b9381985SVince Bridgers 4423ab188e8fSElad Nachman veth = (struct vlan_ethhdr *)skb->data; 4424ab188e8fSElad Nachman vlan_proto = veth->h_vlan_proto; 4425ab188e8fSElad Nachman 4426ab188e8fSElad Nachman if ((vlan_proto == htons(ETH_P_8021Q) && 4427ab188e8fSElad Nachman dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 4428ab188e8fSElad Nachman (vlan_proto == htons(ETH_P_8021AD) && 4429ab188e8fSElad Nachman dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 4430b9381985SVince Bridgers /* pop the vlan tag */ 4431ab188e8fSElad Nachman vlanid = ntohs(veth->h_vlan_TCI); 4432ab188e8fSElad Nachman memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 4433b9381985SVince Bridgers skb_pull(skb, VLAN_HLEN); 
		/* Hand the popped tag to the stack as HW-accelerated VLAN info */
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	/* Walk the dirty region of the ring, re-arming one descriptor per
	 * iteration.  Stops early if the page pool runs dry.
	 */
	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* Re-allocate the primary buffer page if it was consumed */
		if (!buf->page) {
			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->page)
				break;
		}

		/* With split-header (sph) enabled a second buffer is needed
		 * for the payload part of the frame.
		 */
		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Decide whether this descriptor should arm the RX interrupt
		 * watchdog; only meaningful when RIWT is in use.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* All descriptor fields must be visible to the device before
		 * ownership is transferred back to the DMA engine.
		 */
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	/* Kick the DMA by advancing the tail pointer past the refilled area */
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}

/**
 * stmmac_rx_buf1_len - usable length of the primary RX buffer
 * @priv: driver private structure
 * @p: RX descriptor
 * @status: RX status bits returned by stmmac_rx_status()
 * @len: bytes of this frame already accumulated from previous descriptors
 *
 * Returns the number of valid bytes held in buffer 1 of @p, accounting for
 * split-header mode and for first/middle/last descriptor position.
 */
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_buf_sz, plen);
}

/**
 * stmmac_rx_buf2_len - usable length of the secondary (split-header) buffer
 * @priv: driver private structure
 * @p: RX descriptor
 * @status: RX status bits returned by stmmac_rx_status()
 * @len: bytes of this frame already accumulated (buffer 1 included)
 *
 * Only meaningful in split-header mode; returns 0 otherwise.  For the last
 * descriptor the remainder (total frame length minus @len) is returned.
 */
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}
/**
 * stmmac_xdp_xmit_xdpf - queue one XDP frame on a TX ring
 * @priv: driver private structure
 * @queue: TX queue index
 * @xdpf: the XDP frame to transmit
 * @dma_map: true on the ndo_xdp_xmit (redirect target) path, where the frame
 *	must be freshly DMA-mapped; false on the XDP_TX path, where the
 *	page-pool DMA address is reused and only synced for the device
 *
 * Callers hold the netdev TX queue lock (see stmmac_xdp_xmit_back()).
 * Returns STMMAC_XDP_TX on success, or STMMAC_XDP_CONSUMED when no
 * descriptor is available or DMA mapping fails.
 */
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	/* Keep headroom so XDP TX and the slow (skb) path can coexist */
	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		/* XDP_TX: the buffer came from our own RX page pool; the
		 * frame data sits after the xdp_frame struct and headroom.
		 */
		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	/* Remember the frame so stmmac_tx_clean can return/free it */
	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       true, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	/* NOTE(review): unlike stmmac_xmit(), this assumes
	 * tx_coal_frames[queue] != 0; a zero coalesce setting would fault on
	 * the modulo below — confirm it cannot be configured to zero.
	 */
	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		priv->xstats.tx_set_ic_bit++;
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}

/* Map a CPU id onto a valid TX queue index by wrapping into
 * [0, tx_queues_to_use).
 */
static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
				   int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= priv->plat->tx_queues_to_use)
		index -= priv->plat->tx_queues_to_use;

	return index;
}

/* Transmit an XDP_TX buffer: convert it to an xdp_frame, pick a per-CPU TX
 * queue, and enqueue it under the queue's xmit lock.
 */
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	nq->trans_start = jiffies;

	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv, queue);

	__netif_tx_unlock(nq);

	return res;
}

/* Run the given XDP program on @xdp and translate the BPF verdict into a
 * STMMAC_XDP_* action code.
 */
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
				 struct bpf_prog *prog,
				 struct xdp_buff *xdp)
{
	u32 act;
	int res;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		res = STMMAC_XDP_PASS;
		break;
	case XDP_TX:
		res = stmmac_xdp_xmit_back(priv, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
			res = STMMAC_XDP_CONSUMED;
		else
			res = STMMAC_XDP_REDIRECT;
		break;
	default:
		/* Unknown verdicts are reported, then dropped */
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		res = STMMAC_XDP_CONSUMED;
		break;
	}

	return res;
}

/* Run the attached XDP program (if any) on @xdp.  The STMMAC_XDP_* verdict
 * is encoded negated inside an ERR_PTR for the caller to decode.
 */
static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
					   struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog) {
		/* No program attached: behave as XDP_PASS */
		res = STMMAC_XDP_PASS;
		goto out;
	}

	res = __stmmac_xdp_run_prog(priv, prog, xdp);
out:
	return ERR_PTR(-res);
}

/* End-of-NAPI-poll housekeeping for XDP: arm the TX coalesce timer if any
 * XDP_TX frames were queued, and flush redirect maps if any frame was
 * redirected.
 */
static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
				   int xdp_status)
{
	int cpu = smp_processor_id();
	int queue;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);

	if (xdp_status & STMMAC_XDP_TX)
		stmmac_tx_timer_arm(priv, queue);

	if (xdp_status & STMMAC_XDP_REDIRECT)
		xdp_do_flush();
}

/* Copy a zero-copy (XSK) buffer into a freshly allocated skb, preserving
 * any XDP metadata placed in front of the packet data.  Returns NULL on
 * allocation failure.
 */
static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
					       struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&ch->rxtx_napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}

/* Deliver an XDP_PASS zero-copy frame to the network stack: build an skb
 * copy, attach timestamp/VLAN/checksum/hash information, and hand it to
 * GRO on the channel's rxtx NAPI context.
 */
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
				   struct dma_desc *p, struct dma_desc *np,
				   struct xdp_buff *xdp)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int len = xdp->data_end - xdp->data;
	enum pkt_hash_types hash_type;
	int coe = priv->hw->rx_csum;
	struct sk_buff *skb;
	u32 hash;

	skb = stmmac_construct_skb_zc(ch, xdp);
	if (!skb) {
		priv->dev->stats.rx_dropped++;
		return;
	}

	stmmac_get_rx_hwtstamp(priv, p, np, skb);
	stmmac_rx_vlan(priv->dev, skb);
	skb->protocol = eth_type_trans(skb, priv->dev);

	if (unlikely(!coe))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
		skb_set_hash(skb, hash, hash_type);

	skb_record_rx_queue(skb, queue);
	napi_gro_receive(&ch->rxtx_napi, skb);

	priv->dev->stats.rx_packets++;
	priv->dev->stats.rx_bytes += len;
}
/* Refill up to @budget RX descriptors of @queue with fresh XSK (zero-copy)
 * buffers.  Returns false if the XSK pool could not supply a buffer, so the
 * caller can record the failure; descriptors are handed back to the DMA
 * engine with a dma_wmb() ordering the writes before the OWN bit.
 */
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	/* Never refill past the region the DMA has actually released */
	budget = min(budget, stmmac_rx_dirty(priv, queue));

	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				/* Pool exhausted: report so caller retries */
				ret = false;
				break;
			}
		}

		if (priv->extend_desc)
			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			rx_desc = rx_q->dma_rx + entry;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* RX watchdog decision mirrors stmmac_rx_refill() */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Publish descriptor contents before the OWN bit */
		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}

	if (rx_desc) {
		rx_q->dirty_rx = entry;
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->dirty_rx * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
	}

	return ret;
}

/* stmmac_rx_zc - zero-copy (XSK) RX NAPI poll loop.
 * NOTE(review): this function continues beyond this hunk; only its head is
 * visible here.
 */
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	unsigned int count = 0, error = 0, len = 0;
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int next_entry = rx_q->cur_rx;
	unsigned int desc_size;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
	int status = 0;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
4867bba2556eSOng Boon Leong rx_head = (void *)rx_q->dma_erx; 4868bba2556eSOng Boon Leong desc_size = sizeof(struct dma_extended_desc); 4869bba2556eSOng Boon Leong } else { 4870bba2556eSOng Boon Leong rx_head = (void *)rx_q->dma_rx; 4871bba2556eSOng Boon Leong desc_size = sizeof(struct dma_desc); 4872bba2556eSOng Boon Leong } 4873bba2556eSOng Boon Leong 4874bba2556eSOng Boon Leong stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, 4875bba2556eSOng Boon Leong rx_q->dma_rx_phy, desc_size); 4876bba2556eSOng Boon Leong } 4877bba2556eSOng Boon Leong while (count < limit) { 4878bba2556eSOng Boon Leong struct stmmac_rx_buffer *buf; 4879bba2556eSOng Boon Leong unsigned int buf1_len = 0; 4880bba2556eSOng Boon Leong struct dma_desc *np, *p; 4881bba2556eSOng Boon Leong int entry; 4882bba2556eSOng Boon Leong int res; 4883bba2556eSOng Boon Leong 4884bba2556eSOng Boon Leong if (!count && rx_q->state_saved) { 4885bba2556eSOng Boon Leong error = rx_q->state.error; 4886bba2556eSOng Boon Leong len = rx_q->state.len; 4887bba2556eSOng Boon Leong } else { 4888bba2556eSOng Boon Leong rx_q->state_saved = false; 4889bba2556eSOng Boon Leong error = 0; 4890bba2556eSOng Boon Leong len = 0; 4891bba2556eSOng Boon Leong } 4892bba2556eSOng Boon Leong 4893bba2556eSOng Boon Leong if (count >= limit) 4894bba2556eSOng Boon Leong break; 4895bba2556eSOng Boon Leong 4896bba2556eSOng Boon Leong read_again: 4897bba2556eSOng Boon Leong buf1_len = 0; 4898bba2556eSOng Boon Leong entry = next_entry; 4899bba2556eSOng Boon Leong buf = &rx_q->buf_pool[entry]; 4900bba2556eSOng Boon Leong 4901bba2556eSOng Boon Leong if (dirty >= STMMAC_RX_FILL_BATCH) { 4902bba2556eSOng Boon Leong failure = failure || 4903bba2556eSOng Boon Leong !stmmac_rx_refill_zc(priv, queue, dirty); 4904bba2556eSOng Boon Leong dirty = 0; 4905bba2556eSOng Boon Leong } 4906bba2556eSOng Boon Leong 4907bba2556eSOng Boon Leong if (priv->extend_desc) 4908bba2556eSOng Boon Leong p = (struct dma_desc *)(rx_q->dma_erx + entry); 4909bba2556eSOng 
Boon Leong else 4910bba2556eSOng Boon Leong p = rx_q->dma_rx + entry; 4911bba2556eSOng Boon Leong 4912bba2556eSOng Boon Leong /* read the status of the incoming frame */ 4913bba2556eSOng Boon Leong status = stmmac_rx_status(priv, &priv->dev->stats, 4914bba2556eSOng Boon Leong &priv->xstats, p); 4915bba2556eSOng Boon Leong /* check if managed by the DMA otherwise go ahead */ 4916bba2556eSOng Boon Leong if (unlikely(status & dma_own)) 4917bba2556eSOng Boon Leong break; 4918bba2556eSOng Boon Leong 4919bba2556eSOng Boon Leong /* Prefetch the next RX descriptor */ 4920bba2556eSOng Boon Leong rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 4921bba2556eSOng Boon Leong priv->dma_rx_size); 4922bba2556eSOng Boon Leong next_entry = rx_q->cur_rx; 4923bba2556eSOng Boon Leong 4924bba2556eSOng Boon Leong if (priv->extend_desc) 4925bba2556eSOng Boon Leong np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 4926bba2556eSOng Boon Leong else 4927bba2556eSOng Boon Leong np = rx_q->dma_rx + next_entry; 4928bba2556eSOng Boon Leong 4929bba2556eSOng Boon Leong prefetch(np); 4930bba2556eSOng Boon Leong 49312b9fff64SSong Yoong Siang /* Ensure a valid XSK buffer before proceed */ 49322b9fff64SSong Yoong Siang if (!buf->xdp) 49332b9fff64SSong Yoong Siang break; 49342b9fff64SSong Yoong Siang 4935bba2556eSOng Boon Leong if (priv->extend_desc) 4936bba2556eSOng Boon Leong stmmac_rx_extended_status(priv, &priv->dev->stats, 4937bba2556eSOng Boon Leong &priv->xstats, 4938bba2556eSOng Boon Leong rx_q->dma_erx + entry); 4939bba2556eSOng Boon Leong if (unlikely(status == discard_frame)) { 4940bba2556eSOng Boon Leong xsk_buff_free(buf->xdp); 4941bba2556eSOng Boon Leong buf->xdp = NULL; 4942bba2556eSOng Boon Leong dirty++; 4943bba2556eSOng Boon Leong error = 1; 4944bba2556eSOng Boon Leong if (!priv->hwts_rx_en) 4945bba2556eSOng Boon Leong priv->dev->stats.rx_errors++; 4946bba2556eSOng Boon Leong } 4947bba2556eSOng Boon Leong 4948bba2556eSOng Boon Leong if (unlikely(error && (status & rx_not_ls))) 
4949bba2556eSOng Boon Leong goto read_again; 4950bba2556eSOng Boon Leong if (unlikely(error)) { 4951bba2556eSOng Boon Leong count++; 4952bba2556eSOng Boon Leong continue; 4953bba2556eSOng Boon Leong } 4954bba2556eSOng Boon Leong 4955bba2556eSOng Boon Leong /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ 4956bba2556eSOng Boon Leong if (likely(status & rx_not_ls)) { 4957bba2556eSOng Boon Leong xsk_buff_free(buf->xdp); 4958bba2556eSOng Boon Leong buf->xdp = NULL; 4959bba2556eSOng Boon Leong dirty++; 4960bba2556eSOng Boon Leong count++; 4961bba2556eSOng Boon Leong goto read_again; 4962bba2556eSOng Boon Leong } 4963bba2556eSOng Boon Leong 4964bba2556eSOng Boon Leong /* XDP ZC Frame only support primary buffers for now */ 4965bba2556eSOng Boon Leong buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 4966bba2556eSOng Boon Leong len += buf1_len; 4967bba2556eSOng Boon Leong 4968bba2556eSOng Boon Leong /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 4969bba2556eSOng Boon Leong * Type frames (LLC/LLC-SNAP) 4970bba2556eSOng Boon Leong * 4971bba2556eSOng Boon Leong * llc_snap is never checked in GMAC >= 4, so this ACS 4972bba2556eSOng Boon Leong * feature is always disabled and packets need to be 4973bba2556eSOng Boon Leong * stripped manually. 
4974bba2556eSOng Boon Leong */ 4975bba2556eSOng Boon Leong if (likely(!(status & rx_not_ls)) && 4976bba2556eSOng Boon Leong (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || 4977bba2556eSOng Boon Leong unlikely(status != llc_snap))) { 4978bba2556eSOng Boon Leong buf1_len -= ETH_FCS_LEN; 4979bba2556eSOng Boon Leong len -= ETH_FCS_LEN; 4980bba2556eSOng Boon Leong } 4981bba2556eSOng Boon Leong 4982bba2556eSOng Boon Leong /* RX buffer is good and fit into a XSK pool buffer */ 4983bba2556eSOng Boon Leong buf->xdp->data_end = buf->xdp->data + buf1_len; 4984bba2556eSOng Boon Leong xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); 4985bba2556eSOng Boon Leong 4986bba2556eSOng Boon Leong prog = READ_ONCE(priv->xdp_prog); 4987bba2556eSOng Boon Leong res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); 4988bba2556eSOng Boon Leong 4989bba2556eSOng Boon Leong switch (res) { 4990bba2556eSOng Boon Leong case STMMAC_XDP_PASS: 4991bba2556eSOng Boon Leong stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); 4992bba2556eSOng Boon Leong xsk_buff_free(buf->xdp); 4993bba2556eSOng Boon Leong break; 4994bba2556eSOng Boon Leong case STMMAC_XDP_CONSUMED: 4995bba2556eSOng Boon Leong xsk_buff_free(buf->xdp); 4996bba2556eSOng Boon Leong priv->dev->stats.rx_dropped++; 4997bba2556eSOng Boon Leong break; 4998bba2556eSOng Boon Leong case STMMAC_XDP_TX: 4999bba2556eSOng Boon Leong case STMMAC_XDP_REDIRECT: 5000bba2556eSOng Boon Leong xdp_status |= res; 5001bba2556eSOng Boon Leong break; 5002bba2556eSOng Boon Leong } 5003bba2556eSOng Boon Leong 5004bba2556eSOng Boon Leong buf->xdp = NULL; 5005bba2556eSOng Boon Leong dirty++; 5006bba2556eSOng Boon Leong count++; 5007bba2556eSOng Boon Leong } 5008bba2556eSOng Boon Leong 5009bba2556eSOng Boon Leong if (status & rx_not_ls) { 5010bba2556eSOng Boon Leong rx_q->state_saved = true; 5011bba2556eSOng Boon Leong rx_q->state.error = error; 5012bba2556eSOng Boon Leong rx_q->state.len = len; 5013bba2556eSOng Boon Leong } 5014bba2556eSOng Boon Leong 
5015bba2556eSOng Boon Leong stmmac_finalize_xdp_rx(priv, xdp_status); 5016bba2556eSOng Boon Leong 501768e9c5deSVijayakannan Ayyathurai priv->xstats.rx_pkt_n += count; 501868e9c5deSVijayakannan Ayyathurai priv->xstats.rxq_stats[queue].rx_pkt_n += count; 501968e9c5deSVijayakannan Ayyathurai 5020bba2556eSOng Boon Leong if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { 5021bba2556eSOng Boon Leong if (failure || stmmac_rx_dirty(priv, queue) > 0) 5022bba2556eSOng Boon Leong xsk_set_rx_need_wakeup(rx_q->xsk_pool); 5023bba2556eSOng Boon Leong else 5024bba2556eSOng Boon Leong xsk_clear_rx_need_wakeup(rx_q->xsk_pool); 5025bba2556eSOng Boon Leong 5026bba2556eSOng Boon Leong return (int)count; 5027bba2556eSOng Boon Leong } 5028bba2556eSOng Boon Leong 5029bba2556eSOng Boon Leong return failure ? limit : (int)count; 5030bba2556eSOng Boon Leong } 5031bba2556eSOng Boon Leong 503232ceabcaSGiuseppe CAVALLARO /** 5033732fdf0eSGiuseppe CAVALLARO * stmmac_rx - manage the receive process 503432ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 503554139cf3SJoao Pinto * @limit: napi bugget 503654139cf3SJoao Pinto * @queue: RX queue index. 503732ceabcaSGiuseppe CAVALLARO * Description : this the function called by the napi poll method. 503832ceabcaSGiuseppe CAVALLARO * It gets all the frames inside the ring. 
503932ceabcaSGiuseppe CAVALLARO */ 504054139cf3SJoao Pinto static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 50417ac6653aSJeff Kirsher { 504254139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 50438fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 5044ec222003SJose Abreu unsigned int count = 0, error = 0, len = 0; 5045ec222003SJose Abreu int status = 0, coe = priv->hw->rx_csum; 504607b39753SAaro Koskinen unsigned int next_entry = rx_q->cur_rx; 50475fabb012SOng Boon Leong enum dma_data_direction dma_dir; 5048bfaf91caSJoakim Zhang unsigned int desc_size; 5049ec222003SJose Abreu struct sk_buff *skb = NULL; 50505fabb012SOng Boon Leong struct xdp_buff xdp; 5051be8b38a7SOng Boon Leong int xdp_status = 0; 50525fabb012SOng Boon Leong int buf_sz; 50535fabb012SOng Boon Leong 50545fabb012SOng Boon Leong dma_dir = page_pool_get_dma_dir(rx_q->page_pool); 50555fabb012SOng Boon Leong buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 50567ac6653aSJeff Kirsher 505783d7af64SGiuseppe CAVALLARO if (netif_msg_rx_status(priv)) { 5058d0225e7dSAlexandre TORGUE void *rx_head; 5059d0225e7dSAlexandre TORGUE 506038ddc59dSLABBE Corentin netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5061bfaf91caSJoakim Zhang if (priv->extend_desc) { 506254139cf3SJoao Pinto rx_head = (void *)rx_q->dma_erx; 5063bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_extended_desc); 5064bfaf91caSJoakim Zhang } else { 506554139cf3SJoao Pinto rx_head = (void *)rx_q->dma_rx; 5066bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_desc); 5067bfaf91caSJoakim Zhang } 5068d0225e7dSAlexandre TORGUE 5069bfaf91caSJoakim Zhang stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, 5070bfaf91caSJoakim Zhang rx_q->dma_rx_phy, desc_size); 50717ac6653aSJeff Kirsher } 5072c24602efSGiuseppe CAVALLARO while (count < limit) { 507388ebe2cfSJose Abreu unsigned int buf1_len = 0, buf2_len = 0; 5074ec222003SJose Abreu enum pkt_hash_types hash_type; 
50752af6106aSJose Abreu struct stmmac_rx_buffer *buf; 50762af6106aSJose Abreu struct dma_desc *np, *p; 5077ec222003SJose Abreu int entry; 5078ec222003SJose Abreu u32 hash; 50797ac6653aSJeff Kirsher 5080ec222003SJose Abreu if (!count && rx_q->state_saved) { 5081ec222003SJose Abreu skb = rx_q->state.skb; 5082ec222003SJose Abreu error = rx_q->state.error; 5083ec222003SJose Abreu len = rx_q->state.len; 5084ec222003SJose Abreu } else { 5085ec222003SJose Abreu rx_q->state_saved = false; 5086ec222003SJose Abreu skb = NULL; 5087ec222003SJose Abreu error = 0; 5088ec222003SJose Abreu len = 0; 5089ec222003SJose Abreu } 5090ec222003SJose Abreu 5091ec222003SJose Abreu if (count >= limit) 5092ec222003SJose Abreu break; 5093ec222003SJose Abreu 5094ec222003SJose Abreu read_again: 509588ebe2cfSJose Abreu buf1_len = 0; 509688ebe2cfSJose Abreu buf2_len = 0; 509707b39753SAaro Koskinen entry = next_entry; 50982af6106aSJose Abreu buf = &rx_q->buf_pool[entry]; 509907b39753SAaro Koskinen 5100c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 510154139cf3SJoao Pinto p = (struct dma_desc *)(rx_q->dma_erx + entry); 5102c24602efSGiuseppe CAVALLARO else 510354139cf3SJoao Pinto p = rx_q->dma_rx + entry; 5104c24602efSGiuseppe CAVALLARO 5105c1fa3212SFabrice Gasnier /* read the status of the incoming frame */ 510642de047dSJose Abreu status = stmmac_rx_status(priv, &priv->dev->stats, 5107c1fa3212SFabrice Gasnier &priv->xstats, p); 5108c1fa3212SFabrice Gasnier /* check if managed by the DMA otherwise go ahead */ 5109c1fa3212SFabrice Gasnier if (unlikely(status & dma_own)) 51107ac6653aSJeff Kirsher break; 51117ac6653aSJeff Kirsher 5112aa042f60SSong, Yoong Siang rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5113aa042f60SSong, Yoong Siang priv->dma_rx_size); 511454139cf3SJoao Pinto next_entry = rx_q->cur_rx; 5115e3ad57c9SGiuseppe Cavallaro 5116c24602efSGiuseppe CAVALLARO if (priv->extend_desc) 511754139cf3SJoao Pinto np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5118c24602efSGiuseppe CAVALLARO 
else 511954139cf3SJoao Pinto np = rx_q->dma_rx + next_entry; 5120ba1ffd74SGiuseppe CAVALLARO 5121ba1ffd74SGiuseppe CAVALLARO prefetch(np); 51227ac6653aSJeff Kirsher 512342de047dSJose Abreu if (priv->extend_desc) 512442de047dSJose Abreu stmmac_rx_extended_status(priv, &priv->dev->stats, 512542de047dSJose Abreu &priv->xstats, rx_q->dma_erx + entry); 5126891434b1SRayagond Kokatanur if (unlikely(status == discard_frame)) { 51272af6106aSJose Abreu page_pool_recycle_direct(rx_q->page_pool, buf->page); 51282af6106aSJose Abreu buf->page = NULL; 5129ec222003SJose Abreu error = 1; 51300b273ca4SJose Abreu if (!priv->hwts_rx_en) 51310b273ca4SJose Abreu priv->dev->stats.rx_errors++; 5132ec222003SJose Abreu } 5133f748be53SAlexandre TORGUE 5134ec222003SJose Abreu if (unlikely(error && (status & rx_not_ls))) 5135ec222003SJose Abreu goto read_again; 5136ec222003SJose Abreu if (unlikely(error)) { 5137ec222003SJose Abreu dev_kfree_skb(skb); 513888ebe2cfSJose Abreu skb = NULL; 5139cda4985aSJose Abreu count++; 514007b39753SAaro Koskinen continue; 5141e527c4a7SGiuseppe CAVALLARO } 5142e527c4a7SGiuseppe CAVALLARO 5143ec222003SJose Abreu /* Buffer is good. Go on. 
*/ 5144ec222003SJose Abreu 51454744bf07SMatteo Croce prefetch(page_address(buf->page) + buf->page_offset); 514688ebe2cfSJose Abreu if (buf->sec_page) 514788ebe2cfSJose Abreu prefetch(page_address(buf->sec_page)); 514888ebe2cfSJose Abreu 514988ebe2cfSJose Abreu buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 515088ebe2cfSJose Abreu len += buf1_len; 515188ebe2cfSJose Abreu buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 515288ebe2cfSJose Abreu len += buf2_len; 5153ec222003SJose Abreu 51547ac6653aSJeff Kirsher /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 5155ceb69499SGiuseppe CAVALLARO * Type frames (LLC/LLC-SNAP) 5156565020aaSJose Abreu * 5157565020aaSJose Abreu * llc_snap is never checked in GMAC >= 4, so this ACS 5158565020aaSJose Abreu * feature is always disabled and packets need to be 5159565020aaSJose Abreu * stripped manually. 5160ceb69499SGiuseppe CAVALLARO */ 516193b5dce4SJose Abreu if (likely(!(status & rx_not_ls)) && 516293b5dce4SJose Abreu (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || 516393b5dce4SJose Abreu unlikely(status != llc_snap))) { 516488ebe2cfSJose Abreu if (buf2_len) 516588ebe2cfSJose Abreu buf2_len -= ETH_FCS_LEN; 516688ebe2cfSJose Abreu else 516788ebe2cfSJose Abreu buf1_len -= ETH_FCS_LEN; 516888ebe2cfSJose Abreu 5169ec222003SJose Abreu len -= ETH_FCS_LEN; 517083d7af64SGiuseppe CAVALLARO } 517122ad3838SGiuseppe Cavallaro 5172ec222003SJose Abreu if (!skb) { 5173be8b38a7SOng Boon Leong unsigned int pre_len, sync_len; 5174be8b38a7SOng Boon Leong 51755fabb012SOng Boon Leong dma_sync_single_for_cpu(priv->device, buf->addr, 51765fabb012SOng Boon Leong buf1_len, dma_dir); 51775fabb012SOng Boon Leong 5178d172268fSMatteo Croce xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq); 5179d172268fSMatteo Croce xdp_prepare_buff(&xdp, page_address(buf->page), 5180d172268fSMatteo Croce buf->page_offset, buf1_len, false); 51815fabb012SOng Boon Leong 5182be8b38a7SOng Boon Leong pre_len = xdp.data_end - xdp.data_hard_start - 5183be8b38a7SOng Boon 
Leong buf->page_offset; 51845fabb012SOng Boon Leong skb = stmmac_xdp_run_prog(priv, &xdp); 5185be8b38a7SOng Boon Leong /* Due xdp_adjust_tail: DMA sync for_device 5186be8b38a7SOng Boon Leong * cover max len CPU touch 5187be8b38a7SOng Boon Leong */ 5188be8b38a7SOng Boon Leong sync_len = xdp.data_end - xdp.data_hard_start - 5189be8b38a7SOng Boon Leong buf->page_offset; 5190be8b38a7SOng Boon Leong sync_len = max(sync_len, pre_len); 51915fabb012SOng Boon Leong 51925fabb012SOng Boon Leong /* For Not XDP_PASS verdict */ 51935fabb012SOng Boon Leong if (IS_ERR(skb)) { 51945fabb012SOng Boon Leong unsigned int xdp_res = -PTR_ERR(skb); 51955fabb012SOng Boon Leong 51965fabb012SOng Boon Leong if (xdp_res & STMMAC_XDP_CONSUMED) { 5197be8b38a7SOng Boon Leong page_pool_put_page(rx_q->page_pool, 5198be8b38a7SOng Boon Leong virt_to_head_page(xdp.data), 5199be8b38a7SOng Boon Leong sync_len, true); 52005fabb012SOng Boon Leong buf->page = NULL; 52015fabb012SOng Boon Leong priv->dev->stats.rx_dropped++; 52025fabb012SOng Boon Leong 52035fabb012SOng Boon Leong /* Clear skb as it was set as 52045fabb012SOng Boon Leong * status by XDP program. 
52055fabb012SOng Boon Leong */ 52065fabb012SOng Boon Leong skb = NULL; 52075fabb012SOng Boon Leong 52085fabb012SOng Boon Leong if (unlikely((status & rx_not_ls))) 52095fabb012SOng Boon Leong goto read_again; 52105fabb012SOng Boon Leong 52115fabb012SOng Boon Leong count++; 52125fabb012SOng Boon Leong continue; 52138b278a5bSOng Boon Leong } else if (xdp_res & (STMMAC_XDP_TX | 52148b278a5bSOng Boon Leong STMMAC_XDP_REDIRECT)) { 5215be8b38a7SOng Boon Leong xdp_status |= xdp_res; 5216be8b38a7SOng Boon Leong buf->page = NULL; 5217be8b38a7SOng Boon Leong skb = NULL; 5218be8b38a7SOng Boon Leong count++; 5219be8b38a7SOng Boon Leong continue; 52205fabb012SOng Boon Leong } 52215fabb012SOng Boon Leong } 52225fabb012SOng Boon Leong } 52235fabb012SOng Boon Leong 52245fabb012SOng Boon Leong if (!skb) { 52255fabb012SOng Boon Leong /* XDP program may expand or reduce tail */ 52265fabb012SOng Boon Leong buf1_len = xdp.data_end - xdp.data; 52275fabb012SOng Boon Leong 522888ebe2cfSJose Abreu skb = napi_alloc_skb(&ch->rx_napi, buf1_len); 5229ec222003SJose Abreu if (!skb) { 523022ad3838SGiuseppe Cavallaro priv->dev->stats.rx_dropped++; 5231cda4985aSJose Abreu count++; 523288ebe2cfSJose Abreu goto drain_data; 523322ad3838SGiuseppe Cavallaro } 523422ad3838SGiuseppe Cavallaro 52355fabb012SOng Boon Leong /* XDP program may adjust header */ 52365fabb012SOng Boon Leong skb_copy_to_linear_data(skb, xdp.data, buf1_len); 523788ebe2cfSJose Abreu skb_put(skb, buf1_len); 523822ad3838SGiuseppe Cavallaro 5239ec222003SJose Abreu /* Data payload copied into SKB, page ready for recycle */ 5240ec222003SJose Abreu page_pool_recycle_direct(rx_q->page_pool, buf->page); 5241ec222003SJose Abreu buf->page = NULL; 524288ebe2cfSJose Abreu } else if (buf1_len) { 5243ec222003SJose Abreu dma_sync_single_for_cpu(priv->device, buf->addr, 52445fabb012SOng Boon Leong buf1_len, dma_dir); 5245ec222003SJose Abreu skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 52465fabb012SOng Boon Leong buf->page, buf->page_offset, 
buf1_len, 5247ec222003SJose Abreu priv->dma_buf_sz); 5248ec222003SJose Abreu 5249ec222003SJose Abreu /* Data payload appended into SKB */ 5250ec222003SJose Abreu page_pool_release_page(rx_q->page_pool, buf->page); 5251ec222003SJose Abreu buf->page = NULL; 52527ac6653aSJeff Kirsher } 525383d7af64SGiuseppe CAVALLARO 525488ebe2cfSJose Abreu if (buf2_len) { 525567afd6d1SJose Abreu dma_sync_single_for_cpu(priv->device, buf->sec_addr, 52565fabb012SOng Boon Leong buf2_len, dma_dir); 525767afd6d1SJose Abreu skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 525888ebe2cfSJose Abreu buf->sec_page, 0, buf2_len, 525967afd6d1SJose Abreu priv->dma_buf_sz); 526067afd6d1SJose Abreu 526167afd6d1SJose Abreu /* Data payload appended into SKB */ 526267afd6d1SJose Abreu page_pool_release_page(rx_q->page_pool, buf->sec_page); 526367afd6d1SJose Abreu buf->sec_page = NULL; 526467afd6d1SJose Abreu } 526567afd6d1SJose Abreu 526688ebe2cfSJose Abreu drain_data: 5267ec222003SJose Abreu if (likely(status & rx_not_ls)) 5268ec222003SJose Abreu goto read_again; 526988ebe2cfSJose Abreu if (!skb) 527088ebe2cfSJose Abreu continue; 5271ec222003SJose Abreu 5272ec222003SJose Abreu /* Got entire packet into SKB. Finish it. 
*/ 5273ec222003SJose Abreu 5274ba1ffd74SGiuseppe CAVALLARO stmmac_get_rx_hwtstamp(priv, p, np, skb); 5275b9381985SVince Bridgers stmmac_rx_vlan(priv->dev, skb); 52767ac6653aSJeff Kirsher skb->protocol = eth_type_trans(skb, priv->dev); 52777ac6653aSJeff Kirsher 5278ceb69499SGiuseppe CAVALLARO if (unlikely(!coe)) 52797ac6653aSJeff Kirsher skb_checksum_none_assert(skb); 528062a2ab93SGiuseppe CAVALLARO else 52817ac6653aSJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY; 528262a2ab93SGiuseppe CAVALLARO 528376067459SJose Abreu if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 528476067459SJose Abreu skb_set_hash(skb, hash, hash_type); 528576067459SJose Abreu 528676067459SJose Abreu skb_record_rx_queue(skb, queue); 52874ccb4585SJose Abreu napi_gro_receive(&ch->rx_napi, skb); 528888ebe2cfSJose Abreu skb = NULL; 52897ac6653aSJeff Kirsher 52907ac6653aSJeff Kirsher priv->dev->stats.rx_packets++; 5291ec222003SJose Abreu priv->dev->stats.rx_bytes += len; 5292cda4985aSJose Abreu count++; 52937ac6653aSJeff Kirsher } 5294ec222003SJose Abreu 529588ebe2cfSJose Abreu if (status & rx_not_ls || skb) { 5296ec222003SJose Abreu rx_q->state_saved = true; 5297ec222003SJose Abreu rx_q->state.skb = skb; 5298ec222003SJose Abreu rx_q->state.error = error; 5299ec222003SJose Abreu rx_q->state.len = len; 53007ac6653aSJeff Kirsher } 53017ac6653aSJeff Kirsher 5302be8b38a7SOng Boon Leong stmmac_finalize_xdp_rx(priv, xdp_status); 5303be8b38a7SOng Boon Leong 530454139cf3SJoao Pinto stmmac_rx_refill(priv, queue); 53057ac6653aSJeff Kirsher 53067ac6653aSJeff Kirsher priv->xstats.rx_pkt_n += count; 530768e9c5deSVijayakannan Ayyathurai priv->xstats.rxq_stats[queue].rx_pkt_n += count; 53087ac6653aSJeff Kirsher 53097ac6653aSJeff Kirsher return count; 53107ac6653aSJeff Kirsher } 53117ac6653aSJeff Kirsher 53124ccb4585SJose Abreu static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 53137ac6653aSJeff Kirsher { 53148fce3331SJose Abreu struct stmmac_channel *ch = 53154ccb4585SJose Abreu 
container_of(napi, struct stmmac_channel, rx_napi); 53168fce3331SJose Abreu struct stmmac_priv *priv = ch->priv_data; 53178fce3331SJose Abreu u32 chan = ch->index; 53184ccb4585SJose Abreu int work_done; 53197ac6653aSJeff Kirsher 53209125cdd1SGiuseppe CAVALLARO priv->xstats.napi_poll++; 5321ce736788SJoao Pinto 5322132c32eeSOng Boon Leong work_done = stmmac_rx(priv, budget, chan); 5323021bd5e3SJose Abreu if (work_done < budget && napi_complete_done(napi, work_done)) { 5324021bd5e3SJose Abreu unsigned long flags; 5325021bd5e3SJose Abreu 5326021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 5327021bd5e3SJose Abreu stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 5328021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 5329021bd5e3SJose Abreu } 5330021bd5e3SJose Abreu 53314ccb4585SJose Abreu return work_done; 53324ccb4585SJose Abreu } 5333ce736788SJoao Pinto 53344ccb4585SJose Abreu static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 53354ccb4585SJose Abreu { 53364ccb4585SJose Abreu struct stmmac_channel *ch = 53374ccb4585SJose Abreu container_of(napi, struct stmmac_channel, tx_napi); 53384ccb4585SJose Abreu struct stmmac_priv *priv = ch->priv_data; 53394ccb4585SJose Abreu u32 chan = ch->index; 53404ccb4585SJose Abreu int work_done; 53414ccb4585SJose Abreu 53424ccb4585SJose Abreu priv->xstats.napi_poll++; 53434ccb4585SJose Abreu 5344132c32eeSOng Boon Leong work_done = stmmac_tx_clean(priv, budget, chan); 5345fa0be0a4SJose Abreu work_done = min(work_done, budget); 53468fce3331SJose Abreu 5347021bd5e3SJose Abreu if (work_done < budget && napi_complete_done(napi, work_done)) { 5348021bd5e3SJose Abreu unsigned long flags; 53494ccb4585SJose Abreu 5350021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 5351021bd5e3SJose Abreu stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 5352021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 5353fa0be0a4SJose Abreu } 53548fce3331SJose Abreu 53557ac6653aSJeff Kirsher return 
work_done; 53567ac6653aSJeff Kirsher } 53577ac6653aSJeff Kirsher 5358132c32eeSOng Boon Leong static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) 5359132c32eeSOng Boon Leong { 5360132c32eeSOng Boon Leong struct stmmac_channel *ch = 5361132c32eeSOng Boon Leong container_of(napi, struct stmmac_channel, rxtx_napi); 5362132c32eeSOng Boon Leong struct stmmac_priv *priv = ch->priv_data; 536381d0885dSSong Yoong Siang int rx_done, tx_done, rxtx_done; 5364132c32eeSOng Boon Leong u32 chan = ch->index; 5365132c32eeSOng Boon Leong 5366132c32eeSOng Boon Leong priv->xstats.napi_poll++; 5367132c32eeSOng Boon Leong 5368132c32eeSOng Boon Leong tx_done = stmmac_tx_clean(priv, budget, chan); 5369132c32eeSOng Boon Leong tx_done = min(tx_done, budget); 5370132c32eeSOng Boon Leong 5371132c32eeSOng Boon Leong rx_done = stmmac_rx_zc(priv, budget, chan); 5372132c32eeSOng Boon Leong 537381d0885dSSong Yoong Siang rxtx_done = max(tx_done, rx_done); 537481d0885dSSong Yoong Siang 5375132c32eeSOng Boon Leong /* If either TX or RX work is not complete, return budget 5376132c32eeSOng Boon Leong * and keep pooling 5377132c32eeSOng Boon Leong */ 537881d0885dSSong Yoong Siang if (rxtx_done >= budget) 5379132c32eeSOng Boon Leong return budget; 5380132c32eeSOng Boon Leong 5381132c32eeSOng Boon Leong /* all work done, exit the polling mode */ 538281d0885dSSong Yoong Siang if (napi_complete_done(napi, rxtx_done)) { 5383132c32eeSOng Boon Leong unsigned long flags; 5384132c32eeSOng Boon Leong 5385132c32eeSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 5386132c32eeSOng Boon Leong /* Both RX and TX work done are compelte, 5387132c32eeSOng Boon Leong * so enable both RX & TX IRQs. 
5388132c32eeSOng Boon Leong */ 5389132c32eeSOng Boon Leong stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 5390132c32eeSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 5391132c32eeSOng Boon Leong } 5392132c32eeSOng Boon Leong 539381d0885dSSong Yoong Siang return min(rxtx_done, budget - 1); 5394132c32eeSOng Boon Leong } 5395132c32eeSOng Boon Leong 53967ac6653aSJeff Kirsher /** 53977ac6653aSJeff Kirsher * stmmac_tx_timeout 53987ac6653aSJeff Kirsher * @dev : Pointer to net device structure 5399d0ea5cbdSJesse Brandeburg * @txqueue: the index of the hanging transmit queue 54007ac6653aSJeff Kirsher * Description: this function is called when a packet transmission fails to 54017284a3f1SGiuseppe CAVALLARO * complete within a reasonable time. The driver will mark the error in the 54027ac6653aSJeff Kirsher * netdev structure and arrange for the device to be reset to a sane state 54037ac6653aSJeff Kirsher * in order to transmit a new packet. 54047ac6653aSJeff Kirsher */ 54050290bd29SMichael S. Tsirkin static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) 54067ac6653aSJeff Kirsher { 54077ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 54087ac6653aSJeff Kirsher 540934877a15SJose Abreu stmmac_global_err(priv); 54107ac6653aSJeff Kirsher } 54117ac6653aSJeff Kirsher 54127ac6653aSJeff Kirsher /** 541301789349SJiri Pirko * stmmac_set_rx_mode - entry point for multicast addressing 54147ac6653aSJeff Kirsher * @dev : pointer to the device structure 54157ac6653aSJeff Kirsher * Description: 54167ac6653aSJeff Kirsher * This function is a driver entry point which gets called by the kernel 54177ac6653aSJeff Kirsher * whenever multicast addresses must be enabled/disabled. 54187ac6653aSJeff Kirsher * Return value: 54197ac6653aSJeff Kirsher * void. 
54207ac6653aSJeff Kirsher */ 542101789349SJiri Pirko static void stmmac_set_rx_mode(struct net_device *dev) 54227ac6653aSJeff Kirsher { 54237ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 54247ac6653aSJeff Kirsher 5425c10d4c82SJose Abreu stmmac_set_filter(priv, priv->hw, dev); 54267ac6653aSJeff Kirsher } 54277ac6653aSJeff Kirsher 54287ac6653aSJeff Kirsher /** 54297ac6653aSJeff Kirsher * stmmac_change_mtu - entry point to change MTU size for the device. 54307ac6653aSJeff Kirsher * @dev : device pointer. 54317ac6653aSJeff Kirsher * @new_mtu : the new MTU size for the device. 54327ac6653aSJeff Kirsher * Description: the Maximum Transfer Unit (MTU) is used by the network layer 54337ac6653aSJeff Kirsher * to drive packet transmission. Ethernet has an MTU of 1500 octets 54347ac6653aSJeff Kirsher * (ETH_DATA_LEN). This value can be changed with ifconfig. 54357ac6653aSJeff Kirsher * Return value: 54367ac6653aSJeff Kirsher * 0 on success and an appropriate (-)ve integer as defined in errno.h 54377ac6653aSJeff Kirsher * file on failure. 
54387ac6653aSJeff Kirsher */ 54397ac6653aSJeff Kirsher static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 54407ac6653aSJeff Kirsher { 544138ddc59dSLABBE Corentin struct stmmac_priv *priv = netdev_priv(dev); 5442eaf4fac4SJose Abreu int txfifosz = priv->plat->tx_fifo_size; 54435b55299eSDavid Wu const int mtu = new_mtu; 5444eaf4fac4SJose Abreu 5445eaf4fac4SJose Abreu if (txfifosz == 0) 5446eaf4fac4SJose Abreu txfifosz = priv->dma_cap.tx_fifo_size; 5447eaf4fac4SJose Abreu 5448eaf4fac4SJose Abreu txfifosz /= priv->plat->tx_queues_to_use; 544938ddc59dSLABBE Corentin 54507ac6653aSJeff Kirsher if (netif_running(dev)) { 545138ddc59dSLABBE Corentin netdev_err(priv->dev, "must be stopped to change its MTU\n"); 54527ac6653aSJeff Kirsher return -EBUSY; 54537ac6653aSJeff Kirsher } 54547ac6653aSJeff Kirsher 54555fabb012SOng Boon Leong if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { 54565fabb012SOng Boon Leong netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); 54575fabb012SOng Boon Leong return -EINVAL; 54585fabb012SOng Boon Leong } 54595fabb012SOng Boon Leong 5460eaf4fac4SJose Abreu new_mtu = STMMAC_ALIGN(new_mtu); 5461eaf4fac4SJose Abreu 5462eaf4fac4SJose Abreu /* If condition true, FIFO is too small or MTU too large */ 5463eaf4fac4SJose Abreu if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 5464eaf4fac4SJose Abreu return -EINVAL; 5465eaf4fac4SJose Abreu 54665b55299eSDavid Wu dev->mtu = mtu; 5467f748be53SAlexandre TORGUE 54687ac6653aSJeff Kirsher netdev_update_features(dev); 54697ac6653aSJeff Kirsher 54707ac6653aSJeff Kirsher return 0; 54717ac6653aSJeff Kirsher } 54727ac6653aSJeff Kirsher 5473c8f44affSMichał Mirosław static netdev_features_t stmmac_fix_features(struct net_device *dev, 5474c8f44affSMichał Mirosław netdev_features_t features) 54757ac6653aSJeff Kirsher { 54767ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 54777ac6653aSJeff Kirsher 547838912bdbSDeepak SIKRI if (priv->plat->rx_coe == 
STMMAC_RX_COE_NONE) 54797ac6653aSJeff Kirsher features &= ~NETIF_F_RXCSUM; 5480d2afb5bdSGiuseppe CAVALLARO 54817ac6653aSJeff Kirsher if (!priv->plat->tx_coe) 5482a188222bSTom Herbert features &= ~NETIF_F_CSUM_MASK; 54837ac6653aSJeff Kirsher 54847ac6653aSJeff Kirsher /* Some GMAC devices have a bugged Jumbo frame support that 54857ac6653aSJeff Kirsher * needs to have the Tx COE disabled for oversized frames 54867ac6653aSJeff Kirsher * (due to limited buffer sizes). In this case we disable 5487ceb69499SGiuseppe CAVALLARO * the TX csum insertion in the TDES and not use SF. 5488ceb69499SGiuseppe CAVALLARO */ 54897ac6653aSJeff Kirsher if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 5490a188222bSTom Herbert features &= ~NETIF_F_CSUM_MASK; 54917ac6653aSJeff Kirsher 5492f748be53SAlexandre TORGUE /* Disable tso if asked by ethtool */ 5493f748be53SAlexandre TORGUE if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 5494f748be53SAlexandre TORGUE if (features & NETIF_F_TSO) 5495f748be53SAlexandre TORGUE priv->tso = true; 5496f748be53SAlexandre TORGUE else 5497f748be53SAlexandre TORGUE priv->tso = false; 5498f748be53SAlexandre TORGUE } 5499f748be53SAlexandre TORGUE 55007ac6653aSJeff Kirsher return features; 55017ac6653aSJeff Kirsher } 55027ac6653aSJeff Kirsher 5503d2afb5bdSGiuseppe CAVALLARO static int stmmac_set_features(struct net_device *netdev, 5504d2afb5bdSGiuseppe CAVALLARO netdev_features_t features) 5505d2afb5bdSGiuseppe CAVALLARO { 5506d2afb5bdSGiuseppe CAVALLARO struct stmmac_priv *priv = netdev_priv(netdev); 550767afd6d1SJose Abreu bool sph_en; 550867afd6d1SJose Abreu u32 chan; 5509d2afb5bdSGiuseppe CAVALLARO 5510d2afb5bdSGiuseppe CAVALLARO /* Keep the COE Type in case of csum is supporting */ 5511d2afb5bdSGiuseppe CAVALLARO if (features & NETIF_F_RXCSUM) 5512d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = priv->plat->rx_coe; 5513d2afb5bdSGiuseppe CAVALLARO else 5514d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = 0; 5515d2afb5bdSGiuseppe CAVALLARO /* No 
	 * check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	/* Split Header is enabled only when both the HW advertises rx_csum
	 * and the driver has sph turned on; it is then programmed per RX
	 * channel below.
	 */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;

	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

	return 0;
}

/* Update the local/link-partner Frame Preemption (FPE) handshake state
 * machine from an IRQ status word and, if any event arrived, kick the
 * FPE workqueue task to continue the handshake.
 */
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	/* Nothing to do unless an event fired and the handshake is enabled */
	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
		return;

	/* If LP has sent verify mPacket, LP is FPE capable */
	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
		if (*lp_state < FPE_STATE_CAPABLE)
			*lp_state = FPE_STATE_CAPABLE;

		/* If user has requested FPE enable, quickly response */
		if (*hs_enable)
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_RESPONSE);
	}

	/* If Local has sent verify mPacket, Local is FPE capable */
	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
		if (*lo_state < FPE_STATE_CAPABLE)
			*lo_state = FPE_STATE_CAPABLE;
	}

	/* If LP has sent response mPacket, LP is entering FPE ON */
	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
		*lp_state = FPE_STATE_ENTERING_ON;

	/* If Local has sent response mPacket, Local is entering FPE ON */
	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
		*lo_state = FPE_STATE_ENTERING_ON;

	/* Schedule the FPE task unless teardown is in progress or the task
	 * is already queued (__FPE_TASK_SCHED acts as a one-shot latch).
	 */
	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
	    priv->fpe_wq) {
		queue_work(priv->fpe_wq, &priv->fpe_task);
	}
}

/* Handle the interrupt sources shared by the single-IRQ and per-vector
 * paths: wakeup, EST/FPE events, core (GMAC/XGMAC) status, MTL per-queue
 * status, PCS link state and the timestamp engine.
 */
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
				      &priv->xstats, tx_cnt);

	if (priv->dma_cap.fpesel) {
		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
						   priv->dev);

		stmmac_fpe_event_status(priv, status);
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		/* Read (and thereby ack) the MTL status of every queue;
		 * the returned value is not used further here.
		 */
		for (queue = 0; queue < queues_count; queue++) {
			status = stmmac_host_mtl_irq_status(priv, priv->hw,
							    queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(priv->dev);
			else
				netif_carrier_off(priv->dev);
		}

		stmmac_timestamp_interrupt(priv, priv);
	}
}

/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
56567ac6653aSJeff Kirsher 56578532f613SOng Boon Leong static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 56588532f613SOng Boon Leong { 56598532f613SOng Boon Leong struct net_device *dev = (struct net_device *)dev_id; 56608532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 56618532f613SOng Boon Leong 56628532f613SOng Boon Leong if (unlikely(!dev)) { 56638532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 56648532f613SOng Boon Leong return IRQ_NONE; 56658532f613SOng Boon Leong } 56668532f613SOng Boon Leong 56678532f613SOng Boon Leong /* Check if adapter is up */ 56688532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 56698532f613SOng Boon Leong return IRQ_HANDLED; 56708532f613SOng Boon Leong 56718532f613SOng Boon Leong /* To handle Common interrupts */ 56728532f613SOng Boon Leong stmmac_common_interrupt(priv); 56738532f613SOng Boon Leong 56748532f613SOng Boon Leong return IRQ_HANDLED; 56758532f613SOng Boon Leong } 56768532f613SOng Boon Leong 56778532f613SOng Boon Leong static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 56788532f613SOng Boon Leong { 56798532f613SOng Boon Leong struct net_device *dev = (struct net_device *)dev_id; 56808532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 56818532f613SOng Boon Leong 56828532f613SOng Boon Leong if (unlikely(!dev)) { 56838532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 56848532f613SOng Boon Leong return IRQ_NONE; 56858532f613SOng Boon Leong } 56868532f613SOng Boon Leong 56878532f613SOng Boon Leong /* Check if adapter is up */ 56888532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 56898532f613SOng Boon Leong return IRQ_HANDLED; 56908532f613SOng Boon Leong 56918532f613SOng Boon Leong /* Check if a fatal error happened */ 56928532f613SOng Boon Leong stmmac_safety_feat_interrupt(priv); 56938532f613SOng Boon Leong 56948532f613SOng Boon Leong return IRQ_HANDLED; 
56958532f613SOng Boon Leong } 56968532f613SOng Boon Leong 56978532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 56988532f613SOng Boon Leong { 56998532f613SOng Boon Leong struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 57008532f613SOng Boon Leong int chan = tx_q->queue_index; 57018532f613SOng Boon Leong struct stmmac_priv *priv; 57028532f613SOng Boon Leong int status; 57038532f613SOng Boon Leong 57048532f613SOng Boon Leong priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); 57058532f613SOng Boon Leong 57068532f613SOng Boon Leong if (unlikely(!data)) { 57078532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 57088532f613SOng Boon Leong return IRQ_NONE; 57098532f613SOng Boon Leong } 57108532f613SOng Boon Leong 57118532f613SOng Boon Leong /* Check if adapter is up */ 57128532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 57138532f613SOng Boon Leong return IRQ_HANDLED; 57148532f613SOng Boon Leong 57158532f613SOng Boon Leong status = stmmac_napi_check(priv, chan, DMA_DIR_TX); 57168532f613SOng Boon Leong 57178532f613SOng Boon Leong if (unlikely(status & tx_hard_error_bump_tc)) { 57188532f613SOng Boon Leong /* Try to bump up the dma threshold on this failure */ 57198532f613SOng Boon Leong if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 57208532f613SOng Boon Leong tc <= 256) { 57218532f613SOng Boon Leong tc += 64; 57228532f613SOng Boon Leong if (priv->plat->force_thresh_dma_mode) 57238532f613SOng Boon Leong stmmac_set_dma_operation_mode(priv, 57248532f613SOng Boon Leong tc, 57258532f613SOng Boon Leong tc, 57268532f613SOng Boon Leong chan); 57278532f613SOng Boon Leong else 57288532f613SOng Boon Leong stmmac_set_dma_operation_mode(priv, 57298532f613SOng Boon Leong tc, 57308532f613SOng Boon Leong SF_DMA_MODE, 57318532f613SOng Boon Leong chan); 57328532f613SOng Boon Leong priv->xstats.threshold = tc; 57338532f613SOng Boon Leong } 57348532f613SOng Boon Leong } else if 
(unlikely(status == tx_hard_error)) { 57358532f613SOng Boon Leong stmmac_tx_err(priv, chan); 57368532f613SOng Boon Leong } 57378532f613SOng Boon Leong 57388532f613SOng Boon Leong return IRQ_HANDLED; 57398532f613SOng Boon Leong } 57408532f613SOng Boon Leong 57418532f613SOng Boon Leong static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) 57428532f613SOng Boon Leong { 57438532f613SOng Boon Leong struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; 57448532f613SOng Boon Leong int chan = rx_q->queue_index; 57458532f613SOng Boon Leong struct stmmac_priv *priv; 57468532f613SOng Boon Leong 57478532f613SOng Boon Leong priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); 57488532f613SOng Boon Leong 57498532f613SOng Boon Leong if (unlikely(!data)) { 57508532f613SOng Boon Leong netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 57518532f613SOng Boon Leong return IRQ_NONE; 57528532f613SOng Boon Leong } 57538532f613SOng Boon Leong 57548532f613SOng Boon Leong /* Check if adapter is up */ 57558532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 57568532f613SOng Boon Leong return IRQ_HANDLED; 57578532f613SOng Boon Leong 57588532f613SOng Boon Leong stmmac_napi_check(priv, chan, DMA_DIR_RX); 57598532f613SOng Boon Leong 57608532f613SOng Boon Leong return IRQ_HANDLED; 57618532f613SOng Boon Leong } 57628532f613SOng Boon Leong 57637ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 57647ac6653aSJeff Kirsher /* Polling receive - used by NETCONSOLE and other diagnostic tools 5765ceb69499SGiuseppe CAVALLARO * to allow network I/O with interrupts disabled. 
5766ceb69499SGiuseppe CAVALLARO */ 57677ac6653aSJeff Kirsher static void stmmac_poll_controller(struct net_device *dev) 57687ac6653aSJeff Kirsher { 57698532f613SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 57708532f613SOng Boon Leong int i; 57718532f613SOng Boon Leong 57728532f613SOng Boon Leong /* If adapter is down, do nothing */ 57738532f613SOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state)) 57748532f613SOng Boon Leong return; 57758532f613SOng Boon Leong 57768532f613SOng Boon Leong if (priv->plat->multi_msi_en) { 57778532f613SOng Boon Leong for (i = 0; i < priv->plat->rx_queues_to_use; i++) 57788532f613SOng Boon Leong stmmac_msi_intr_rx(0, &priv->rx_queue[i]); 57798532f613SOng Boon Leong 57808532f613SOng Boon Leong for (i = 0; i < priv->plat->tx_queues_to_use; i++) 57818532f613SOng Boon Leong stmmac_msi_intr_tx(0, &priv->tx_queue[i]); 57828532f613SOng Boon Leong } else { 57837ac6653aSJeff Kirsher disable_irq(dev->irq); 57847ac6653aSJeff Kirsher stmmac_interrupt(dev->irq, dev); 57857ac6653aSJeff Kirsher enable_irq(dev->irq); 57867ac6653aSJeff Kirsher } 57878532f613SOng Boon Leong } 57887ac6653aSJeff Kirsher #endif 57897ac6653aSJeff Kirsher 57907ac6653aSJeff Kirsher /** 57917ac6653aSJeff Kirsher * stmmac_ioctl - Entry point for the Ioctl 57927ac6653aSJeff Kirsher * @dev: Device pointer. 57937ac6653aSJeff Kirsher * @rq: An IOCTL specefic structure, that can contain a pointer to 57947ac6653aSJeff Kirsher * a proprietary structure used to pass information to the driver. 57957ac6653aSJeff Kirsher * @cmd: IOCTL command 57967ac6653aSJeff Kirsher * Description: 579732ceabcaSGiuseppe CAVALLARO * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* MII register access is delegated to phylink */
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

/* tc block callback: offload a classifier while all queues are quiesced.
 * Only CLSU32 and CLSFLOWER are dispatched; anything else returns
 * -EOPNOTSUPP.
 */
static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	/* Stop all queues while the classifier tables are reprogrammed */
	__stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);

/* ndo_setup_tc entry point: route tc offload requests (block callbacks,
 * CBS, taprio, ETF qdiscs) to the matching stmmac_tc_* helper.
 */
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/* ndo_select_queue: GSO (TSO/USO) traffic is pinned to queue 0, everything
 * else uses the stack's default pick modulo the real queue count.
 */
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's use always the Queue 0
		 * because if TSO/USO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

/* ndo_set_mac_address: update the software MAC address and program it into
 * the MAC's address filter register 0, with the device runtime-resumed.
 */
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

/* Dump @size descriptors starting at @head into @seq, printing each entry's
 * DMA address and four descriptor words; @extend_desc selects the extended
 * (dma_extended_desc) vs. basic (dma_desc) layout.
 */
static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	dma_addr_t dma_addr;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			dma_addr = dma_phy_addr + i * sizeof(*ep);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			dma_addr = dma_phy_addr + i * sizeof(*p);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

/* debugfs "descriptors_status" show: dump every RX ring and every TX ring
 * (TX only when TBS descriptors are not in use, as those are skipped).
 */
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Rings only exist while the interface is up */
	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);

/* debugfs "dma_cap" show: pretty-print the DMA HW feature register
 * capabilities cached in priv->dma_cap.
 */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);

/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Only react to events for stmmac-owned net devices */
	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};

/* Create the per-netdev debugfs directory and its "descriptors_status" and
 * "dma_cap" entries, under rtnl_lock to serialize against renames.
 */
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}

/* Tear down everything stmmac_init_fs() created */
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

/* Bitwise little-endian CRC-32 (poly 0xedb88320) over the low
 * get_bitmask_order(VLAN_VID_MASK) bits of @vid_le, as consumed by the
 * MAC's VLAN hash filter.
 */
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		/* Load the next input byte every 8 bits */
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}

/* Reprogram the MAC VLAN filter from priv->active_vlans: build a 16-bit
 * hash of all active VIDs, or fall back to perfect match when the HW has
 * no VLAN hash filter (at most 2 VIDs, since VID 0 always passes).
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		/* NOTE(review): 'vid' here holds whatever value the
		 * for_each_set_bit() loop left behind, not necessarily an
		 * active VID — verify this is the intended perfect-match
		 * value for the <= 2 VID case.
		 */
		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}

/* ndo_vlan_rx_add_vid: record the VID, refresh the hash/perfect filter,
 * then program the per-VID HW filter entry when the MAC provides one.
 * The VID bit is rolled back if the hash update fails.
 */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		return ret;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return 0;
}

/* ndo_vlan_rx_kill_vid: inverse of stmmac_vlan_rx_add_vid(), performed
 * with the device runtime-resumed.
 */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			goto del_vlan_error;
	}

	ret = stmmac_vlan_update(priv, is_double);

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}

/* ndo_bpf: dispatch XDP program attach and AF_XDP pool setup requests */
static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
					     bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}
Boon Leong 62738b278a5bSOng Boon Leong static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, 62748b278a5bSOng Boon Leong struct xdp_frame **frames, u32 flags) 62758b278a5bSOng Boon Leong { 62768b278a5bSOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 62778b278a5bSOng Boon Leong int cpu = smp_processor_id(); 62788b278a5bSOng Boon Leong struct netdev_queue *nq; 62798b278a5bSOng Boon Leong int i, nxmit = 0; 62808b278a5bSOng Boon Leong int queue; 62818b278a5bSOng Boon Leong 62828b278a5bSOng Boon Leong if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) 62838b278a5bSOng Boon Leong return -ENETDOWN; 62848b278a5bSOng Boon Leong 62858b278a5bSOng Boon Leong if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 62868b278a5bSOng Boon Leong return -EINVAL; 62878b278a5bSOng Boon Leong 62888b278a5bSOng Boon Leong queue = stmmac_xdp_get_tx_queue(priv, cpu); 62898b278a5bSOng Boon Leong nq = netdev_get_tx_queue(priv->dev, queue); 62908b278a5bSOng Boon Leong 62918b278a5bSOng Boon Leong __netif_tx_lock(nq, cpu); 62928b278a5bSOng Boon Leong /* Avoids TX time-out as we are sharing with slow path */ 62938b278a5bSOng Boon Leong nq->trans_start = jiffies; 62948b278a5bSOng Boon Leong 62958b278a5bSOng Boon Leong for (i = 0; i < num_frames; i++) { 62968b278a5bSOng Boon Leong int res; 62978b278a5bSOng Boon Leong 62988b278a5bSOng Boon Leong res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); 62998b278a5bSOng Boon Leong if (res == STMMAC_XDP_CONSUMED) 63008b278a5bSOng Boon Leong break; 63018b278a5bSOng Boon Leong 63028b278a5bSOng Boon Leong nxmit++; 63038b278a5bSOng Boon Leong } 63048b278a5bSOng Boon Leong 63058b278a5bSOng Boon Leong if (flags & XDP_XMIT_FLUSH) { 63068b278a5bSOng Boon Leong stmmac_flush_tx_descriptors(priv, queue); 63078b278a5bSOng Boon Leong stmmac_tx_timer_arm(priv, queue); 63088b278a5bSOng Boon Leong } 63098b278a5bSOng Boon Leong 63108b278a5bSOng Boon Leong __netif_tx_unlock(nq); 63118b278a5bSOng Boon Leong 63128b278a5bSOng Boon Leong return nxmit; 
63138b278a5bSOng Boon Leong } 63148b278a5bSOng Boon Leong 6315bba2556eSOng Boon Leong void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) 6316bba2556eSOng Boon Leong { 6317bba2556eSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6318bba2556eSOng Boon Leong unsigned long flags; 6319bba2556eSOng Boon Leong 6320bba2556eSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6321bba2556eSOng Boon Leong stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6322bba2556eSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6323bba2556eSOng Boon Leong 6324bba2556eSOng Boon Leong stmmac_stop_rx_dma(priv, queue); 6325bba2556eSOng Boon Leong __free_dma_rx_desc_resources(priv, queue); 6326bba2556eSOng Boon Leong } 6327bba2556eSOng Boon Leong 6328bba2556eSOng Boon Leong void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) 6329bba2556eSOng Boon Leong { 6330bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 6331bba2556eSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6332bba2556eSOng Boon Leong unsigned long flags; 6333bba2556eSOng Boon Leong u32 buf_size; 6334bba2556eSOng Boon Leong int ret; 6335bba2556eSOng Boon Leong 6336bba2556eSOng Boon Leong ret = __alloc_dma_rx_desc_resources(priv, queue); 6337bba2556eSOng Boon Leong if (ret) { 6338bba2556eSOng Boon Leong netdev_err(priv->dev, "Failed to alloc RX desc.\n"); 6339bba2556eSOng Boon Leong return; 6340bba2556eSOng Boon Leong } 6341bba2556eSOng Boon Leong 6342bba2556eSOng Boon Leong ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); 6343bba2556eSOng Boon Leong if (ret) { 6344bba2556eSOng Boon Leong __free_dma_rx_desc_resources(priv, queue); 6345bba2556eSOng Boon Leong netdev_err(priv->dev, "Failed to init RX desc.\n"); 6346bba2556eSOng Boon Leong return; 6347bba2556eSOng Boon Leong } 6348bba2556eSOng Boon Leong 6349bba2556eSOng Boon Leong stmmac_clear_rx_descriptors(priv, queue); 6350bba2556eSOng Boon Leong 6351bba2556eSOng Boon 
Leong stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6352bba2556eSOng Boon Leong rx_q->dma_rx_phy, rx_q->queue_index); 6353bba2556eSOng Boon Leong 6354bba2556eSOng Boon Leong rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * 6355bba2556eSOng Boon Leong sizeof(struct dma_desc)); 6356bba2556eSOng Boon Leong stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6357bba2556eSOng Boon Leong rx_q->rx_tail_addr, rx_q->queue_index); 6358bba2556eSOng Boon Leong 6359bba2556eSOng Boon Leong if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6360bba2556eSOng Boon Leong buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6361bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 6362bba2556eSOng Boon Leong buf_size, 6363bba2556eSOng Boon Leong rx_q->queue_index); 6364bba2556eSOng Boon Leong } else { 6365bba2556eSOng Boon Leong stmmac_set_dma_bfsize(priv, priv->ioaddr, 6366bba2556eSOng Boon Leong priv->dma_buf_sz, 6367bba2556eSOng Boon Leong rx_q->queue_index); 6368bba2556eSOng Boon Leong } 6369bba2556eSOng Boon Leong 6370bba2556eSOng Boon Leong stmmac_start_rx_dma(priv, queue); 6371bba2556eSOng Boon Leong 6372bba2556eSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6373bba2556eSOng Boon Leong stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6374bba2556eSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6375bba2556eSOng Boon Leong } 6376bba2556eSOng Boon Leong 6377132c32eeSOng Boon Leong void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) 6378132c32eeSOng Boon Leong { 6379132c32eeSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6380132c32eeSOng Boon Leong unsigned long flags; 6381132c32eeSOng Boon Leong 6382132c32eeSOng Boon Leong spin_lock_irqsave(&ch->lock, flags); 6383132c32eeSOng Boon Leong stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6384132c32eeSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6385132c32eeSOng Boon Leong 6386132c32eeSOng Boon Leong stmmac_stop_tx_dma(priv, queue); 
6387132c32eeSOng Boon Leong __free_dma_tx_desc_resources(priv, queue); 6388132c32eeSOng Boon Leong } 6389132c32eeSOng Boon Leong 6390132c32eeSOng Boon Leong void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) 6391132c32eeSOng Boon Leong { 6392132c32eeSOng Boon Leong struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 6393132c32eeSOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 6394132c32eeSOng Boon Leong unsigned long flags; 6395132c32eeSOng Boon Leong int ret; 6396132c32eeSOng Boon Leong 6397132c32eeSOng Boon Leong ret = __alloc_dma_tx_desc_resources(priv, queue); 6398132c32eeSOng Boon Leong if (ret) { 6399132c32eeSOng Boon Leong netdev_err(priv->dev, "Failed to alloc TX desc.\n"); 6400132c32eeSOng Boon Leong return; 6401132c32eeSOng Boon Leong } 6402132c32eeSOng Boon Leong 6403132c32eeSOng Boon Leong ret = __init_dma_tx_desc_rings(priv, queue); 6404132c32eeSOng Boon Leong if (ret) { 6405132c32eeSOng Boon Leong __free_dma_tx_desc_resources(priv, queue); 6406132c32eeSOng Boon Leong netdev_err(priv->dev, "Failed to init TX desc.\n"); 6407132c32eeSOng Boon Leong return; 6408132c32eeSOng Boon Leong } 6409132c32eeSOng Boon Leong 6410132c32eeSOng Boon Leong stmmac_clear_tx_descriptors(priv, queue); 6411132c32eeSOng Boon Leong 6412132c32eeSOng Boon Leong stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6413132c32eeSOng Boon Leong tx_q->dma_tx_phy, tx_q->queue_index); 6414132c32eeSOng Boon Leong 6415132c32eeSOng Boon Leong if (tx_q->tbs & STMMAC_TBS_AVAIL) 6416132c32eeSOng Boon Leong stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); 6417132c32eeSOng Boon Leong 6418132c32eeSOng Boon Leong tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6419132c32eeSOng Boon Leong stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6420132c32eeSOng Boon Leong tx_q->tx_tail_addr, tx_q->queue_index); 6421132c32eeSOng Boon Leong 6422132c32eeSOng Boon Leong stmmac_start_tx_dma(priv, queue); 6423132c32eeSOng Boon Leong 6424132c32eeSOng Boon Leong 
spin_lock_irqsave(&ch->lock, flags); 6425132c32eeSOng Boon Leong stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6426132c32eeSOng Boon Leong spin_unlock_irqrestore(&ch->lock, flags); 6427132c32eeSOng Boon Leong } 6428132c32eeSOng Boon Leong 6429bba2556eSOng Boon Leong int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) 6430bba2556eSOng Boon Leong { 6431bba2556eSOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 6432bba2556eSOng Boon Leong struct stmmac_rx_queue *rx_q; 6433132c32eeSOng Boon Leong struct stmmac_tx_queue *tx_q; 6434bba2556eSOng Boon Leong struct stmmac_channel *ch; 6435bba2556eSOng Boon Leong 6436bba2556eSOng Boon Leong if (test_bit(STMMAC_DOWN, &priv->state) || 6437bba2556eSOng Boon Leong !netif_carrier_ok(priv->dev)) 6438bba2556eSOng Boon Leong return -ENETDOWN; 6439bba2556eSOng Boon Leong 6440bba2556eSOng Boon Leong if (!stmmac_xdp_is_enabled(priv)) 6441bba2556eSOng Boon Leong return -ENXIO; 6442bba2556eSOng Boon Leong 6443132c32eeSOng Boon Leong if (queue >= priv->plat->rx_queues_to_use || 6444132c32eeSOng Boon Leong queue >= priv->plat->tx_queues_to_use) 6445bba2556eSOng Boon Leong return -EINVAL; 6446bba2556eSOng Boon Leong 6447bba2556eSOng Boon Leong rx_q = &priv->rx_queue[queue]; 6448132c32eeSOng Boon Leong tx_q = &priv->tx_queue[queue]; 6449bba2556eSOng Boon Leong ch = &priv->channel[queue]; 6450bba2556eSOng Boon Leong 6451132c32eeSOng Boon Leong if (!rx_q->xsk_pool && !tx_q->xsk_pool) 6452bba2556eSOng Boon Leong return -ENXIO; 6453bba2556eSOng Boon Leong 6454132c32eeSOng Boon Leong if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { 6455bba2556eSOng Boon Leong /* EQoS does not have per-DMA channel SW interrupt, 6456bba2556eSOng Boon Leong * so we schedule RX Napi straight-away. 
6457bba2556eSOng Boon Leong */ 6458132c32eeSOng Boon Leong if (likely(napi_schedule_prep(&ch->rxtx_napi))) 6459132c32eeSOng Boon Leong __napi_schedule(&ch->rxtx_napi); 6460bba2556eSOng Boon Leong } 6461bba2556eSOng Boon Leong 6462bba2556eSOng Boon Leong return 0; 6463bba2556eSOng Boon Leong } 6464bba2556eSOng Boon Leong 64657ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = { 64667ac6653aSJeff Kirsher .ndo_open = stmmac_open, 64677ac6653aSJeff Kirsher .ndo_start_xmit = stmmac_xmit, 64687ac6653aSJeff Kirsher .ndo_stop = stmmac_release, 64697ac6653aSJeff Kirsher .ndo_change_mtu = stmmac_change_mtu, 64707ac6653aSJeff Kirsher .ndo_fix_features = stmmac_fix_features, 6471d2afb5bdSGiuseppe CAVALLARO .ndo_set_features = stmmac_set_features, 647201789349SJiri Pirko .ndo_set_rx_mode = stmmac_set_rx_mode, 64737ac6653aSJeff Kirsher .ndo_tx_timeout = stmmac_tx_timeout, 6474a7605370SArnd Bergmann .ndo_eth_ioctl = stmmac_ioctl, 64754dbbe8ddSJose Abreu .ndo_setup_tc = stmmac_setup_tc, 64764993e5b3SJose Abreu .ndo_select_queue = stmmac_select_queue, 64777ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 64787ac6653aSJeff Kirsher .ndo_poll_controller = stmmac_poll_controller, 64797ac6653aSJeff Kirsher #endif 6480a830405eSBhadram Varka .ndo_set_mac_address = stmmac_set_mac_address, 64813cd1cfcbSJose Abreu .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 64823cd1cfcbSJose Abreu .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 64835fabb012SOng Boon Leong .ndo_bpf = stmmac_bpf, 64848b278a5bSOng Boon Leong .ndo_xdp_xmit = stmmac_xdp_xmit, 6485bba2556eSOng Boon Leong .ndo_xsk_wakeup = stmmac_xsk_wakeup, 64867ac6653aSJeff Kirsher }; 64877ac6653aSJeff Kirsher 648834877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv) 648934877a15SJose Abreu { 649034877a15SJose Abreu if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 649134877a15SJose Abreu return; 649234877a15SJose Abreu if (test_bit(STMMAC_DOWN, &priv->state)) 
649334877a15SJose Abreu return; 649434877a15SJose Abreu 649534877a15SJose Abreu netdev_err(priv->dev, "Reset adapter.\n"); 649634877a15SJose Abreu 649734877a15SJose Abreu rtnl_lock(); 649834877a15SJose Abreu netif_trans_update(priv->dev); 649934877a15SJose Abreu while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 650034877a15SJose Abreu usleep_range(1000, 2000); 650134877a15SJose Abreu 650234877a15SJose Abreu set_bit(STMMAC_DOWN, &priv->state); 650334877a15SJose Abreu dev_close(priv->dev); 650400f54e68SPetr Machata dev_open(priv->dev, NULL); 650534877a15SJose Abreu clear_bit(STMMAC_DOWN, &priv->state); 650634877a15SJose Abreu clear_bit(STMMAC_RESETING, &priv->state); 650734877a15SJose Abreu rtnl_unlock(); 650834877a15SJose Abreu } 650934877a15SJose Abreu 651034877a15SJose Abreu static void stmmac_service_task(struct work_struct *work) 651134877a15SJose Abreu { 651234877a15SJose Abreu struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 651334877a15SJose Abreu service_task); 651434877a15SJose Abreu 651534877a15SJose Abreu stmmac_reset_subtask(priv); 651634877a15SJose Abreu clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 651734877a15SJose Abreu } 651834877a15SJose Abreu 65197ac6653aSJeff Kirsher /** 6520cf3f047bSGiuseppe CAVALLARO * stmmac_hw_init - Init the MAC device 652132ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 6522732fdf0eSGiuseppe CAVALLARO * Description: this function is to configure the MAC device according to 6523732fdf0eSGiuseppe CAVALLARO * some platform parameters or the HW capability register. It prepares the 6524732fdf0eSGiuseppe CAVALLARO * driver to use either ring or chain modes and to setup either enhanced or 6525732fdf0eSGiuseppe CAVALLARO * normal descriptors. 
6526cf3f047bSGiuseppe CAVALLARO */ 6527cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv) 6528cf3f047bSGiuseppe CAVALLARO { 65295f0456b4SJose Abreu int ret; 6530cf3f047bSGiuseppe CAVALLARO 65319f93ac8dSLABBE Corentin /* dwmac-sun8i only work in chain mode */ 65329f93ac8dSLABBE Corentin if (priv->plat->has_sun8i) 65339f93ac8dSLABBE Corentin chain_mode = 1; 65345f0456b4SJose Abreu priv->chain_mode = chain_mode; 65359f93ac8dSLABBE Corentin 65365f0456b4SJose Abreu /* Initialize HW Interface */ 65375f0456b4SJose Abreu ret = stmmac_hwif_init(priv); 65385f0456b4SJose Abreu if (ret) 65395f0456b4SJose Abreu return ret; 65404a7d666aSGiuseppe CAVALLARO 6541cf3f047bSGiuseppe CAVALLARO /* Get the HW capability (new GMAC newer than 3.50a) */ 6542cf3f047bSGiuseppe CAVALLARO priv->hw_cap_support = stmmac_get_hw_features(priv); 6543cf3f047bSGiuseppe CAVALLARO if (priv->hw_cap_support) { 654438ddc59dSLABBE Corentin dev_info(priv->device, "DMA HW capability register supported\n"); 6545cf3f047bSGiuseppe CAVALLARO 6546cf3f047bSGiuseppe CAVALLARO /* We can override some gmac/dma configuration fields: e.g. 6547cf3f047bSGiuseppe CAVALLARO * enh_desc, tx_coe (e.g. that are passed through the 6548cf3f047bSGiuseppe CAVALLARO * platform) with the values from the HW capability 6549cf3f047bSGiuseppe CAVALLARO * register (if supported). 
6550cf3f047bSGiuseppe CAVALLARO */ 6551cf3f047bSGiuseppe CAVALLARO priv->plat->enh_desc = priv->dma_cap.enh_desc; 65525a9b876eSLing Pei Lee priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && 65535a9b876eSLing Pei Lee !priv->plat->use_phy_wol; 65543fe5cadbSGiuseppe CAVALLARO priv->hw->pmt = priv->plat->pmt; 6555b8ef7020SBiao Huang if (priv->dma_cap.hash_tb_sz) { 6556b8ef7020SBiao Huang priv->hw->multicast_filter_bins = 6557b8ef7020SBiao Huang (BIT(priv->dma_cap.hash_tb_sz) << 5); 6558b8ef7020SBiao Huang priv->hw->mcast_bits_log2 = 6559b8ef7020SBiao Huang ilog2(priv->hw->multicast_filter_bins); 6560b8ef7020SBiao Huang } 656138912bdbSDeepak SIKRI 6562a8df35d4SEzequiel Garcia /* TXCOE doesn't work in thresh DMA mode */ 6563a8df35d4SEzequiel Garcia if (priv->plat->force_thresh_dma_mode) 6564a8df35d4SEzequiel Garcia priv->plat->tx_coe = 0; 6565a8df35d4SEzequiel Garcia else 656638912bdbSDeepak SIKRI priv->plat->tx_coe = priv->dma_cap.tx_coe; 6567a8df35d4SEzequiel Garcia 6568f748be53SAlexandre TORGUE /* In case of GMAC4 rx_coe is from HW cap register. 
*/ 6569f748be53SAlexandre TORGUE priv->plat->rx_coe = priv->dma_cap.rx_coe; 657038912bdbSDeepak SIKRI 657138912bdbSDeepak SIKRI if (priv->dma_cap.rx_coe_type2) 657238912bdbSDeepak SIKRI priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 657338912bdbSDeepak SIKRI else if (priv->dma_cap.rx_coe_type1) 657438912bdbSDeepak SIKRI priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; 657538912bdbSDeepak SIKRI 657638ddc59dSLABBE Corentin } else { 657738ddc59dSLABBE Corentin dev_info(priv->device, "No HW DMA feature register supported\n"); 657838ddc59dSLABBE Corentin } 6579cf3f047bSGiuseppe CAVALLARO 6580d2afb5bdSGiuseppe CAVALLARO if (priv->plat->rx_coe) { 6581d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = priv->plat->rx_coe; 658238ddc59dSLABBE Corentin dev_info(priv->device, "RX Checksum Offload Engine supported\n"); 6583f748be53SAlexandre TORGUE if (priv->synopsys_id < DWMAC_CORE_4_00) 658438ddc59dSLABBE Corentin dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); 6585d2afb5bdSGiuseppe CAVALLARO } 6586cf3f047bSGiuseppe CAVALLARO if (priv->plat->tx_coe) 658738ddc59dSLABBE Corentin dev_info(priv->device, "TX Checksum insertion supported\n"); 6588cf3f047bSGiuseppe CAVALLARO 6589cf3f047bSGiuseppe CAVALLARO if (priv->plat->pmt) { 659038ddc59dSLABBE Corentin dev_info(priv->device, "Wake-Up On Lan supported\n"); 6591cf3f047bSGiuseppe CAVALLARO device_set_wakeup_capable(priv->device, 1); 6592cf3f047bSGiuseppe CAVALLARO } 6593cf3f047bSGiuseppe CAVALLARO 6594f748be53SAlexandre TORGUE if (priv->dma_cap.tsoen) 659538ddc59dSLABBE Corentin dev_info(priv->device, "TSO supported\n"); 6596f748be53SAlexandre TORGUE 6597e0f9956aSChuah, Kim Tatt priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; 6598e0f9956aSChuah, Kim Tatt priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; 6599e0f9956aSChuah, Kim Tatt 66007cfde0afSJose Abreu /* Run HW quirks, if any */ 66017cfde0afSJose Abreu if (priv->hwif_quirks) { 66027cfde0afSJose Abreu ret = priv->hwif_quirks(priv); 66037cfde0afSJose Abreu if (ret) 
66047cfde0afSJose Abreu return ret; 66057cfde0afSJose Abreu } 66067cfde0afSJose Abreu 66073b509466SJose Abreu /* Rx Watchdog is available in the COREs newer than the 3.40. 66083b509466SJose Abreu * In some case, for example on bugged HW this feature 66093b509466SJose Abreu * has to be disable and this can be done by passing the 66103b509466SJose Abreu * riwt_off field from the platform. 66113b509466SJose Abreu */ 66123b509466SJose Abreu if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 66133b509466SJose Abreu (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 66143b509466SJose Abreu priv->use_riwt = 1; 66153b509466SJose Abreu dev_info(priv->device, 66163b509466SJose Abreu "Enable RX Mitigation via HW Watchdog Timer\n"); 66173b509466SJose Abreu } 66183b509466SJose Abreu 6619c24602efSGiuseppe CAVALLARO return 0; 6620cf3f047bSGiuseppe CAVALLARO } 6621cf3f047bSGiuseppe CAVALLARO 66220366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev) 66230366f7e0SOng Boon Leong { 66240366f7e0SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 66250366f7e0SOng Boon Leong u32 queue, maxq; 66260366f7e0SOng Boon Leong 66270366f7e0SOng Boon Leong maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 66280366f7e0SOng Boon Leong 66290366f7e0SOng Boon Leong for (queue = 0; queue < maxq; queue++) { 66300366f7e0SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 66310366f7e0SOng Boon Leong 66320366f7e0SOng Boon Leong ch->priv_data = priv; 66330366f7e0SOng Boon Leong ch->index = queue; 66342b94f526SMarek Szyprowski spin_lock_init(&ch->lock); 66350366f7e0SOng Boon Leong 66360366f7e0SOng Boon Leong if (queue < priv->plat->rx_queues_to_use) { 66370366f7e0SOng Boon Leong netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, 66380366f7e0SOng Boon Leong NAPI_POLL_WEIGHT); 66390366f7e0SOng Boon Leong } 66400366f7e0SOng Boon Leong if (queue < priv->plat->tx_queues_to_use) { 66410366f7e0SOng Boon Leong netif_tx_napi_add(dev, &ch->tx_napi, 
66420366f7e0SOng Boon Leong stmmac_napi_poll_tx, 66430366f7e0SOng Boon Leong NAPI_POLL_WEIGHT); 66440366f7e0SOng Boon Leong } 6645132c32eeSOng Boon Leong if (queue < priv->plat->rx_queues_to_use && 6646132c32eeSOng Boon Leong queue < priv->plat->tx_queues_to_use) { 6647132c32eeSOng Boon Leong netif_napi_add(dev, &ch->rxtx_napi, 6648132c32eeSOng Boon Leong stmmac_napi_poll_rxtx, 6649132c32eeSOng Boon Leong NAPI_POLL_WEIGHT); 6650132c32eeSOng Boon Leong } 66510366f7e0SOng Boon Leong } 66520366f7e0SOng Boon Leong } 66530366f7e0SOng Boon Leong 66540366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev) 66550366f7e0SOng Boon Leong { 66560366f7e0SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 66570366f7e0SOng Boon Leong u32 queue, maxq; 66580366f7e0SOng Boon Leong 66590366f7e0SOng Boon Leong maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 66600366f7e0SOng Boon Leong 66610366f7e0SOng Boon Leong for (queue = 0; queue < maxq; queue++) { 66620366f7e0SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 66630366f7e0SOng Boon Leong 66640366f7e0SOng Boon Leong if (queue < priv->plat->rx_queues_to_use) 66650366f7e0SOng Boon Leong netif_napi_del(&ch->rx_napi); 66660366f7e0SOng Boon Leong if (queue < priv->plat->tx_queues_to_use) 66670366f7e0SOng Boon Leong netif_napi_del(&ch->tx_napi); 6668132c32eeSOng Boon Leong if (queue < priv->plat->rx_queues_to_use && 6669132c32eeSOng Boon Leong queue < priv->plat->tx_queues_to_use) { 6670132c32eeSOng Boon Leong netif_napi_del(&ch->rxtx_napi); 6671132c32eeSOng Boon Leong } 66720366f7e0SOng Boon Leong } 66730366f7e0SOng Boon Leong } 66740366f7e0SOng Boon Leong 66750366f7e0SOng Boon Leong int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 66760366f7e0SOng Boon Leong { 66770366f7e0SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 66780366f7e0SOng Boon Leong int ret = 0; 66790366f7e0SOng Boon Leong 66800366f7e0SOng Boon Leong if 
(netif_running(dev)) 66810366f7e0SOng Boon Leong stmmac_release(dev); 66820366f7e0SOng Boon Leong 66830366f7e0SOng Boon Leong stmmac_napi_del(dev); 66840366f7e0SOng Boon Leong 66850366f7e0SOng Boon Leong priv->plat->rx_queues_to_use = rx_cnt; 66860366f7e0SOng Boon Leong priv->plat->tx_queues_to_use = tx_cnt; 66870366f7e0SOng Boon Leong 66880366f7e0SOng Boon Leong stmmac_napi_add(dev); 66890366f7e0SOng Boon Leong 66900366f7e0SOng Boon Leong if (netif_running(dev)) 66910366f7e0SOng Boon Leong ret = stmmac_open(dev); 66920366f7e0SOng Boon Leong 66930366f7e0SOng Boon Leong return ret; 66940366f7e0SOng Boon Leong } 66950366f7e0SOng Boon Leong 6696aa042f60SSong, Yoong Siang int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 6697aa042f60SSong, Yoong Siang { 6698aa042f60SSong, Yoong Siang struct stmmac_priv *priv = netdev_priv(dev); 6699aa042f60SSong, Yoong Siang int ret = 0; 6700aa042f60SSong, Yoong Siang 6701aa042f60SSong, Yoong Siang if (netif_running(dev)) 6702aa042f60SSong, Yoong Siang stmmac_release(dev); 6703aa042f60SSong, Yoong Siang 6704aa042f60SSong, Yoong Siang priv->dma_rx_size = rx_size; 6705aa042f60SSong, Yoong Siang priv->dma_tx_size = tx_size; 6706aa042f60SSong, Yoong Siang 6707aa042f60SSong, Yoong Siang if (netif_running(dev)) 6708aa042f60SSong, Yoong Siang ret = stmmac_open(dev); 6709aa042f60SSong, Yoong Siang 6710aa042f60SSong, Yoong Siang return ret; 6711aa042f60SSong, Yoong Siang } 6712aa042f60SSong, Yoong Siang 67135a558611SOng Boon Leong #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" 67145a558611SOng Boon Leong static void stmmac_fpe_lp_task(struct work_struct *work) 67155a558611SOng Boon Leong { 67165a558611SOng Boon Leong struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 67175a558611SOng Boon Leong fpe_task); 67185a558611SOng Boon Leong struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 67195a558611SOng Boon Leong enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 
67205a558611SOng Boon Leong enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 67215a558611SOng Boon Leong bool *hs_enable = &fpe_cfg->hs_enable; 67225a558611SOng Boon Leong bool *enable = &fpe_cfg->enable; 67235a558611SOng Boon Leong int retries = 20; 67245a558611SOng Boon Leong 67255a558611SOng Boon Leong while (retries-- > 0) { 67265a558611SOng Boon Leong /* Bail out immediately if FPE handshake is OFF */ 67275a558611SOng Boon Leong if (*lo_state == FPE_STATE_OFF || !*hs_enable) 67285a558611SOng Boon Leong break; 67295a558611SOng Boon Leong 67305a558611SOng Boon Leong if (*lo_state == FPE_STATE_ENTERING_ON && 67315a558611SOng Boon Leong *lp_state == FPE_STATE_ENTERING_ON) { 67325a558611SOng Boon Leong stmmac_fpe_configure(priv, priv->ioaddr, 67335a558611SOng Boon Leong priv->plat->tx_queues_to_use, 67345a558611SOng Boon Leong priv->plat->rx_queues_to_use, 67355a558611SOng Boon Leong *enable); 67365a558611SOng Boon Leong 67375a558611SOng Boon Leong netdev_info(priv->dev, "configured FPE\n"); 67385a558611SOng Boon Leong 67395a558611SOng Boon Leong *lo_state = FPE_STATE_ON; 67405a558611SOng Boon Leong *lp_state = FPE_STATE_ON; 67415a558611SOng Boon Leong netdev_info(priv->dev, "!!! 
BOTH FPE stations ON\n"); 67425a558611SOng Boon Leong break; 67435a558611SOng Boon Leong } 67445a558611SOng Boon Leong 67455a558611SOng Boon Leong if ((*lo_state == FPE_STATE_CAPABLE || 67465a558611SOng Boon Leong *lo_state == FPE_STATE_ENTERING_ON) && 67475a558611SOng Boon Leong *lp_state != FPE_STATE_ON) { 67485a558611SOng Boon Leong netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, 67495a558611SOng Boon Leong *lo_state, *lp_state); 67505a558611SOng Boon Leong stmmac_fpe_send_mpacket(priv, priv->ioaddr, 67515a558611SOng Boon Leong MPACKET_VERIFY); 67525a558611SOng Boon Leong } 67535a558611SOng Boon Leong /* Sleep then retry */ 67545a558611SOng Boon Leong msleep(500); 67555a558611SOng Boon Leong } 67565a558611SOng Boon Leong 67575a558611SOng Boon Leong clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 67585a558611SOng Boon Leong } 67595a558611SOng Boon Leong 67605a558611SOng Boon Leong void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) 67615a558611SOng Boon Leong { 67625a558611SOng Boon Leong if (priv->plat->fpe_cfg->hs_enable != enable) { 67635a558611SOng Boon Leong if (enable) { 67645a558611SOng Boon Leong stmmac_fpe_send_mpacket(priv, priv->ioaddr, 67655a558611SOng Boon Leong MPACKET_VERIFY); 67665a558611SOng Boon Leong } else { 67675a558611SOng Boon Leong priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; 67685a558611SOng Boon Leong priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; 67695a558611SOng Boon Leong } 67705a558611SOng Boon Leong 67715a558611SOng Boon Leong priv->plat->fpe_cfg->hs_enable = enable; 67725a558611SOng Boon Leong } 67735a558611SOng Boon Leong } 67745a558611SOng Boon Leong 6775cf3f047bSGiuseppe CAVALLARO /** 6776bfab27a1SGiuseppe CAVALLARO * stmmac_dvr_probe 6777bfab27a1SGiuseppe CAVALLARO * @device: device pointer 6778ff3dd78cSGiuseppe CAVALLARO * @plat_dat: platform data pointer 6779e56788cfSJoachim Eastwood * @res: stmmac resource pointer 6780bfab27a1SGiuseppe CAVALLARO * Description: this is the main probe function used 
to 6781bfab27a1SGiuseppe CAVALLARO * call the alloc_etherdev, allocate the priv structure. 67829afec6efSAndy Shevchenko * Return: 678315ffac73SJoachim Eastwood * returns 0 on success, otherwise errno. 67847ac6653aSJeff Kirsher */ 678515ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device, 6786cf3f047bSGiuseppe CAVALLARO struct plat_stmmacenet_data *plat_dat, 6787e56788cfSJoachim Eastwood struct stmmac_resources *res) 67887ac6653aSJeff Kirsher { 6789bfab27a1SGiuseppe CAVALLARO struct net_device *ndev = NULL; 6790bfab27a1SGiuseppe CAVALLARO struct stmmac_priv *priv; 67910366f7e0SOng Boon Leong u32 rxq; 679276067459SJose Abreu int i, ret = 0; 67937ac6653aSJeff Kirsher 67949737070cSJisheng Zhang ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 67959737070cSJisheng Zhang MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 679641de8d4cSJoe Perches if (!ndev) 679715ffac73SJoachim Eastwood return -ENOMEM; 67987ac6653aSJeff Kirsher 6799bfab27a1SGiuseppe CAVALLARO SET_NETDEV_DEV(ndev, device); 68007ac6653aSJeff Kirsher 6801bfab27a1SGiuseppe CAVALLARO priv = netdev_priv(ndev); 6802bfab27a1SGiuseppe CAVALLARO priv->device = device; 6803bfab27a1SGiuseppe CAVALLARO priv->dev = ndev; 6804bfab27a1SGiuseppe CAVALLARO 6805bfab27a1SGiuseppe CAVALLARO stmmac_set_ethtool_ops(ndev); 6806cf3f047bSGiuseppe CAVALLARO priv->pause = pause; 6807cf3f047bSGiuseppe CAVALLARO priv->plat = plat_dat; 6808e56788cfSJoachim Eastwood priv->ioaddr = res->addr; 6809e56788cfSJoachim Eastwood priv->dev->base_addr = (unsigned long)res->addr; 68106ccf12aeSWong, Vee Khee priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en; 6811e56788cfSJoachim Eastwood 6812e56788cfSJoachim Eastwood priv->dev->irq = res->irq; 6813e56788cfSJoachim Eastwood priv->wol_irq = res->wol_irq; 6814e56788cfSJoachim Eastwood priv->lpi_irq = res->lpi_irq; 68158532f613SOng Boon Leong priv->sfty_ce_irq = res->sfty_ce_irq; 68168532f613SOng Boon Leong priv->sfty_ue_irq = res->sfty_ue_irq; 68178532f613SOng Boon 
Leong for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 68188532f613SOng Boon Leong priv->rx_irq[i] = res->rx_irq[i]; 68198532f613SOng Boon Leong for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 68208532f613SOng Boon Leong priv->tx_irq[i] = res->tx_irq[i]; 6821e56788cfSJoachim Eastwood 682283216e39SMichael Walle if (!is_zero_ether_addr(res->mac)) 6823a96d317fSJakub Kicinski eth_hw_addr_set(priv->dev, res->mac); 6824bfab27a1SGiuseppe CAVALLARO 6825a7a62685SJoachim Eastwood dev_set_drvdata(device, priv->dev); 6826803f8fc4SJoachim Eastwood 6827cf3f047bSGiuseppe CAVALLARO /* Verify driver arguments */ 6828cf3f047bSGiuseppe CAVALLARO stmmac_verify_args(); 6829cf3f047bSGiuseppe CAVALLARO 6830bba2556eSOng Boon Leong priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); 6831bba2556eSOng Boon Leong if (!priv->af_xdp_zc_qps) 6832bba2556eSOng Boon Leong return -ENOMEM; 6833bba2556eSOng Boon Leong 683434877a15SJose Abreu /* Allocate workqueue */ 683534877a15SJose Abreu priv->wq = create_singlethread_workqueue("stmmac_wq"); 683634877a15SJose Abreu if (!priv->wq) { 683734877a15SJose Abreu dev_err(priv->device, "failed to create workqueue\n"); 68389737070cSJisheng Zhang return -ENOMEM; 683934877a15SJose Abreu } 684034877a15SJose Abreu 684134877a15SJose Abreu INIT_WORK(&priv->service_task, stmmac_service_task); 684234877a15SJose Abreu 68435a558611SOng Boon Leong /* Initialize Link Partner FPE workqueue */ 68445a558611SOng Boon Leong INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 68455a558611SOng Boon Leong 6846cf3f047bSGiuseppe CAVALLARO /* Override with kernel parameters if supplied XXX CRS XXX 6847ceb69499SGiuseppe CAVALLARO * this needs to have multiple instances 6848ceb69499SGiuseppe CAVALLARO */ 6849cf3f047bSGiuseppe CAVALLARO if ((phyaddr >= 0) && (phyaddr <= 31)) 6850cf3f047bSGiuseppe CAVALLARO priv->plat->phy_addr = phyaddr; 6851cf3f047bSGiuseppe CAVALLARO 685290f522a2SEugeniy Paltsev if (priv->plat->stmmac_rst) { 685390f522a2SEugeniy Paltsev ret = 
reset_control_assert(priv->plat->stmmac_rst); 6854f573c0b9Sjpinto reset_control_deassert(priv->plat->stmmac_rst); 685590f522a2SEugeniy Paltsev /* Some reset controllers have only reset callback instead of 685690f522a2SEugeniy Paltsev * assert + deassert callbacks pair. 685790f522a2SEugeniy Paltsev */ 685890f522a2SEugeniy Paltsev if (ret == -ENOTSUPP) 685990f522a2SEugeniy Paltsev reset_control_reset(priv->plat->stmmac_rst); 686090f522a2SEugeniy Paltsev } 6861c5e4ddbdSChen-Yu Tsai 6862e67f325eSMatthew Hagan ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); 6863e67f325eSMatthew Hagan if (ret == -ENOTSUPP) 6864e67f325eSMatthew Hagan dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 6865e67f325eSMatthew Hagan ERR_PTR(ret)); 6866e67f325eSMatthew Hagan 6867cf3f047bSGiuseppe CAVALLARO /* Init MAC and get the capabilities */ 6868c24602efSGiuseppe CAVALLARO ret = stmmac_hw_init(priv); 6869c24602efSGiuseppe CAVALLARO if (ret) 687062866e98SChen-Yu Tsai goto error_hw_init; 6871cf3f047bSGiuseppe CAVALLARO 687296874c61SMohammad Athari Bin Ismail /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch. 
687396874c61SMohammad Athari Bin Ismail */ 687496874c61SMohammad Athari Bin Ismail if (priv->synopsys_id < DWMAC_CORE_5_20) 687596874c61SMohammad Athari Bin Ismail priv->plat->dma_cfg->dche = false; 687696874c61SMohammad Athari Bin Ismail 6877b561af36SVinod Koul stmmac_check_ether_addr(priv); 6878b561af36SVinod Koul 6879cf3f047bSGiuseppe CAVALLARO ndev->netdev_ops = &stmmac_netdev_ops; 6880cf3f047bSGiuseppe CAVALLARO 6881cf3f047bSGiuseppe CAVALLARO ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 6882cf3f047bSGiuseppe CAVALLARO NETIF_F_RXCSUM; 6883f748be53SAlexandre TORGUE 68844dbbe8ddSJose Abreu ret = stmmac_tc_init(priv, priv); 68854dbbe8ddSJose Abreu if (!ret) { 68864dbbe8ddSJose Abreu ndev->hw_features |= NETIF_F_HW_TC; 68874dbbe8ddSJose Abreu } 68884dbbe8ddSJose Abreu 6889f748be53SAlexandre TORGUE if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 68909edfa7daSNiklas Cassel ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 6891b7766206SJose Abreu if (priv->plat->has_gmac4) 6892b7766206SJose Abreu ndev->hw_features |= NETIF_F_GSO_UDP_L4; 6893f748be53SAlexandre TORGUE priv->tso = true; 689438ddc59dSLABBE Corentin dev_info(priv->device, "TSO feature enabled\n"); 6895f748be53SAlexandre TORGUE } 6896a993db88SJose Abreu 689767afd6d1SJose Abreu if (priv->dma_cap.sphen) { 689867afd6d1SJose Abreu ndev->hw_features |= NETIF_F_GRO; 6899d08d32d1SOng Boon Leong priv->sph_cap = true; 6900d08d32d1SOng Boon Leong priv->sph = priv->sph_cap; 690167afd6d1SJose Abreu dev_info(priv->device, "SPH feature enabled\n"); 690267afd6d1SJose Abreu } 690367afd6d1SJose Abreu 6904f119cc98SFugang Duan /* The current IP register MAC_HW_Feature1[ADDR64] only define 6905f119cc98SFugang Duan * 32/40/64 bit width, but some SOC support others like i.MX8MP 6906f119cc98SFugang Duan * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64]. 6907f119cc98SFugang Duan * So overwrite dma_cap.addr64 according to HW real design. 
6908f119cc98SFugang Duan */ 6909f119cc98SFugang Duan if (priv->plat->addr64) 6910f119cc98SFugang Duan priv->dma_cap.addr64 = priv->plat->addr64; 6911f119cc98SFugang Duan 6912a993db88SJose Abreu if (priv->dma_cap.addr64) { 6913a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, 6914a993db88SJose Abreu DMA_BIT_MASK(priv->dma_cap.addr64)); 6915a993db88SJose Abreu if (!ret) { 6916a993db88SJose Abreu dev_info(priv->device, "Using %d bits DMA width\n", 6917a993db88SJose Abreu priv->dma_cap.addr64); 6918968a2978SThierry Reding 6919968a2978SThierry Reding /* 6920968a2978SThierry Reding * If more than 32 bits can be addressed, make sure to 6921968a2978SThierry Reding * enable enhanced addressing mode. 6922968a2978SThierry Reding */ 6923968a2978SThierry Reding if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 6924968a2978SThierry Reding priv->plat->dma_cfg->eame = true; 6925a993db88SJose Abreu } else { 6926a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 6927a993db88SJose Abreu if (ret) { 6928a993db88SJose Abreu dev_err(priv->device, "Failed to set DMA Mask\n"); 6929a993db88SJose Abreu goto error_hw_init; 6930a993db88SJose Abreu } 6931a993db88SJose Abreu 6932a993db88SJose Abreu priv->dma_cap.addr64 = 32; 6933a993db88SJose Abreu } 6934a993db88SJose Abreu } 6935a993db88SJose Abreu 6936bfab27a1SGiuseppe CAVALLARO ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 6937bfab27a1SGiuseppe CAVALLARO ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 69387ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED 69397ac6653aSJeff Kirsher /* Both mac100 and gmac support receive VLAN tag detection */ 6940ab188e8fSElad Nachman ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 69413cd1cfcbSJose Abreu if (priv->dma_cap.vlhash) { 69423cd1cfcbSJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 69433cd1cfcbSJose Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 69443cd1cfcbSJose Abreu } 694530d93227SJose Abreu if (priv->dma_cap.vlins) 
{ 694630d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 694730d93227SJose Abreu if (priv->dma_cap.dvlan) 694830d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 694930d93227SJose Abreu } 69507ac6653aSJeff Kirsher #endif 69517ac6653aSJeff Kirsher priv->msg_enable = netif_msg_init(debug, default_msg_level); 69527ac6653aSJeff Kirsher 695376067459SJose Abreu /* Initialize RSS */ 695476067459SJose Abreu rxq = priv->plat->rx_queues_to_use; 695576067459SJose Abreu netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 695676067459SJose Abreu for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 695776067459SJose Abreu priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 695876067459SJose Abreu 695976067459SJose Abreu if (priv->dma_cap.rssen && priv->plat->rss_en) 696076067459SJose Abreu ndev->features |= NETIF_F_RXHASH; 696176067459SJose Abreu 696244770e11SJarod Wilson /* MTU range: 46 - hw-specific max */ 696344770e11SJarod Wilson ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 696456bcd591SJose Abreu if (priv->plat->has_xgmac) 69657d9e6c5aSJose Abreu ndev->max_mtu = XGMAC_JUMBO_LEN; 696656bcd591SJose Abreu else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 696756bcd591SJose Abreu ndev->max_mtu = JUMBO_LEN; 696844770e11SJarod Wilson else 696944770e11SJarod Wilson ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 6970a2cd64f3SKweh, Hock Leong /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 6971a2cd64f3SKweh, Hock Leong * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 
6972a2cd64f3SKweh, Hock Leong */ 6973a2cd64f3SKweh, Hock Leong if ((priv->plat->maxmtu < ndev->max_mtu) && 6974a2cd64f3SKweh, Hock Leong (priv->plat->maxmtu >= ndev->min_mtu)) 697544770e11SJarod Wilson ndev->max_mtu = priv->plat->maxmtu; 6976a2cd64f3SKweh, Hock Leong else if (priv->plat->maxmtu < ndev->min_mtu) 6977b618ab45SHeiner Kallweit dev_warn(priv->device, 6978a2cd64f3SKweh, Hock Leong "%s: warning: maxmtu having invalid value (%d)\n", 6979a2cd64f3SKweh, Hock Leong __func__, priv->plat->maxmtu); 698044770e11SJarod Wilson 69817ac6653aSJeff Kirsher if (flow_ctrl) 69827ac6653aSJeff Kirsher priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 69837ac6653aSJeff Kirsher 69848fce3331SJose Abreu /* Setup channels NAPI */ 69850366f7e0SOng Boon Leong stmmac_napi_add(ndev); 69867ac6653aSJeff Kirsher 698729555fa3SThierry Reding mutex_init(&priv->lock); 69887ac6653aSJeff Kirsher 6989cd7201f4SGiuseppe CAVALLARO /* If a specific clk_csr value is passed from the platform 6990cd7201f4SGiuseppe CAVALLARO * this means that the CSR Clock Range selection cannot be 6991cd7201f4SGiuseppe CAVALLARO * changed at run-time and it is fixed. Viceversa the driver'll try to 6992cd7201f4SGiuseppe CAVALLARO * set the MDC clock dynamically according to the csr actual 6993cd7201f4SGiuseppe CAVALLARO * clock input. 
6994cd7201f4SGiuseppe CAVALLARO */ 69955e7f7fc5SBiao Huang if (priv->plat->clk_csr >= 0) 6996cd7201f4SGiuseppe CAVALLARO priv->clk_csr = priv->plat->clk_csr; 69975e7f7fc5SBiao Huang else 69985e7f7fc5SBiao Huang stmmac_clk_csr_set(priv); 6999cd7201f4SGiuseppe CAVALLARO 7000e58bb43fSGiuseppe CAVALLARO stmmac_check_pcs_mode(priv); 7001e58bb43fSGiuseppe CAVALLARO 70025ec55823SJoakim Zhang pm_runtime_get_noresume(device); 70035ec55823SJoakim Zhang pm_runtime_set_active(device); 70045ec55823SJoakim Zhang pm_runtime_enable(device); 70055ec55823SJoakim Zhang 7006a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 70073fe5cadbSGiuseppe CAVALLARO priv->hw->pcs != STMMAC_PCS_RTBI) { 70084bfcbd7aSFrancesco Virlinzi /* MDIO bus Registration */ 70094bfcbd7aSFrancesco Virlinzi ret = stmmac_mdio_register(ndev); 70104bfcbd7aSFrancesco Virlinzi if (ret < 0) { 7011b618ab45SHeiner Kallweit dev_err(priv->device, 701238ddc59dSLABBE Corentin "%s: MDIO bus (id: %d) registration failed", 70134bfcbd7aSFrancesco Virlinzi __func__, priv->plat->bus_id); 70146a81c26fSViresh Kumar goto error_mdio_register; 70154bfcbd7aSFrancesco Virlinzi } 7016e58bb43fSGiuseppe CAVALLARO } 70174bfcbd7aSFrancesco Virlinzi 701846682cb8SVoon Weifeng if (priv->plat->speed_mode_2500) 701946682cb8SVoon Weifeng priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); 702046682cb8SVoon Weifeng 70217413f9a6SVladimir Oltean if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { 7022597a68ceSVoon Weifeng ret = stmmac_xpcs_setup(priv->mii); 7023597a68ceSVoon Weifeng if (ret) 7024597a68ceSVoon Weifeng goto error_xpcs_setup; 7025597a68ceSVoon Weifeng } 7026597a68ceSVoon Weifeng 702774371272SJose Abreu ret = stmmac_phy_setup(priv); 702874371272SJose Abreu if (ret) { 702974371272SJose Abreu netdev_err(ndev, "failed to setup phy (%d)\n", ret); 703074371272SJose Abreu goto error_phy_setup; 703174371272SJose Abreu } 703274371272SJose Abreu 703357016590SFlorian Fainelli ret = register_netdev(ndev); 
7034b2eb09afSFlorian Fainelli if (ret) { 7035b618ab45SHeiner Kallweit dev_err(priv->device, "%s: ERROR %i registering the device\n", 703657016590SFlorian Fainelli __func__, ret); 7037b2eb09afSFlorian Fainelli goto error_netdev_register; 7038b2eb09afSFlorian Fainelli } 70397ac6653aSJeff Kirsher 7040b9663b7cSVoon Weifeng if (priv->plat->serdes_powerup) { 7041b9663b7cSVoon Weifeng ret = priv->plat->serdes_powerup(ndev, 7042b9663b7cSVoon Weifeng priv->plat->bsp_priv); 7043b9663b7cSVoon Weifeng 7044b9663b7cSVoon Weifeng if (ret < 0) 7045801eb050SAndy Shevchenko goto error_serdes_powerup; 7046b9663b7cSVoon Weifeng } 7047b9663b7cSVoon Weifeng 70485f2b8b62SThierry Reding #ifdef CONFIG_DEBUG_FS 70498d72ab11SGreg Kroah-Hartman stmmac_init_fs(ndev); 70505f2b8b62SThierry Reding #endif 70515f2b8b62SThierry Reding 70525ec55823SJoakim Zhang /* Let pm_runtime_put() disable the clocks. 70535ec55823SJoakim Zhang * If CONFIG_PM is not enabled, the clocks will stay powered. 70545ec55823SJoakim Zhang */ 70555ec55823SJoakim Zhang pm_runtime_put(device); 70565ec55823SJoakim Zhang 705757016590SFlorian Fainelli return ret; 70587ac6653aSJeff Kirsher 7059801eb050SAndy Shevchenko error_serdes_powerup: 7060801eb050SAndy Shevchenko unregister_netdev(ndev); 70616a81c26fSViresh Kumar error_netdev_register: 706274371272SJose Abreu phylink_destroy(priv->phylink); 7063597a68ceSVoon Weifeng error_xpcs_setup: 706474371272SJose Abreu error_phy_setup: 7065a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 7066b2eb09afSFlorian Fainelli priv->hw->pcs != STMMAC_PCS_RTBI) 7067b2eb09afSFlorian Fainelli stmmac_mdio_unregister(ndev); 70687ac6653aSJeff Kirsher error_mdio_register: 70690366f7e0SOng Boon Leong stmmac_napi_del(ndev); 707062866e98SChen-Yu Tsai error_hw_init: 707134877a15SJose Abreu destroy_workqueue(priv->wq); 7072d7f576dcSWong Vee Khee bitmap_free(priv->af_xdp_zc_qps); 70737ac6653aSJeff Kirsher 707415ffac73SJoachim Eastwood return ret; 70757ac6653aSJeff Kirsher } 7076b2e2f0c7SAndy 
Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 70777ac6653aSJeff Kirsher 70787ac6653aSJeff Kirsher /** 70797ac6653aSJeff Kirsher * stmmac_dvr_remove 7080f4e7bd81SJoachim Eastwood * @dev: device pointer 70817ac6653aSJeff Kirsher * Description: this function resets the TX/RX processes, disables the MAC RX/TX 7082bfab27a1SGiuseppe CAVALLARO * changes the link status, releases the DMA descriptor rings. 70837ac6653aSJeff Kirsher */ 7084f4e7bd81SJoachim Eastwood int stmmac_dvr_remove(struct device *dev) 70857ac6653aSJeff Kirsher { 7086f4e7bd81SJoachim Eastwood struct net_device *ndev = dev_get_drvdata(dev); 70877ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(ndev); 70887ac6653aSJeff Kirsher 708938ddc59dSLABBE Corentin netdev_info(priv->dev, "%s: removing driver", __func__); 70907ac6653aSJeff Kirsher 7091ae4f0d46SJoao Pinto stmmac_stop_all_dma(priv); 7092c10d4c82SJose Abreu stmmac_mac_set(priv, priv->ioaddr, false); 70937ac6653aSJeff Kirsher netif_carrier_off(ndev); 70947ac6653aSJeff Kirsher unregister_netdev(ndev); 70959a7b3950SOng Boon Leong 70969a7b3950SOng Boon Leong /* Serdes power down needs to happen after VLAN filter 70979a7b3950SOng Boon Leong * is deleted that is triggered by unregister_netdev(). 
	 */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	/* Put the IP back into reset; assert both MAC and AHB resets */
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	/* Balance the pm_runtime_get_noresume()/pm_runtime_enable()
	 * done in stmmac_dvr_probe().
	 */
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	/* Mirror of the conditional MDIO registration in probe */
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	/* Nothing to quiesce if the interface is not up */
	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-TX-queue timers before stopping DMA */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		/* Wake-on-LAN: arm the PMT block with the configured
		 * WoL options and keep the wake IRQ flagged.
		 */
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}


	mutex_unlock(&priv->lock);

	/* phylink calls must be made under the RTNL lock */
	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	/* Link speed will be renegotiated on resume */
	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Rewind every RX ring's producer/consumer indices */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx =
0;
		rx_q->dirty_rx = 0;
	}

	/* Rewind every TX ring and clear any cached TSO MSS state */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	/* Serdes must be powered before the MAC/DMA are reprogrammed */
	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	/* phylink calls must be made under the RTNL lock */
	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	/* Rings must be rewound before the HW is set up again */
	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
/* Parse the "stmmaceth=" boot command line: a comma-separated list of
 * "name:value" integer options (debug, phyaddr, buf_sz, tc, watchdog,
 * flow_ctrl, pause, eee_timer, chain_mode).  Unknown option names are
 * silently ignored; a malformed value aborts parsing with -EINVAL.
 */
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

/* Register the early "stmmaceth=" kernel command-line handler
 * (built-in only; modules use module parameters instead).
 */
__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

/* Module init: set up the shared debugfs root and the netdevice
 * notifier (stmmac_notifier is defined elsewhere in this file;
 * NOTE(review): presumably it keeps per-device debugfs entries in
 * sync with netdev events — confirm at its definition).
 */
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

/* Module exit: unregister the notifier first, then tear down the
 * whole debugfs tree created in stmmac_init().
 */
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");