14fa9c49fSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 27ac6653aSJeff Kirsher /******************************************************************************* 37ac6653aSJeff Kirsher This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. 47ac6653aSJeff Kirsher ST Ethernet IPs are built around a Synopsys IP Core. 57ac6653aSJeff Kirsher 6286a8372SGiuseppe CAVALLARO Copyright(C) 2007-2011 STMicroelectronics Ltd 77ac6653aSJeff Kirsher 87ac6653aSJeff Kirsher 97ac6653aSJeff Kirsher Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 107ac6653aSJeff Kirsher 117ac6653aSJeff Kirsher Documentation available at: 127ac6653aSJeff Kirsher http://www.stlinux.com 137ac6653aSJeff Kirsher Support available at: 147ac6653aSJeff Kirsher https://bugzilla.stlinux.com/ 157ac6653aSJeff Kirsher *******************************************************************************/ 167ac6653aSJeff Kirsher 176a81c26fSViresh Kumar #include <linux/clk.h> 187ac6653aSJeff Kirsher #include <linux/kernel.h> 197ac6653aSJeff Kirsher #include <linux/interrupt.h> 207ac6653aSJeff Kirsher #include <linux/ip.h> 217ac6653aSJeff Kirsher #include <linux/tcp.h> 227ac6653aSJeff Kirsher #include <linux/skbuff.h> 237ac6653aSJeff Kirsher #include <linux/ethtool.h> 247ac6653aSJeff Kirsher #include <linux/if_ether.h> 257ac6653aSJeff Kirsher #include <linux/crc32.h> 267ac6653aSJeff Kirsher #include <linux/mii.h> 2701789349SJiri Pirko #include <linux/if.h> 287ac6653aSJeff Kirsher #include <linux/if_vlan.h> 297ac6653aSJeff Kirsher #include <linux/dma-mapping.h> 307ac6653aSJeff Kirsher #include <linux/slab.h> 317ac6653aSJeff Kirsher #include <linux/prefetch.h> 32db88f10aSSrinivas Kandagatla #include <linux/pinctrl/consumer.h> 3350fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 347ac29055SGiuseppe CAVALLARO #include <linux/debugfs.h> 357ac29055SGiuseppe CAVALLARO #include <linux/seq_file.h> 3650fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */ 37891434b1SRayagond Kokatanur #include 
<linux/net_tstamp.h> 38eeef2f6bSJose Abreu #include <linux/phylink.h> 39b7766206SJose Abreu #include <linux/udp.h> 404dbbe8ddSJose Abreu #include <net/pkt_cls.h> 41891434b1SRayagond Kokatanur #include "stmmac_ptp.h" 42286a8372SGiuseppe CAVALLARO #include "stmmac.h" 43c5e4ddbdSChen-Yu Tsai #include <linux/reset.h> 445790cf3cSMathieu Olivari #include <linux/of_mdio.h> 4519d857c9SPhil Reid #include "dwmac1000.h" 467d9e6c5aSJose Abreu #include "dwxgmac2.h" 4742de047dSJose Abreu #include "hwif.h" 487ac6653aSJeff Kirsher 498d558f02SJose Abreu #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) 50f748be53SAlexandre TORGUE #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) 517ac6653aSJeff Kirsher 527ac6653aSJeff Kirsher /* Module parameters */ 5332ceabcaSGiuseppe CAVALLARO #define TX_TIMEO 5000 547ac6653aSJeff Kirsher static int watchdog = TX_TIMEO; 55d3757ba4SJoe Perches module_param(watchdog, int, 0644); 5632ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); 577ac6653aSJeff Kirsher 5832ceabcaSGiuseppe CAVALLARO static int debug = -1; 59d3757ba4SJoe Perches module_param(debug, int, 0644); 6032ceabcaSGiuseppe CAVALLARO MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); 617ac6653aSJeff Kirsher 6247d1f71fSstephen hemminger static int phyaddr = -1; 63d3757ba4SJoe Perches module_param(phyaddr, int, 0444); 647ac6653aSJeff Kirsher MODULE_PARM_DESC(phyaddr, "Physical device address"); 657ac6653aSJeff Kirsher 66aa042f60SSong, Yoong Siang #define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) 67aa042f60SSong, Yoong Siang #define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) 687ac6653aSJeff Kirsher 69e9989339SJose Abreu static int flow_ctrl = FLOW_AUTO; 70d3757ba4SJoe Perches module_param(flow_ctrl, int, 0644); 717ac6653aSJeff Kirsher MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); 727ac6653aSJeff Kirsher 737ac6653aSJeff Kirsher static int pause = PAUSE_TIME; 74d3757ba4SJoe Perches 
module_param(pause, int, 0644); 757ac6653aSJeff Kirsher MODULE_PARM_DESC(pause, "Flow Control Pause Time"); 767ac6653aSJeff Kirsher 777ac6653aSJeff Kirsher #define TC_DEFAULT 64 787ac6653aSJeff Kirsher static int tc = TC_DEFAULT; 79d3757ba4SJoe Perches module_param(tc, int, 0644); 807ac6653aSJeff Kirsher MODULE_PARM_DESC(tc, "DMA threshold control value"); 817ac6653aSJeff Kirsher 82d916701cSGiuseppe CAVALLARO #define DEFAULT_BUFSIZE 1536 83d916701cSGiuseppe CAVALLARO static int buf_sz = DEFAULT_BUFSIZE; 84d3757ba4SJoe Perches module_param(buf_sz, int, 0644); 857ac6653aSJeff Kirsher MODULE_PARM_DESC(buf_sz, "DMA buffer size"); 867ac6653aSJeff Kirsher 8722ad3838SGiuseppe Cavallaro #define STMMAC_RX_COPYBREAK 256 8822ad3838SGiuseppe Cavallaro 897ac6653aSJeff Kirsher static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 907ac6653aSJeff Kirsher NETIF_MSG_LINK | NETIF_MSG_IFUP | 917ac6653aSJeff Kirsher NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 927ac6653aSJeff Kirsher 93d765955dSGiuseppe CAVALLARO #define STMMAC_DEFAULT_LPI_TIMER 1000 94d765955dSGiuseppe CAVALLARO static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; 95d3757ba4SJoe Perches module_param(eee_timer, int, 0644); 96d765955dSGiuseppe CAVALLARO MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 97388e201dSVineetha G. 
Jaya Kumaran #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) 98d765955dSGiuseppe CAVALLARO 9922d3efe5SPavel Machek /* By default the driver will use the ring mode to manage tx and rx descriptors, 10022d3efe5SPavel Machek * but allow user to force to use the chain instead of the ring 1014a7d666aSGiuseppe CAVALLARO */ 1024a7d666aSGiuseppe CAVALLARO static unsigned int chain_mode; 103d3757ba4SJoe Perches module_param(chain_mode, int, 0444); 1044a7d666aSGiuseppe CAVALLARO MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); 1054a7d666aSGiuseppe CAVALLARO 1067ac6653aSJeff Kirsher static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 1077ac6653aSJeff Kirsher 10850fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 109481a7d15SJiping Ma static const struct net_device_ops stmmac_netdev_ops; 1108d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev); 111466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev); 112bfab27a1SGiuseppe CAVALLARO #endif 113bfab27a1SGiuseppe CAVALLARO 114d5a05e69SVincent Whitchurch #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) 1159125cdd1SGiuseppe CAVALLARO 1167ac6653aSJeff Kirsher /** 1177ac6653aSJeff Kirsher * stmmac_verify_args - verify the driver parameters. 118732fdf0eSGiuseppe CAVALLARO * Description: it checks the driver parameters and set a default in case of 119732fdf0eSGiuseppe CAVALLARO * errors. 
1207ac6653aSJeff Kirsher */ 1217ac6653aSJeff Kirsher static void stmmac_verify_args(void) 1227ac6653aSJeff Kirsher { 1237ac6653aSJeff Kirsher if (unlikely(watchdog < 0)) 1247ac6653aSJeff Kirsher watchdog = TX_TIMEO; 125d916701cSGiuseppe CAVALLARO if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) 126d916701cSGiuseppe CAVALLARO buf_sz = DEFAULT_BUFSIZE; 1277ac6653aSJeff Kirsher if (unlikely(flow_ctrl > 1)) 1287ac6653aSJeff Kirsher flow_ctrl = FLOW_AUTO; 1297ac6653aSJeff Kirsher else if (likely(flow_ctrl < 0)) 1307ac6653aSJeff Kirsher flow_ctrl = FLOW_OFF; 1317ac6653aSJeff Kirsher if (unlikely((pause < 0) || (pause > 0xffff))) 1327ac6653aSJeff Kirsher pause = PAUSE_TIME; 133d765955dSGiuseppe CAVALLARO if (eee_timer < 0) 134d765955dSGiuseppe CAVALLARO eee_timer = STMMAC_DEFAULT_LPI_TIMER; 1357ac6653aSJeff Kirsher } 1367ac6653aSJeff Kirsher 13732ceabcaSGiuseppe CAVALLARO /** 138c22a3f48SJoao Pinto * stmmac_disable_all_queues - Disable all queues 139c22a3f48SJoao Pinto * @priv: driver private structure 140c22a3f48SJoao Pinto */ 141c22a3f48SJoao Pinto static void stmmac_disable_all_queues(struct stmmac_priv *priv) 142c22a3f48SJoao Pinto { 143c22a3f48SJoao Pinto u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 1448fce3331SJose Abreu u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 1458fce3331SJose Abreu u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 146c22a3f48SJoao Pinto u32 queue; 147c22a3f48SJoao Pinto 1488fce3331SJose Abreu for (queue = 0; queue < maxq; queue++) { 1498fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 150c22a3f48SJoao Pinto 1514ccb4585SJose Abreu if (queue < rx_queues_cnt) 1524ccb4585SJose Abreu napi_disable(&ch->rx_napi); 1534ccb4585SJose Abreu if (queue < tx_queues_cnt) 1544ccb4585SJose Abreu napi_disable(&ch->tx_napi); 155c22a3f48SJoao Pinto } 156c22a3f48SJoao Pinto } 157c22a3f48SJoao Pinto 158c22a3f48SJoao Pinto /** 159c22a3f48SJoao Pinto * stmmac_enable_all_queues - Enable all queues 160c22a3f48SJoao Pinto 
* @priv: driver private structure 161c22a3f48SJoao Pinto */ 162c22a3f48SJoao Pinto static void stmmac_enable_all_queues(struct stmmac_priv *priv) 163c22a3f48SJoao Pinto { 164c22a3f48SJoao Pinto u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 1658fce3331SJose Abreu u32 tx_queues_cnt = priv->plat->tx_queues_to_use; 1668fce3331SJose Abreu u32 maxq = max(rx_queues_cnt, tx_queues_cnt); 167c22a3f48SJoao Pinto u32 queue; 168c22a3f48SJoao Pinto 1698fce3331SJose Abreu for (queue = 0; queue < maxq; queue++) { 1708fce3331SJose Abreu struct stmmac_channel *ch = &priv->channel[queue]; 171c22a3f48SJoao Pinto 1724ccb4585SJose Abreu if (queue < rx_queues_cnt) 1734ccb4585SJose Abreu napi_enable(&ch->rx_napi); 1744ccb4585SJose Abreu if (queue < tx_queues_cnt) 1754ccb4585SJose Abreu napi_enable(&ch->tx_napi); 176c22a3f48SJoao Pinto } 177c22a3f48SJoao Pinto } 178c22a3f48SJoao Pinto 17934877a15SJose Abreu static void stmmac_service_event_schedule(struct stmmac_priv *priv) 18034877a15SJose Abreu { 18134877a15SJose Abreu if (!test_bit(STMMAC_DOWN, &priv->state) && 18234877a15SJose Abreu !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) 18334877a15SJose Abreu queue_work(priv->wq, &priv->service_task); 18434877a15SJose Abreu } 18534877a15SJose Abreu 18634877a15SJose Abreu static void stmmac_global_err(struct stmmac_priv *priv) 18734877a15SJose Abreu { 18834877a15SJose Abreu netif_carrier_off(priv->dev); 18934877a15SJose Abreu set_bit(STMMAC_RESET_REQUESTED, &priv->state); 19034877a15SJose Abreu stmmac_service_event_schedule(priv); 19134877a15SJose Abreu } 19234877a15SJose Abreu 193c22a3f48SJoao Pinto /** 19432ceabcaSGiuseppe CAVALLARO * stmmac_clk_csr_set - dynamically set the MDC clock 19532ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 19632ceabcaSGiuseppe CAVALLARO * Description: this is to dynamically set the MDC clock according to the csr 19732ceabcaSGiuseppe CAVALLARO * clock input. 
19832ceabcaSGiuseppe CAVALLARO * Note: 19932ceabcaSGiuseppe CAVALLARO * If a specific clk_csr value is passed from the platform 20032ceabcaSGiuseppe CAVALLARO * this means that the CSR Clock Range selection cannot be 20132ceabcaSGiuseppe CAVALLARO * changed at run-time and it is fixed (as reported in the driver 20232ceabcaSGiuseppe CAVALLARO * documentation). Viceversa the driver will try to set the MDC 20332ceabcaSGiuseppe CAVALLARO * clock dynamically according to the actual clock input. 20432ceabcaSGiuseppe CAVALLARO */ 205cd7201f4SGiuseppe CAVALLARO static void stmmac_clk_csr_set(struct stmmac_priv *priv) 206cd7201f4SGiuseppe CAVALLARO { 207cd7201f4SGiuseppe CAVALLARO u32 clk_rate; 208cd7201f4SGiuseppe CAVALLARO 209f573c0b9Sjpinto clk_rate = clk_get_rate(priv->plat->stmmac_clk); 210cd7201f4SGiuseppe CAVALLARO 211cd7201f4SGiuseppe CAVALLARO /* Platform provided default clk_csr would be assumed valid 212ceb69499SGiuseppe CAVALLARO * for all other cases except for the below mentioned ones. 213ceb69499SGiuseppe CAVALLARO * For values higher than the IEEE 802.3 specified frequency 214ceb69499SGiuseppe CAVALLARO * we can not estimate the proper divider as it is not known 215ceb69499SGiuseppe CAVALLARO * the frequency of clk_csr_i. So we do not change the default 216ceb69499SGiuseppe CAVALLARO * divider. 
217ceb69499SGiuseppe CAVALLARO */ 218cd7201f4SGiuseppe CAVALLARO if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { 219cd7201f4SGiuseppe CAVALLARO if (clk_rate < CSR_F_35M) 220cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_20_35M; 221cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) 222cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_35_60M; 223cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) 224cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_60_100M; 225cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) 226cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_100_150M; 227cd7201f4SGiuseppe CAVALLARO else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) 228cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_150_250M; 22919d857c9SPhil Reid else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) 230cd7201f4SGiuseppe CAVALLARO priv->clk_csr = STMMAC_CSR_250_300M; 231ceb69499SGiuseppe CAVALLARO } 2329f93ac8dSLABBE Corentin 2339f93ac8dSLABBE Corentin if (priv->plat->has_sun8i) { 2349f93ac8dSLABBE Corentin if (clk_rate > 160000000) 2359f93ac8dSLABBE Corentin priv->clk_csr = 0x03; 2369f93ac8dSLABBE Corentin else if (clk_rate > 80000000) 2379f93ac8dSLABBE Corentin priv->clk_csr = 0x02; 2389f93ac8dSLABBE Corentin else if (clk_rate > 40000000) 2399f93ac8dSLABBE Corentin priv->clk_csr = 0x01; 2409f93ac8dSLABBE Corentin else 2419f93ac8dSLABBE Corentin priv->clk_csr = 0; 2429f93ac8dSLABBE Corentin } 2437d9e6c5aSJose Abreu 2447d9e6c5aSJose Abreu if (priv->plat->has_xgmac) { 2457d9e6c5aSJose Abreu if (clk_rate > 400000000) 2467d9e6c5aSJose Abreu priv->clk_csr = 0x5; 2477d9e6c5aSJose Abreu else if (clk_rate > 350000000) 2487d9e6c5aSJose Abreu priv->clk_csr = 0x4; 2497d9e6c5aSJose Abreu else if (clk_rate > 300000000) 2507d9e6c5aSJose Abreu priv->clk_csr = 0x3; 2517d9e6c5aSJose Abreu else if (clk_rate > 250000000) 2527d9e6c5aSJose 
Abreu priv->clk_csr = 0x2; 2537d9e6c5aSJose Abreu else if (clk_rate > 150000000) 2547d9e6c5aSJose Abreu priv->clk_csr = 0x1; 2557d9e6c5aSJose Abreu else 2567d9e6c5aSJose Abreu priv->clk_csr = 0x0; 2577d9e6c5aSJose Abreu } 258cd7201f4SGiuseppe CAVALLARO } 259cd7201f4SGiuseppe CAVALLARO 2607ac6653aSJeff Kirsher static void print_pkt(unsigned char *buf, int len) 2617ac6653aSJeff Kirsher { 262424c4f78SAndy Shevchenko pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); 263424c4f78SAndy Shevchenko print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); 2647ac6653aSJeff Kirsher } 2657ac6653aSJeff Kirsher 266ce736788SJoao Pinto static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) 2677ac6653aSJeff Kirsher { 268ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 269a6a3e026SLABBE Corentin u32 avail; 270e3ad57c9SGiuseppe Cavallaro 271ce736788SJoao Pinto if (tx_q->dirty_tx > tx_q->cur_tx) 272ce736788SJoao Pinto avail = tx_q->dirty_tx - tx_q->cur_tx - 1; 273e3ad57c9SGiuseppe Cavallaro else 274aa042f60SSong, Yoong Siang avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; 275e3ad57c9SGiuseppe Cavallaro 276e3ad57c9SGiuseppe Cavallaro return avail; 277e3ad57c9SGiuseppe Cavallaro } 278e3ad57c9SGiuseppe Cavallaro 27954139cf3SJoao Pinto /** 28054139cf3SJoao Pinto * stmmac_rx_dirty - Get RX queue dirty 28154139cf3SJoao Pinto * @priv: driver private structure 28254139cf3SJoao Pinto * @queue: RX queue index 28354139cf3SJoao Pinto */ 28454139cf3SJoao Pinto static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) 285e3ad57c9SGiuseppe Cavallaro { 28654139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 287a6a3e026SLABBE Corentin u32 dirty; 288e3ad57c9SGiuseppe Cavallaro 28954139cf3SJoao Pinto if (rx_q->dirty_rx <= rx_q->cur_rx) 29054139cf3SJoao Pinto dirty = rx_q->cur_rx - rx_q->dirty_rx; 291e3ad57c9SGiuseppe Cavallaro else 292aa042f60SSong, Yoong Siang dirty = priv->dma_rx_size - rx_q->dirty_rx + 
rx_q->cur_rx; 293e3ad57c9SGiuseppe Cavallaro 294e3ad57c9SGiuseppe Cavallaro return dirty; 2957ac6653aSJeff Kirsher } 2967ac6653aSJeff Kirsher 297be1c7eaeSVineetha G. Jaya Kumaran static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) 298be1c7eaeSVineetha G. Jaya Kumaran { 299be1c7eaeSVineetha G. Jaya Kumaran int tx_lpi_timer; 300be1c7eaeSVineetha G. Jaya Kumaran 301be1c7eaeSVineetha G. Jaya Kumaran /* Clear/set the SW EEE timer flag based on LPI ET enablement */ 302be1c7eaeSVineetha G. Jaya Kumaran priv->eee_sw_timer_en = en ? 0 : 1; 303be1c7eaeSVineetha G. Jaya Kumaran tx_lpi_timer = en ? priv->tx_lpi_timer : 0; 304be1c7eaeSVineetha G. Jaya Kumaran stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); 305be1c7eaeSVineetha G. Jaya Kumaran } 306be1c7eaeSVineetha G. Jaya Kumaran 30732ceabcaSGiuseppe CAVALLARO /** 308732fdf0eSGiuseppe CAVALLARO * stmmac_enable_eee_mode - check and enter in LPI mode 30932ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 310732fdf0eSGiuseppe CAVALLARO * Description: this function is to verify and enter in LPI mode in case of 311732fdf0eSGiuseppe CAVALLARO * EEE. 
31232ceabcaSGiuseppe CAVALLARO */ 313d765955dSGiuseppe CAVALLARO static void stmmac_enable_eee_mode(struct stmmac_priv *priv) 314d765955dSGiuseppe CAVALLARO { 315ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 316ce736788SJoao Pinto u32 queue; 317ce736788SJoao Pinto 318ce736788SJoao Pinto /* check if all TX queues have the work finished */ 319ce736788SJoao Pinto for (queue = 0; queue < tx_cnt; queue++) { 320ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 321ce736788SJoao Pinto 322ce736788SJoao Pinto if (tx_q->dirty_tx != tx_q->cur_tx) 323ce736788SJoao Pinto return; /* still unfinished work */ 324ce736788SJoao Pinto } 325ce736788SJoao Pinto 326d765955dSGiuseppe CAVALLARO /* Check and enter in LPI mode */ 327ce736788SJoao Pinto if (!priv->tx_path_in_lpi_mode) 328c10d4c82SJose Abreu stmmac_set_eee_mode(priv, priv->hw, 329b4b7b772Sjpinto priv->plat->en_tx_lpi_clockgating); 330d765955dSGiuseppe CAVALLARO } 331d765955dSGiuseppe CAVALLARO 33232ceabcaSGiuseppe CAVALLARO /** 333732fdf0eSGiuseppe CAVALLARO * stmmac_disable_eee_mode - disable and exit from LPI mode 33432ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 33532ceabcaSGiuseppe CAVALLARO * Description: this function is to exit and disable EEE in case of 33632ceabcaSGiuseppe CAVALLARO * LPI state is true. This is called by the xmit. 33732ceabcaSGiuseppe CAVALLARO */ 338d765955dSGiuseppe CAVALLARO void stmmac_disable_eee_mode(struct stmmac_priv *priv) 339d765955dSGiuseppe CAVALLARO { 340be1c7eaeSVineetha G. Jaya Kumaran if (!priv->eee_sw_timer_en) { 341be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 342be1c7eaeSVineetha G. Jaya Kumaran return; 343be1c7eaeSVineetha G. Jaya Kumaran } 344be1c7eaeSVineetha G. 
Jaya Kumaran 345c10d4c82SJose Abreu stmmac_reset_eee_mode(priv, priv->hw); 346d765955dSGiuseppe CAVALLARO del_timer_sync(&priv->eee_ctrl_timer); 347d765955dSGiuseppe CAVALLARO priv->tx_path_in_lpi_mode = false; 348d765955dSGiuseppe CAVALLARO } 349d765955dSGiuseppe CAVALLARO 350d765955dSGiuseppe CAVALLARO /** 351732fdf0eSGiuseppe CAVALLARO * stmmac_eee_ctrl_timer - EEE TX SW timer. 352d0ea5cbdSJesse Brandeburg * @t: timer_list struct containing private info 353d765955dSGiuseppe CAVALLARO * Description: 35432ceabcaSGiuseppe CAVALLARO * if there is no data transfer and if we are not in LPI state, 355d765955dSGiuseppe CAVALLARO * then MAC Transmitter can be moved to LPI state. 356d765955dSGiuseppe CAVALLARO */ 357e99e88a9SKees Cook static void stmmac_eee_ctrl_timer(struct timer_list *t) 358d765955dSGiuseppe CAVALLARO { 359e99e88a9SKees Cook struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); 360d765955dSGiuseppe CAVALLARO 361d765955dSGiuseppe CAVALLARO stmmac_enable_eee_mode(priv); 362388e201dSVineetha G. Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 363d765955dSGiuseppe CAVALLARO } 364d765955dSGiuseppe CAVALLARO 365d765955dSGiuseppe CAVALLARO /** 366732fdf0eSGiuseppe CAVALLARO * stmmac_eee_init - init EEE 36732ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 368d765955dSGiuseppe CAVALLARO * Description: 369732fdf0eSGiuseppe CAVALLARO * if the GMAC supports the EEE (from the HW cap reg) and the phy device 370732fdf0eSGiuseppe CAVALLARO * can also manage EEE, this function enable the LPI state and start related 371732fdf0eSGiuseppe CAVALLARO * timer. 372d765955dSGiuseppe CAVALLARO */ 373d765955dSGiuseppe CAVALLARO bool stmmac_eee_init(struct stmmac_priv *priv) 374d765955dSGiuseppe CAVALLARO { 375388e201dSVineetha G. 
Jaya Kumaran int eee_tw_timer = priv->eee_tw_timer; 376879626e3SJerome Brunet 377f5351ef7SGiuseppe CAVALLARO /* Using PCS we cannot dial with the phy registers at this stage 378f5351ef7SGiuseppe CAVALLARO * so we do not support extra feature like EEE. 379f5351ef7SGiuseppe CAVALLARO */ 380a47b9e15SDejin Zheng if (priv->hw->pcs == STMMAC_PCS_TBI || 381a47b9e15SDejin Zheng priv->hw->pcs == STMMAC_PCS_RTBI) 38274371272SJose Abreu return false; 383f5351ef7SGiuseppe CAVALLARO 38474371272SJose Abreu /* Check if MAC core supports the EEE feature. */ 38574371272SJose Abreu if (!priv->dma_cap.eee) 38674371272SJose Abreu return false; 387d765955dSGiuseppe CAVALLARO 38829555fa3SThierry Reding mutex_lock(&priv->lock); 38974371272SJose Abreu 39074371272SJose Abreu /* Check if it needs to be deactivated */ 391177d935aSJon Hunter if (!priv->eee_active) { 392177d935aSJon Hunter if (priv->eee_enabled) { 39338ddc59dSLABBE Corentin netdev_dbg(priv->dev, "disable EEE\n"); 394be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 39583bf79b6SGiuseppe CAVALLARO del_timer_sync(&priv->eee_ctrl_timer); 396388e201dSVineetha G. Jaya Kumaran stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); 397177d935aSJon Hunter } 3980867bb97SJon Hunter mutex_unlock(&priv->lock); 39974371272SJose Abreu return false; 40074371272SJose Abreu } 40174371272SJose Abreu 40274371272SJose Abreu if (priv->eee_active && !priv->eee_enabled) { 40374371272SJose Abreu timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); 40474371272SJose Abreu stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, 405388e201dSVineetha G. Jaya Kumaran eee_tw_timer); 40683bf79b6SGiuseppe CAVALLARO } 40774371272SJose Abreu 408be1c7eaeSVineetha G. Jaya Kumaran if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { 409be1c7eaeSVineetha G. Jaya Kumaran del_timer_sync(&priv->eee_ctrl_timer); 410be1c7eaeSVineetha G. Jaya Kumaran priv->tx_path_in_lpi_mode = false; 411be1c7eaeSVineetha G. 
Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 1); 412be1c7eaeSVineetha G. Jaya Kumaran } else { 413be1c7eaeSVineetha G. Jaya Kumaran stmmac_lpi_entry_timer_config(priv, 0); 414be1c7eaeSVineetha G. Jaya Kumaran mod_timer(&priv->eee_ctrl_timer, 415be1c7eaeSVineetha G. Jaya Kumaran STMMAC_LPI_T(priv->tx_lpi_timer)); 416be1c7eaeSVineetha G. Jaya Kumaran } 417388e201dSVineetha G. Jaya Kumaran 41829555fa3SThierry Reding mutex_unlock(&priv->lock); 41938ddc59dSLABBE Corentin netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); 42074371272SJose Abreu return true; 421d765955dSGiuseppe CAVALLARO } 422d765955dSGiuseppe CAVALLARO 423732fdf0eSGiuseppe CAVALLARO /* stmmac_get_tx_hwtstamp - get HW TX timestamps 42432ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 425ba1ffd74SGiuseppe CAVALLARO * @p : descriptor pointer 426891434b1SRayagond Kokatanur * @skb : the socket buffer 427891434b1SRayagond Kokatanur * Description : 428891434b1SRayagond Kokatanur * This function will read timestamp from the descriptor & pass it to stack. 429891434b1SRayagond Kokatanur * and also perform some sanity checks. 
430891434b1SRayagond Kokatanur */ 431891434b1SRayagond Kokatanur static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, 432ba1ffd74SGiuseppe CAVALLARO struct dma_desc *p, struct sk_buff *skb) 433891434b1SRayagond Kokatanur { 434891434b1SRayagond Kokatanur struct skb_shared_hwtstamps shhwtstamp; 43525e80cd0SJose Abreu bool found = false; 436df103170SNathan Chancellor u64 ns = 0; 437891434b1SRayagond Kokatanur 438891434b1SRayagond Kokatanur if (!priv->hwts_tx_en) 439891434b1SRayagond Kokatanur return; 440891434b1SRayagond Kokatanur 441ceb69499SGiuseppe CAVALLARO /* exit if skb doesn't support hw tstamp */ 44275e4364fSdamuzi000 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) 443891434b1SRayagond Kokatanur return; 444891434b1SRayagond Kokatanur 445891434b1SRayagond Kokatanur /* check tx tstamp status */ 44642de047dSJose Abreu if (stmmac_get_tx_timestamp_status(priv, p)) { 44742de047dSJose Abreu stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); 44825e80cd0SJose Abreu found = true; 44925e80cd0SJose Abreu } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { 45025e80cd0SJose Abreu found = true; 45125e80cd0SJose Abreu } 452891434b1SRayagond Kokatanur 45325e80cd0SJose Abreu if (found) { 454891434b1SRayagond Kokatanur memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 455891434b1SRayagond Kokatanur shhwtstamp.hwtstamp = ns_to_ktime(ns); 456ba1ffd74SGiuseppe CAVALLARO 45733d4c482SMario Molitor netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); 458891434b1SRayagond Kokatanur /* pass tstamp to stack */ 459891434b1SRayagond Kokatanur skb_tstamp_tx(skb, &shhwtstamp); 460ba1ffd74SGiuseppe CAVALLARO } 461891434b1SRayagond Kokatanur } 462891434b1SRayagond Kokatanur 463732fdf0eSGiuseppe CAVALLARO /* stmmac_get_rx_hwtstamp - get HW RX timestamps 46432ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 465ba1ffd74SGiuseppe CAVALLARO * @p : descriptor pointer 466ba1ffd74SGiuseppe CAVALLARO * @np : next descriptor 
pointer 467891434b1SRayagond Kokatanur * @skb : the socket buffer 468891434b1SRayagond Kokatanur * Description : 469891434b1SRayagond Kokatanur * This function will read received packet's timestamp from the descriptor 470891434b1SRayagond Kokatanur * and pass it to stack. It also perform some sanity checks. 471891434b1SRayagond Kokatanur */ 472ba1ffd74SGiuseppe CAVALLARO static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, 473ba1ffd74SGiuseppe CAVALLARO struct dma_desc *np, struct sk_buff *skb) 474891434b1SRayagond Kokatanur { 475891434b1SRayagond Kokatanur struct skb_shared_hwtstamps *shhwtstamp = NULL; 47698870943SJose Abreu struct dma_desc *desc = p; 477df103170SNathan Chancellor u64 ns = 0; 478891434b1SRayagond Kokatanur 479891434b1SRayagond Kokatanur if (!priv->hwts_rx_en) 480891434b1SRayagond Kokatanur return; 481ba1ffd74SGiuseppe CAVALLARO /* For GMAC4, the valid timestamp is from CTX next desc. */ 4827d9e6c5aSJose Abreu if (priv->plat->has_gmac4 || priv->plat->has_xgmac) 48398870943SJose Abreu desc = np; 484891434b1SRayagond Kokatanur 48598870943SJose Abreu /* Check if timestamp is available */ 48642de047dSJose Abreu if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { 48742de047dSJose Abreu stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); 48833d4c482SMario Molitor netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 489891434b1SRayagond Kokatanur shhwtstamp = skb_hwtstamps(skb); 490891434b1SRayagond Kokatanur memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 491891434b1SRayagond Kokatanur shhwtstamp->hwtstamp = ns_to_ktime(ns); 492ba1ffd74SGiuseppe CAVALLARO } else { 49333d4c482SMario Molitor netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); 494ba1ffd74SGiuseppe CAVALLARO } 495891434b1SRayagond Kokatanur } 496891434b1SRayagond Kokatanur 497891434b1SRayagond Kokatanur /** 498d6228b7cSArtem Panfilov * stmmac_hwtstamp_set - control hardware timestamping. 
499891434b1SRayagond Kokatanur * @dev: device pointer. 5008d45e42bSLABBE Corentin * @ifr: An IOCTL specific structure, that can contain a pointer to 501891434b1SRayagond Kokatanur * a proprietary structure used to pass information to the driver. 502891434b1SRayagond Kokatanur * Description: 503891434b1SRayagond Kokatanur * This function configures the MAC to enable/disable both outgoing(TX) 504891434b1SRayagond Kokatanur * and incoming(RX) packets time stamping based on user input. 505891434b1SRayagond Kokatanur * Return Value: 506891434b1SRayagond Kokatanur * 0 on success and an appropriate -ve integer on failure. 507891434b1SRayagond Kokatanur */ 508d6228b7cSArtem Panfilov static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 509891434b1SRayagond Kokatanur { 510891434b1SRayagond Kokatanur struct stmmac_priv *priv = netdev_priv(dev); 511891434b1SRayagond Kokatanur struct hwtstamp_config config; 5120a624155SArnd Bergmann struct timespec64 now; 513891434b1SRayagond Kokatanur u64 temp = 0; 514891434b1SRayagond Kokatanur u32 ptp_v2 = 0; 515891434b1SRayagond Kokatanur u32 tstamp_all = 0; 516891434b1SRayagond Kokatanur u32 ptp_over_ipv4_udp = 0; 517891434b1SRayagond Kokatanur u32 ptp_over_ipv6_udp = 0; 518891434b1SRayagond Kokatanur u32 ptp_over_ethernet = 0; 519891434b1SRayagond Kokatanur u32 snap_type_sel = 0; 520891434b1SRayagond Kokatanur u32 ts_master_en = 0; 521891434b1SRayagond Kokatanur u32 ts_event_en = 0; 522df103170SNathan Chancellor u32 sec_inc = 0; 523891434b1SRayagond Kokatanur u32 value = 0; 5247d9e6c5aSJose Abreu bool xmac; 5257d9e6c5aSJose Abreu 5267d9e6c5aSJose Abreu xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 527891434b1SRayagond Kokatanur 528891434b1SRayagond Kokatanur if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { 529891434b1SRayagond Kokatanur netdev_alert(priv->dev, "No support for HW time stamping\n"); 530891434b1SRayagond Kokatanur priv->hwts_tx_en = 0; 531891434b1SRayagond Kokatanur priv->hwts_rx_en = 0; 
532891434b1SRayagond Kokatanur 533891434b1SRayagond Kokatanur return -EOPNOTSUPP; 534891434b1SRayagond Kokatanur } 535891434b1SRayagond Kokatanur 536891434b1SRayagond Kokatanur if (copy_from_user(&config, ifr->ifr_data, 537d6228b7cSArtem Panfilov sizeof(config))) 538891434b1SRayagond Kokatanur return -EFAULT; 539891434b1SRayagond Kokatanur 54038ddc59dSLABBE Corentin netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", 541891434b1SRayagond Kokatanur __func__, config.flags, config.tx_type, config.rx_filter); 542891434b1SRayagond Kokatanur 543891434b1SRayagond Kokatanur /* reserved for future extensions */ 544891434b1SRayagond Kokatanur if (config.flags) 545891434b1SRayagond Kokatanur return -EINVAL; 546891434b1SRayagond Kokatanur 5475f3da328SBen Hutchings if (config.tx_type != HWTSTAMP_TX_OFF && 5485f3da328SBen Hutchings config.tx_type != HWTSTAMP_TX_ON) 549891434b1SRayagond Kokatanur return -ERANGE; 550891434b1SRayagond Kokatanur 551891434b1SRayagond Kokatanur if (priv->adv_ts) { 552891434b1SRayagond Kokatanur switch (config.rx_filter) { 553891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_NONE: 554ceb69499SGiuseppe CAVALLARO /* time stamp no incoming packet at all */ 555891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_NONE; 556891434b1SRayagond Kokatanur break; 557891434b1SRayagond Kokatanur 558891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 559ceb69499SGiuseppe CAVALLARO /* PTP v1, UDP, any kind of event packet */ 560891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 5617d8e249fSIlias Apalodimas /* 'xmac' hardware can support Sync, Pdelay_Req and 5627d8e249fSIlias Apalodimas * Pdelay_resp by setting bit14 and bits17/16 to 01 5637d8e249fSIlias Apalodimas * This leaves Delay_Req timestamps out. 
5647d8e249fSIlias Apalodimas * Enable all events *and* general purpose message 5657d8e249fSIlias Apalodimas * timestamping 5667d8e249fSIlias Apalodimas */ 567891434b1SRayagond Kokatanur snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 568891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 569891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 570891434b1SRayagond Kokatanur break; 571891434b1SRayagond Kokatanur 572891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 573ceb69499SGiuseppe CAVALLARO /* PTP v1, UDP, Sync packet */ 574891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 575891434b1SRayagond Kokatanur /* take time stamp for SYNC messages only */ 576891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 577891434b1SRayagond Kokatanur 578891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 579891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 580891434b1SRayagond Kokatanur break; 581891434b1SRayagond Kokatanur 582891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 583ceb69499SGiuseppe CAVALLARO /* PTP v1, UDP, Delay_req packet */ 584891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 585891434b1SRayagond Kokatanur /* take time stamp for Delay_Req messages only */ 586891434b1SRayagond Kokatanur ts_master_en = PTP_TCR_TSMSTRENA; 587891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 588891434b1SRayagond Kokatanur 589891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 590891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 591891434b1SRayagond Kokatanur break; 592891434b1SRayagond Kokatanur 593891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 594ceb69499SGiuseppe CAVALLARO /* PTP v2, UDP, any kind of event packet */ 595891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 596891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 
597891434b1SRayagond Kokatanur /* take time stamp for all event messages */ 598891434b1SRayagond Kokatanur snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 599891434b1SRayagond Kokatanur 600891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 601891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 602891434b1SRayagond Kokatanur break; 603891434b1SRayagond Kokatanur 604891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 605ceb69499SGiuseppe CAVALLARO /* PTP v2, UDP, Sync packet */ 606891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 607891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 608891434b1SRayagond Kokatanur /* take time stamp for SYNC messages only */ 609891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 610891434b1SRayagond Kokatanur 611891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 612891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 613891434b1SRayagond Kokatanur break; 614891434b1SRayagond Kokatanur 615891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 616ceb69499SGiuseppe CAVALLARO /* PTP v2, UDP, Delay_req packet */ 617891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 618891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 619891434b1SRayagond Kokatanur /* take time stamp for Delay_Req messages only */ 620891434b1SRayagond Kokatanur ts_master_en = PTP_TCR_TSMSTRENA; 621891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 622891434b1SRayagond Kokatanur 623891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 624891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 625891434b1SRayagond Kokatanur break; 626891434b1SRayagond Kokatanur 627891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_EVENT: 628ceb69499SGiuseppe CAVALLARO /* PTP v2/802.AS1 any layer, any kind of event packet */ 629891434b1SRayagond Kokatanur config.rx_filter = 
HWTSTAMP_FILTER_PTP_V2_EVENT; 630891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 631891434b1SRayagond Kokatanur snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 632f2fb6b62SFugang Duan if (priv->synopsys_id != DWMAC_CORE_5_10) 63314f34733SJose Abreu ts_event_en = PTP_TCR_TSEVNTENA; 634891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 635891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 636891434b1SRayagond Kokatanur ptp_over_ethernet = PTP_TCR_TSIPENA; 637891434b1SRayagond Kokatanur break; 638891434b1SRayagond Kokatanur 639891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_SYNC: 640ceb69499SGiuseppe CAVALLARO /* PTP v2/802.AS1, any layer, Sync packet */ 641891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 642891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 643891434b1SRayagond Kokatanur /* take time stamp for SYNC messages only */ 644891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 645891434b1SRayagond Kokatanur 646891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 647891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 648891434b1SRayagond Kokatanur ptp_over_ethernet = PTP_TCR_TSIPENA; 649891434b1SRayagond Kokatanur break; 650891434b1SRayagond Kokatanur 651891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 652ceb69499SGiuseppe CAVALLARO /* PTP v2/802.AS1, any layer, Delay_req packet */ 653891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 654891434b1SRayagond Kokatanur ptp_v2 = PTP_TCR_TSVER2ENA; 655891434b1SRayagond Kokatanur /* take time stamp for Delay_Req messages only */ 656891434b1SRayagond Kokatanur ts_master_en = PTP_TCR_TSMSTRENA; 657891434b1SRayagond Kokatanur ts_event_en = PTP_TCR_TSEVNTENA; 658891434b1SRayagond Kokatanur 659891434b1SRayagond Kokatanur ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 660891434b1SRayagond Kokatanur ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 661891434b1SRayagond Kokatanur 
ptp_over_ethernet = PTP_TCR_TSIPENA; 662891434b1SRayagond Kokatanur break; 663891434b1SRayagond Kokatanur 664e3412575SMiroslav Lichvar case HWTSTAMP_FILTER_NTP_ALL: 665891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_ALL: 666ceb69499SGiuseppe CAVALLARO /* time stamp any incoming packet */ 667891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_ALL; 668891434b1SRayagond Kokatanur tstamp_all = PTP_TCR_TSENALL; 669891434b1SRayagond Kokatanur break; 670891434b1SRayagond Kokatanur 671891434b1SRayagond Kokatanur default: 672891434b1SRayagond Kokatanur return -ERANGE; 673891434b1SRayagond Kokatanur } 674891434b1SRayagond Kokatanur } else { 675891434b1SRayagond Kokatanur switch (config.rx_filter) { 676891434b1SRayagond Kokatanur case HWTSTAMP_FILTER_NONE: 677891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_NONE; 678891434b1SRayagond Kokatanur break; 679891434b1SRayagond Kokatanur default: 680891434b1SRayagond Kokatanur /* PTP v1, UDP, any kind of event packet */ 681891434b1SRayagond Kokatanur config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 682891434b1SRayagond Kokatanur break; 683891434b1SRayagond Kokatanur } 684891434b1SRayagond Kokatanur } 685891434b1SRayagond Kokatanur priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 
0 : 1); 6865f3da328SBen Hutchings priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; 687891434b1SRayagond Kokatanur 688891434b1SRayagond Kokatanur if (!priv->hwts_tx_en && !priv->hwts_rx_en) 689cc4c9001SJose Abreu stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); 690891434b1SRayagond Kokatanur else { 691891434b1SRayagond Kokatanur value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | 692891434b1SRayagond Kokatanur tstamp_all | ptp_v2 | ptp_over_ethernet | 693891434b1SRayagond Kokatanur ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | 694891434b1SRayagond Kokatanur ts_master_en | snap_type_sel); 695cc4c9001SJose Abreu stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); 696891434b1SRayagond Kokatanur 697891434b1SRayagond Kokatanur /* program Sub Second Increment reg */ 698cc4c9001SJose Abreu stmmac_config_sub_second_increment(priv, 699f573c0b9Sjpinto priv->ptpaddr, priv->plat->clk_ptp_rate, 7007d9e6c5aSJose Abreu xmac, &sec_inc); 70119d857c9SPhil Reid temp = div_u64(1000000000ULL, sec_inc); 702891434b1SRayagond Kokatanur 7039a8a02c9SJose Abreu /* Store sub second increment and flags for later use */ 7049a8a02c9SJose Abreu priv->sub_second_inc = sec_inc; 7059a8a02c9SJose Abreu priv->systime_flags = value; 7069a8a02c9SJose Abreu 707891434b1SRayagond Kokatanur /* calculate default added value: 708891434b1SRayagond Kokatanur * formula is : 709891434b1SRayagond Kokatanur * addend = (2^32)/freq_div_ratio; 71019d857c9SPhil Reid * where, freq_div_ratio = 1e9ns/sec_inc 711891434b1SRayagond Kokatanur */ 71219d857c9SPhil Reid temp = (u64)(temp << 32); 713f573c0b9Sjpinto priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); 714cc4c9001SJose Abreu stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); 715891434b1SRayagond Kokatanur 716891434b1SRayagond Kokatanur /* initialize system time */ 7170a624155SArnd Bergmann ktime_get_real_ts64(&now); 7180a624155SArnd Bergmann 7190a624155SArnd Bergmann /* lower 32 bits of tv_sec are safe until 
y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				    (u32)now.tv_sec, now.tv_nsec);
	}

	/* Cache the accepted config so stmmac_hwtstamp_get() can report it */
	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * (the config cached by the last successful SIOCSHWTSTAMP) as requested.
 * Return: 0 on success, -EOPNOTSUPP if the HW has no timestamping unit,
 * -EFAULT if the config cannot be copied to user space.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 * Return: 0 on success, -EOPNOTSUPP if the HW has no timestamping support.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	/* dwmac4 and xgmac cores share the "xmac" advanced-timestamp path */
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	/* Timestamping starts disabled; SIOCSHWTSTAMP enables it on demand */
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

/**
 * stmmac_release_ptp - release PTP resources
 * @priv: driver private structure
 * Description: disables the PTP reference clock and unregisters the PTP
 * clock driver; undoes stmmac_init_ptp().
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

/* phylink .validate callback: restrict @supported/@state->advertising to
 * the link modes this MAC can do.  @mac_supported collects everything the
 * MAC supports; @mask collects modes that must be removed.
 */
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	/* Baseline 10/100/1000 modes supported by every core */
	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		/* XGMAC cores add >1G modes, each gated on plat->max_speed
		 * (0 means "no limit configured")
		 */
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	/* supported/advertising = (input & mac_supported) & ~mask */
	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}

/* phylink .mac_pcs_get_state callback: report link state from the XPCS.
 * Link defaults to down; stmmac_xpcs_get_state() fills in the rest.
 */
static void stmmac_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	state->link = 0;
	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
}

/* phylink .mac_config callback: only the XPCS needs reconfiguration here;
 * speed/duplex are programmed in stmmac_mac_link_up().
 */
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

/* phylink .mac_link_down callback: disable the MAC and tear down EEE */
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);
}

/* phylink .mac_link_up callback: program MAC_CTRL_REG speed/duplex bits for
 * the resolved link, configure flow control, re-enable the MAC and set up
 * EEE when a PHY is present and the HW supports it.
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	/* Speed-bit encoding depends on the interface mode; an unsupported
	 * speed returns early and leaves the MAC untouched.
	 */
	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	/* Platform hook, e.g. for glue-layer clock reconfiguration */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
	.mac_config = stmmac_mac_config,
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	/* Propagate the PHY's Wake-on-LAN capability to the device */
	phylink_ethtool_get_wol(priv->phylink, &wol);
	device_set_wakeup_capable(priv->device, !!wol.supported);

	return ret;
}

/**
 * stmmac_phy_setup - create the phylink instance
 * @priv: driver private structure
 * Description: creates and stores the phylink instance for this device,
 * using the DT phylink node when available, otherwise the device fwnode.
 * Return: 0 on success, a negative PTR_ERR() code on failure.
 */
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

/* Dump every RX ring (debug helper); descriptor size depends on whether
 * extended descriptors are in use.
 */
static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
Zhang rx_q->dma_rx_phy, desc_size); 11575bacd778SLABBE Corentin } 115854139cf3SJoao Pinto } 1159d0225e7dSAlexandre TORGUE 116071fedb01SJoao Pinto static void stmmac_display_tx_rings(struct stmmac_priv *priv) 116171fedb01SJoao Pinto { 1162ce736788SJoao Pinto u32 tx_cnt = priv->plat->tx_queues_to_use; 1163bfaf91caSJoakim Zhang unsigned int desc_size; 116471fedb01SJoao Pinto void *head_tx; 1165ce736788SJoao Pinto u32 queue; 1166ce736788SJoao Pinto 1167ce736788SJoao Pinto /* Display TX rings */ 1168ce736788SJoao Pinto for (queue = 0; queue < tx_cnt; queue++) { 1169ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1170ce736788SJoao Pinto 1171ce736788SJoao Pinto pr_info("\tTX Queue %d rings\n", queue); 117271fedb01SJoao Pinto 1173bfaf91caSJoakim Zhang if (priv->extend_desc) { 1174ce736788SJoao Pinto head_tx = (void *)tx_q->dma_etx; 1175bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_extended_desc); 1176bfaf91caSJoakim Zhang } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1177579a25a8SJose Abreu head_tx = (void *)tx_q->dma_entx; 1178bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_edesc); 1179bfaf91caSJoakim Zhang } else { 1180ce736788SJoao Pinto head_tx = (void *)tx_q->dma_tx; 1181bfaf91caSJoakim Zhang desc_size = sizeof(struct dma_desc); 1182bfaf91caSJoakim Zhang } 118371fedb01SJoao Pinto 1184bfaf91caSJoakim Zhang stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false, 1185bfaf91caSJoakim Zhang tx_q->dma_tx_phy, desc_size); 1186c24602efSGiuseppe CAVALLARO } 1187ce736788SJoao Pinto } 1188c24602efSGiuseppe CAVALLARO 118971fedb01SJoao Pinto static void stmmac_display_rings(struct stmmac_priv *priv) 119071fedb01SJoao Pinto { 119171fedb01SJoao Pinto /* Display RX ring */ 119271fedb01SJoao Pinto stmmac_display_rx_rings(priv); 119371fedb01SJoao Pinto 119471fedb01SJoao Pinto /* Display TX ring */ 119571fedb01SJoao Pinto stmmac_display_tx_rings(priv); 119671fedb01SJoao Pinto } 119771fedb01SJoao Pinto 1198286a8372SGiuseppe CAVALLARO static int 
stmmac_set_bfsize(int mtu, int bufsize) 1199286a8372SGiuseppe CAVALLARO { 1200286a8372SGiuseppe CAVALLARO int ret = bufsize; 1201286a8372SGiuseppe CAVALLARO 1202b2f3a481SJose Abreu if (mtu >= BUF_SIZE_8KiB) 1203b2f3a481SJose Abreu ret = BUF_SIZE_16KiB; 1204b2f3a481SJose Abreu else if (mtu >= BUF_SIZE_4KiB) 1205286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_8KiB; 1206286a8372SGiuseppe CAVALLARO else if (mtu >= BUF_SIZE_2KiB) 1207286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_4KiB; 1208d916701cSGiuseppe CAVALLARO else if (mtu > DEFAULT_BUFSIZE) 1209286a8372SGiuseppe CAVALLARO ret = BUF_SIZE_2KiB; 1210286a8372SGiuseppe CAVALLARO else 1211d916701cSGiuseppe CAVALLARO ret = DEFAULT_BUFSIZE; 1212286a8372SGiuseppe CAVALLARO 1213286a8372SGiuseppe CAVALLARO return ret; 1214286a8372SGiuseppe CAVALLARO } 1215286a8372SGiuseppe CAVALLARO 121632ceabcaSGiuseppe CAVALLARO /** 121771fedb01SJoao Pinto * stmmac_clear_rx_descriptors - clear RX descriptors 121832ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 121954139cf3SJoao Pinto * @queue: RX queue index 122071fedb01SJoao Pinto * Description: this function is called to clear the RX descriptors 122132ceabcaSGiuseppe CAVALLARO * in case of both basic and extended descriptors are used. 
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors: re-initialize every entry of the ring,
	 * flagging the last one so the DMA engine wraps correctly.
	 */
	for (i = 0; i < priv->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors: pick the descriptor flavour per ring
	 * configuration (extended, TBS enhanced, or basic) and re-init it.
	 */
	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	/* NOTE(review): @flags is unused here; pages come from the page_pool,
	 * not a direct GFP allocation.
	 */
	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
	if (!buf->page)
		return -ENOMEM;

	if (priv->sph) {
		/* Split Header mode: a second page holds the payload.
		 * NOTE(review): on this failure buf->page stays allocated;
		 * the caller's unwind must release index @i too.
		 */
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page);
	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	/* Return both the primary and (if any) split-header page to the
	 * page pool; safe to call on an entry that was never populated.
	 */
	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	/* Undo the streaming DMA mapping with the API matching how the
	 * buffer was mapped (page fragment vs. linear skb data).
	 */
	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Release the skb and reset the bookkeeping for this slot */
	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
 * @priv: driver private structure
 * Description: this function is called to re-allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;
	int i;

	/* First pass: recycle every page of every RX queue back to its
	 * page pool, leaving all buffer slots empty.
	 */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		for (i = 0; i < priv->dma_rx_size; i++) {
			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

			if (buf->page) {
				page_pool_recycle_direct(rx_q->page_pool, buf->page);
				buf->page = NULL;
			}

			if (priv->sph && buf->sec_page) {
				page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
				buf->sec_page = NULL;
			}
		}
	}

	/* Second pass: re-populate every slot and rewrite the descriptor
	 * buffer addresses.
	 */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		for (i = 0; i < priv->dma_rx_size; i++) {
			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			if (!buf->page) {
				buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
				if (!buf->page)
					goto err_reinit_rx_buffers;

				buf->addr = page_pool_get_dma_addr(buf->page);
			}

			if (priv->sph && !buf->sec_page) {
				buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
				if (!buf->sec_page)
					goto err_reinit_rx_buffers;

				buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
			}

			stmmac_set_desc_addr(priv, p, buf->addr);
			if (priv->sph)
				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
			else
				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
			if (priv->dma_buf_sz == BUF_SIZE_16KiB)
				stmmac_init_desc3(priv, p);
		}
	}

	return;

	/* Unwind: free what was re-populated, newest queue first.
	 * NOTE(review): the pre-decrement skips the failing index @i; if the
	 * primary page was allocated but the sec_page allocation failed, that
	 * page appears to be leaked here — verify against the page_pool
	 * accounting before changing.
	 */
err_reinit_rx_buffers:
	do {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = priv->dma_rx_size;
	} while (queue-- > 0);
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
146871fedb01SJoao Pinto * Description: this function initializes the DMA RX descriptors 14695bacd778SLABBE Corentin * and allocates the socket buffers. It supports the chained and ring 1470286a8372SGiuseppe CAVALLARO * modes. 14717ac6653aSJeff Kirsher */ 147271fedb01SJoao Pinto static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) 14737ac6653aSJeff Kirsher { 14747ac6653aSJeff Kirsher struct stmmac_priv *priv = netdev_priv(dev); 147554139cf3SJoao Pinto u32 rx_count = priv->plat->rx_queues_to_use; 14765bacd778SLABBE Corentin int ret = -ENOMEM; 14771d3028f4SColin Ian King int queue; 147854139cf3SJoao Pinto int i; 14797ac6653aSJeff Kirsher 148054139cf3SJoao Pinto /* RX INITIALIZATION */ 14815bacd778SLABBE Corentin netif_dbg(priv, probe, priv->dev, 14825bacd778SLABBE Corentin "SKB addresses:\nskb\t\tskb data\tdma data\n"); 14835bacd778SLABBE Corentin 148454139cf3SJoao Pinto for (queue = 0; queue < rx_count; queue++) { 148554139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 148654139cf3SJoao Pinto 148754139cf3SJoao Pinto netif_dbg(priv, probe, priv->dev, 148854139cf3SJoao Pinto "(%s) dma_rx_phy=0x%08x\n", __func__, 148954139cf3SJoao Pinto (u32)rx_q->dma_rx_phy); 149054139cf3SJoao Pinto 1491cbcf0999SJose Abreu stmmac_clear_rx_descriptors(priv, queue); 1492cbcf0999SJose Abreu 1493aa042f60SSong, Yoong Siang for (i = 0; i < priv->dma_rx_size; i++) { 14945bacd778SLABBE Corentin struct dma_desc *p; 14955bacd778SLABBE Corentin 149654139cf3SJoao Pinto if (priv->extend_desc) 149754139cf3SJoao Pinto p = &((rx_q->dma_erx + i)->basic); 149854139cf3SJoao Pinto else 149954139cf3SJoao Pinto p = rx_q->dma_rx + i; 150054139cf3SJoao Pinto 150154139cf3SJoao Pinto ret = stmmac_init_rx_buffers(priv, p, i, flags, 150254139cf3SJoao Pinto queue); 15035bacd778SLABBE Corentin if (ret) 15045bacd778SLABBE Corentin goto err_init_rx_buffers; 15055bacd778SLABBE Corentin } 150654139cf3SJoao Pinto 150754139cf3SJoao Pinto rx_q->cur_rx = 0; 1508aa042f60SSong, Yoong 
Siang rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); 150954139cf3SJoao Pinto 1510c24602efSGiuseppe CAVALLARO /* Setup the chained descriptor addresses */ 1511c24602efSGiuseppe CAVALLARO if (priv->mode == STMMAC_CHAIN_MODE) { 151271fedb01SJoao Pinto if (priv->extend_desc) 15132c520b1cSJose Abreu stmmac_mode_init(priv, rx_q->dma_erx, 1514aa042f60SSong, Yoong Siang rx_q->dma_rx_phy, 1515aa042f60SSong, Yoong Siang priv->dma_rx_size, 1); 151671fedb01SJoao Pinto else 15172c520b1cSJose Abreu stmmac_mode_init(priv, rx_q->dma_rx, 1518aa042f60SSong, Yoong Siang rx_q->dma_rx_phy, 1519aa042f60SSong, Yoong Siang priv->dma_rx_size, 0); 152071fedb01SJoao Pinto } 152154139cf3SJoao Pinto } 152254139cf3SJoao Pinto 152371fedb01SJoao Pinto return 0; 152454139cf3SJoao Pinto 152571fedb01SJoao Pinto err_init_rx_buffers: 152654139cf3SJoao Pinto while (queue >= 0) { 152771fedb01SJoao Pinto while (--i >= 0) 152854139cf3SJoao Pinto stmmac_free_rx_buffer(priv, queue, i); 152954139cf3SJoao Pinto 153054139cf3SJoao Pinto if (queue == 0) 153154139cf3SJoao Pinto break; 153254139cf3SJoao Pinto 1533aa042f60SSong, Yoong Siang i = priv->dma_rx_size; 153454139cf3SJoao Pinto queue--; 153554139cf3SJoao Pinto } 153654139cf3SJoao Pinto 153771fedb01SJoao Pinto return ret; 153871fedb01SJoao Pinto } 153971fedb01SJoao Pinto 154071fedb01SJoao Pinto /** 154171fedb01SJoao Pinto * init_dma_tx_desc_rings - init the TX descriptor rings 154271fedb01SJoao Pinto * @dev: net device structure. 154371fedb01SJoao Pinto * Description: this function initializes the DMA TX descriptors 154471fedb01SJoao Pinto * and allocates the socket buffers. It supports the chained and ring 154571fedb01SJoao Pinto * modes. 
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			  (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, tx_q->dma_etx,
						 tx_q->dma_tx_phy,
						 priv->dma_tx_size, 1);
			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
				stmmac_mode_init(priv, tx_q->dma_tx,
						 tx_q->dma_tx_phy,
						 priv->dma_tx_size, 0);
		}

		/* Zero every descriptor and its software bookkeeping */
		for (i = 0; i < priv->dma_tx_size; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
				p = &((tx_q->dma_entx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			stmmac_clear_desc(priv, p);

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	/* NOTE(review): the TX init return value is only propagated after the
	 * clear/display calls below.
	 */
	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * stmmac_free_tx_skbufs - free TX skb buffers
 * @priv: private structure
 */
static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
{
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		dma_free_tx_skbufs(priv, queue);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device, priv->dma_rx_size *
					  sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, priv->dma_rx_size *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		kfree(rx_q->buf_pool);
		if (rx_q->page_pool)
			page_pool_destroy(rx_q->page_pool);
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
		size_t size;
		void *addr;

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		/* The ring's element size/base depend on the configured
		 * descriptor flavour; mirror the allocation logic.
		 */
		if (priv->extend_desc) {
			size = sizeof(struct dma_extended_desc);
			addr = tx_q->dma_etx;
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			size = sizeof(struct dma_edesc);
			addr = tx_q->dma_entx;
		} else {
			size = sizeof(struct dma_desc);
			addr = tx_q->dma_tx;
		}

		size *= priv->dma_tx_size;

		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
		struct page_pool_params pp_params = { 0 };
		unsigned int num_pages;

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		/* One page-pool per queue; the page order must cover a full
		 * DMA buffer (dma_buf_sz may exceed PAGE_SIZE).
		 */
		pp_params.flags = PP_FLAG_DMA_MAP;
		pp_params.pool_size = priv->dma_rx_size;
		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
		pp_params.order = ilog2(num_pages);
		pp_params.nid = dev_to_node(priv->device);
		pp_params.dev = priv->device;
		pp_params.dma_dir = DMA_FROM_DEVICE;

		rx_q->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rx_q->page_pool)) {
			ret = PTR_ERR(rx_q->page_pool);
			/* NULL the pointer so the unwind path can tell an
			 * un-created pool from a valid one.
			 */
			rx_q->page_pool = NULL;
			goto err_dma;
		}

		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
					 sizeof(*rx_q->buf_pool),
					 GFP_KERNEL);
		if (!rx_q->buf_pool)
			goto err_dma;

		/* Coherent memory for the descriptor ring itself */
		if (priv->extend_desc) {
			rx_q->dma_erx = dma_alloc_coherent(priv->device,
							   priv->dma_rx_size *
							   sizeof(struct dma_extended_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_alloc_coherent(priv->device,
							  priv->dma_rx_size *
							  sizeof(struct dma_desc),
							  &rx_q->dma_rx_phy,
							  GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	/* Releases every queue, including partially-initialized ones */
	free_dma_rx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
		size_t size;
		void *addr;

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
					      sizeof(*tx_q->tx_skbuff_dma),
					      GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
					  sizeof(struct sk_buff *),
					  GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		/* Descriptor element size depends on the ring flavour */
		if (priv->extend_desc)
			size = sizeof(struct dma_extended_desc);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			size = sizeof(struct dma_edesc);
		else
			size = sizeof(struct dma_desc);

		size *= priv->dma_tx_size;

		addr = dma_alloc_coherent(priv->device, size,
					  &tx_q->dma_tx_phy, GFP_KERNEL);
		if (!addr)
			goto err_dma;

		if (priv->extend_desc)
			tx_q->dma_etx = addr;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			tx_q->dma_entx = addr;
		else
			tx_q->dma_tx = addr;
	}

	return 0;

err_dma:
	/* Releases every queue, including partially-initialized ones */
	free_dma_tx_desc_resources(priv);
	return ret;
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
188071fedb01SJoao Pinto */ 188171fedb01SJoao Pinto static int alloc_dma_desc_resources(struct stmmac_priv *priv) 18825bacd778SLABBE Corentin { 188354139cf3SJoao Pinto /* RX Allocation */ 188471fedb01SJoao Pinto int ret = alloc_dma_rx_desc_resources(priv); 188571fedb01SJoao Pinto 188671fedb01SJoao Pinto if (ret) 188771fedb01SJoao Pinto return ret; 188871fedb01SJoao Pinto 188971fedb01SJoao Pinto ret = alloc_dma_tx_desc_resources(priv); 189071fedb01SJoao Pinto 189171fedb01SJoao Pinto return ret; 189271fedb01SJoao Pinto } 189371fedb01SJoao Pinto 189471fedb01SJoao Pinto /** 189571fedb01SJoao Pinto * free_dma_desc_resources - free dma desc resources 189671fedb01SJoao Pinto * @priv: private structure 189771fedb01SJoao Pinto */ 189871fedb01SJoao Pinto static void free_dma_desc_resources(struct stmmac_priv *priv) 189971fedb01SJoao Pinto { 190071fedb01SJoao Pinto /* Release the DMA RX socket buffers */ 190171fedb01SJoao Pinto free_dma_rx_desc_resources(priv); 190271fedb01SJoao Pinto 190371fedb01SJoao Pinto /* Release the DMA TX socket buffers */ 190471fedb01SJoao Pinto free_dma_tx_desc_resources(priv); 190571fedb01SJoao Pinto } 190671fedb01SJoao Pinto 190771fedb01SJoao Pinto /** 19089eb12474Sjpinto * stmmac_mac_enable_rx_queues - Enable MAC rx queues 19099eb12474Sjpinto * @priv: driver private structure 19109eb12474Sjpinto * Description: It is used for enabling the rx queues in the MAC 19119eb12474Sjpinto */ 19129eb12474Sjpinto static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 19139eb12474Sjpinto { 19144f6046f5SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 19154f6046f5SJoao Pinto int queue; 19164f6046f5SJoao Pinto u8 mode; 19179eb12474Sjpinto 19184f6046f5SJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 19194f6046f5SJoao Pinto mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 1920c10d4c82SJose Abreu stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 19214f6046f5SJoao Pinto } 19229eb12474Sjpinto } 19239eb12474Sjpinto 
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 *  The choice between threshold (tc) and Store-And-Forward (SF_DMA_MODE)
 *  is driven by the platform flags force_thresh_dma_mode/force_sf_dma_mode
 *  and by whether TX checksum offload (tx_coe) is in use.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	/* Fall back to the FIFO sizes reported by the HW capability
	 * register when the platform does not provide them.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				   rxfifosz, qmode);
		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
				      chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				   txfifosz, qmode);
	}
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 *
@priv: driver private structure
 * @budget: napi budget limiting this functions packet handling
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission
 * completes: it unmaps the DMA buffers, frees the skbs, updates the
 * stats, and wakes the netdev queue when enough room is available again.
 * Runs under the netdev TX queue lock (__netif_tx_lock_bh) to serialize
 * against the xmit path.
 * Return: number of descriptors reclaimed (bounded by @budget).
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		/* Locate the descriptor for this entry according to the
		 * descriptor layout in use (extended / TBS / basic).
		 */
		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[entry].basic;
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		/* Unmap the buffer the way it was mapped on xmit */
		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	/* Report completions for BQL flow control */
	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	/* Re-enter LPI (EEE) once the TX path has drained, when the
	 * software LPI timer is in charge.
	 */
	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
	    priv->eee_sw_timer_en) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
			      HRTIMER_MODE_REL);

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	return count;
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

	/* Quiesce: stop the queue and the DMA before touching the ring */
	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	stmmac_clear_tx_descriptors(priv, chan);
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	/* Re-program the channel from scratch and restart it */
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}

/**
 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 *  @priv: driver private structure
 *  @txmode: TX operating mode
 *  @rxmode: RX operating mode
 *  @chan: channel index
 *  Description: it is used for configuring of the DMA operation mode in
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	/* Fall back to HW-reported FIFO sizes when not set by platform */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}

/**
 * stmmac_safety_feat_interrupt - handle safety feature interrupts
 * @priv: driver private structure
 * Description: queries the safety feature IRQ status and escalates to a
 * global error (driver reset path) on any error other than -EINVAL.
 * Return: true if a global error was raised, false otherwise.
 */
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}

/**
 * stmmac_napi_check - check DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * Description: reads the per-channel DMA interrupt status and, for each
 * direction with pending work, masks the corresponding DMA interrupt
 * (under the channel lock) and schedules the matching NAPI instance.
 * Return: the raw DMA interrupt status for the channel.
 */
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan);
	struct stmmac_channel *ch = &priv->channel[chan];
	unsigned long flags;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(&ch->rx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(&ch->rx_napi);
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		if (napi_schedule_prep(&ch->tx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(&ch->tx_napi);
		}
	}

	return status;
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedule poll method in case of some
 * work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	/* Gather status per channel; stmmac_napi_check() also schedules
	 * NAPI for any channel with pending RX/TX work.
	 */
	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								    tc,
								    SF_DMA_MODE,
								    chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			/* Unrecoverable TX fault: reset the channel */
			stmmac_tx_err(priv, chan);
		}
	}
}

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	/* Returns 1 when the HW capability register was read successfully */
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failures it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		/* Try the address programmed in the MAC first, then fall
		 * back to a random one.
		 */
		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case of these are not passed a default is kept for the MAC or GMAC.
 * Return: 0 on success, negative errno on invalid configuration or
 * failed DMA reset.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	/* Alternate (extended) descriptor size bit, ring mode only */
	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* RX tail pointer initially points past the whole ring */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (priv->dma_rx_size *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		/* TX tail pointer starts at the ring base */
		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}

/**
 * stmmac_tx_timer_arm - (re)arm the TX coalescing timer for a queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: starts the per-queue hrtimer that defers TX completion
 * processing, using the configured coalescing interval.
 */
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
		      HRTIMER_MODE_REL);
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
2477d0ea5cbdSJesse Brandeburg * @t: data pointer 24789125cdd1SGiuseppe CAVALLARO * Description: 24799125cdd1SGiuseppe CAVALLARO * This is the timer handler to directly invoke the stmmac_tx_clean. 24809125cdd1SGiuseppe CAVALLARO */ 2481d5a05e69SVincent Whitchurch static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 24829125cdd1SGiuseppe CAVALLARO { 2483d5a05e69SVincent Whitchurch struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 24848fce3331SJose Abreu struct stmmac_priv *priv = tx_q->priv_data; 24858fce3331SJose Abreu struct stmmac_channel *ch; 24869125cdd1SGiuseppe CAVALLARO 24878fce3331SJose Abreu ch = &priv->channel[tx_q->queue_index]; 24888fce3331SJose Abreu 2489021bd5e3SJose Abreu if (likely(napi_schedule_prep(&ch->tx_napi))) { 2490021bd5e3SJose Abreu unsigned long flags; 2491021bd5e3SJose Abreu 2492021bd5e3SJose Abreu spin_lock_irqsave(&ch->lock, flags); 2493021bd5e3SJose Abreu stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 2494021bd5e3SJose Abreu spin_unlock_irqrestore(&ch->lock, flags); 24954ccb4585SJose Abreu __napi_schedule(&ch->tx_napi); 2496021bd5e3SJose Abreu } 2497d5a05e69SVincent Whitchurch 2498d5a05e69SVincent Whitchurch return HRTIMER_NORESTART; 24999125cdd1SGiuseppe CAVALLARO } 25009125cdd1SGiuseppe CAVALLARO 25019125cdd1SGiuseppe CAVALLARO /** 2502d429b66eSJose Abreu * stmmac_init_coalesce - init mitigation options. 250332ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 25049125cdd1SGiuseppe CAVALLARO * Description: 2505d429b66eSJose Abreu * This inits the coalesce parameters: i.e. timer rate, 25069125cdd1SGiuseppe CAVALLARO * timer handler and default threshold used for enabling the 25079125cdd1SGiuseppe CAVALLARO * interrupt on completion bit. 
25089125cdd1SGiuseppe CAVALLARO */ 2509d429b66eSJose Abreu static void stmmac_init_coalesce(struct stmmac_priv *priv) 25109125cdd1SGiuseppe CAVALLARO { 25118fce3331SJose Abreu u32 tx_channel_count = priv->plat->tx_queues_to_use; 25128fce3331SJose Abreu u32 chan; 25138fce3331SJose Abreu 25149125cdd1SGiuseppe CAVALLARO priv->tx_coal_frames = STMMAC_TX_FRAMES; 25159125cdd1SGiuseppe CAVALLARO priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; 2516d429b66eSJose Abreu priv->rx_coal_frames = STMMAC_RX_FRAMES; 25178fce3331SJose Abreu 25188fce3331SJose Abreu for (chan = 0; chan < tx_channel_count; chan++) { 25198fce3331SJose Abreu struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 25208fce3331SJose Abreu 2521d5a05e69SVincent Whitchurch hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2522d5a05e69SVincent Whitchurch tx_q->txtimer.function = stmmac_tx_timer; 25238fce3331SJose Abreu } 25249125cdd1SGiuseppe CAVALLARO } 25259125cdd1SGiuseppe CAVALLARO 25264854ab99SJoao Pinto static void stmmac_set_rings_length(struct stmmac_priv *priv) 25274854ab99SJoao Pinto { 25284854ab99SJoao Pinto u32 rx_channels_count = priv->plat->rx_queues_to_use; 25294854ab99SJoao Pinto u32 tx_channels_count = priv->plat->tx_queues_to_use; 25304854ab99SJoao Pinto u32 chan; 25314854ab99SJoao Pinto 25324854ab99SJoao Pinto /* set TX ring length */ 25334854ab99SJoao Pinto for (chan = 0; chan < tx_channels_count; chan++) 2534a4e887faSJose Abreu stmmac_set_tx_ring_len(priv, priv->ioaddr, 2535aa042f60SSong, Yoong Siang (priv->dma_tx_size - 1), chan); 25364854ab99SJoao Pinto 25374854ab99SJoao Pinto /* set RX ring length */ 25384854ab99SJoao Pinto for (chan = 0; chan < rx_channels_count; chan++) 2539a4e887faSJose Abreu stmmac_set_rx_ring_len(priv, priv->ioaddr, 2540aa042f60SSong, Yoong Siang (priv->dma_rx_size - 1), chan); 25414854ab99SJoao Pinto } 25424854ab99SJoao Pinto 25439125cdd1SGiuseppe CAVALLARO /** 25446a3a7193SJoao Pinto * stmmac_set_tx_queue_weight - Set TX queue weight 25456a3a7193SJoao 
Pinto * @priv: driver private structure 25466a3a7193SJoao Pinto * Description: It is used for setting TX queues weight 25476a3a7193SJoao Pinto */ 25486a3a7193SJoao Pinto static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 25496a3a7193SJoao Pinto { 25506a3a7193SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 25516a3a7193SJoao Pinto u32 weight; 25526a3a7193SJoao Pinto u32 queue; 25536a3a7193SJoao Pinto 25546a3a7193SJoao Pinto for (queue = 0; queue < tx_queues_count; queue++) { 25556a3a7193SJoao Pinto weight = priv->plat->tx_queues_cfg[queue].weight; 2556c10d4c82SJose Abreu stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 25576a3a7193SJoao Pinto } 25586a3a7193SJoao Pinto } 25596a3a7193SJoao Pinto 25606a3a7193SJoao Pinto /** 256119d91873SJoao Pinto * stmmac_configure_cbs - Configure CBS in TX queue 256219d91873SJoao Pinto * @priv: driver private structure 256319d91873SJoao Pinto * Description: It is used for configuring CBS in AVB TX queues 256419d91873SJoao Pinto */ 256519d91873SJoao Pinto static void stmmac_configure_cbs(struct stmmac_priv *priv) 256619d91873SJoao Pinto { 256719d91873SJoao Pinto u32 tx_queues_count = priv->plat->tx_queues_to_use; 256819d91873SJoao Pinto u32 mode_to_use; 256919d91873SJoao Pinto u32 queue; 257019d91873SJoao Pinto 257144781fefSJoao Pinto /* queue 0 is reserved for legacy traffic */ 257244781fefSJoao Pinto for (queue = 1; queue < tx_queues_count; queue++) { 257319d91873SJoao Pinto mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 257419d91873SJoao Pinto if (mode_to_use == MTL_QUEUE_DCB) 257519d91873SJoao Pinto continue; 257619d91873SJoao Pinto 2577c10d4c82SJose Abreu stmmac_config_cbs(priv, priv->hw, 257819d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].send_slope, 257919d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].idle_slope, 258019d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].high_credit, 258119d91873SJoao Pinto priv->plat->tx_queues_cfg[queue].low_credit, 
258219d91873SJoao Pinto queue); 258319d91873SJoao Pinto } 258419d91873SJoao Pinto } 258519d91873SJoao Pinto 258619d91873SJoao Pinto /** 2587d43042f4SJoao Pinto * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 2588d43042f4SJoao Pinto * @priv: driver private structure 2589d43042f4SJoao Pinto * Description: It is used for mapping RX queues to RX dma channels 2590d43042f4SJoao Pinto */ 2591d43042f4SJoao Pinto static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 2592d43042f4SJoao Pinto { 2593d43042f4SJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 2594d43042f4SJoao Pinto u32 queue; 2595d43042f4SJoao Pinto u32 chan; 2596d43042f4SJoao Pinto 2597d43042f4SJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 2598d43042f4SJoao Pinto chan = priv->plat->rx_queues_cfg[queue].chan; 2599c10d4c82SJose Abreu stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 2600d43042f4SJoao Pinto } 2601d43042f4SJoao Pinto } 2602d43042f4SJoao Pinto 2603d43042f4SJoao Pinto /** 2604a8f5102aSJoao Pinto * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 2605a8f5102aSJoao Pinto * @priv: driver private structure 2606a8f5102aSJoao Pinto * Description: It is used for configuring the RX Queue Priority 2607a8f5102aSJoao Pinto */ 2608a8f5102aSJoao Pinto static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 2609a8f5102aSJoao Pinto { 2610a8f5102aSJoao Pinto u32 rx_queues_count = priv->plat->rx_queues_to_use; 2611a8f5102aSJoao Pinto u32 queue; 2612a8f5102aSJoao Pinto u32 prio; 2613a8f5102aSJoao Pinto 2614a8f5102aSJoao Pinto for (queue = 0; queue < rx_queues_count; queue++) { 2615a8f5102aSJoao Pinto if (!priv->plat->rx_queues_cfg[queue].use_prio) 2616a8f5102aSJoao Pinto continue; 2617a8f5102aSJoao Pinto 2618a8f5102aSJoao Pinto prio = priv->plat->rx_queues_cfg[queue].prio; 2619c10d4c82SJose Abreu stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 2620a8f5102aSJoao Pinto } 2621a8f5102aSJoao Pinto } 2622a8f5102aSJoao Pinto 
/**
 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 * @priv: driver private structure
 * Description: It is used for configuring the TX Queue Priority
 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < tx_queues_count; queue++) {
		/* Only queues explicitly flagged in platform data get a prio */
		if (!priv->plat->tx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->tx_queues_cfg[queue].prio;
		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 * @priv: driver private structure
 * Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* no specific packet type routing specified for the queue */
		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
			continue;

		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
	}
}

/**
 * stmmac_mac_config_rss - enable/disable and program RSS
 * @priv: driver private structure
 * Description: enables RSS only when both the HW capability and the
 * platform flag allow it and NETIF_F_RXHASH is set on the netdev; the
 * resulting state is always written to the HW.
 */
static void stmmac_mac_config_rss(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
		priv->rss.enable = false;
		return;
	}

	if (priv->dev->features & NETIF_F_RXHASH)
		priv->rss.enable = true;
	else
		priv->rss.enable = false;

	stmmac_rss_configure(priv, priv->hw, &priv->rss,
			     priv->plat->rx_queues_to_use);
}

/**
 * stmmac_mtl_configuration - Configure MTL
 * @priv: driver private structure
 * Description: It is used for configuring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	/* Set TX queue weights (only meaningful with multiple queues) */
	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
					      priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
					      priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}

/**
 * stmmac_safety_feat_configuration - enable HW safety features if present
 * @priv: driver private structure
 * Description: enables the automotive safety package when the DMA
 * capability register (asp) reports support; otherwise just logs.
 */
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
	if (priv->dma_cap.asp) {
		netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
	}
}

/**
 * stmmac_hw_setup - setup mac in a usable state.
 * @dev : pointer to the device structure.
 * @init_ptp: initialize PTP if set
 * Description:
 * this is the main function to setup the HW in a usable state because the
 * dma engine is reset, the core registers are configured (e.g. AXI,
 * Checksum features, timers). The DMA is ready to start receiving and
 * transmitting.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* If RX checksum offload cannot be enabled, fall back to no COE */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		/* PTP clock/init failures are not fatal: only warn */
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;

	/* Convert the timer from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	if (priv->use_riwt) {
		if (!priv->rx_riwt)
			priv->rx_riwt = DEF_DMA_RIWT;

		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++)
			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
	}

	/* Enable Split Header (requires RX checksum offload) */
	if (priv->sph && priv->hw->rx_csum) {
		for (chan = 0; chan < rx_cnt; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
	}

	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	return 0;
}

/**
 * stmmac_hw_teardown - undo the clock setup done by stmmac_hw_setup
 * @dev: device pointer
 * Description: disables the PTP reference clock enabled in
 * stmmac_hw_setup(); used on the stmmac_open() error path.
 */
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

/**
 * stmmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int bfsize = 0;
	u32 chan;
	int ret;

	/* Attach the PHY unless the PCS (or an external XPCS) drives the
	 * link directly.
	 */
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI &&
	    priv->hw->xpcs == NULL) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	/* Pick the RX buffer size from the MTU (16KiB mode has priority) */
	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	if (!priv->dma_tx_size)
		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
	if (!priv->dma_rx_size)
		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;

	/* Earlier check for TBS */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;

		/* If the HW refuses TBS for this channel, drop the flag */
		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
	}

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);
	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the IRQ lines */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);

	return 0;

	/* Error unwind: each label releases what was acquired before the
	 * failing step, in reverse order.
	 */
lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	phylink_disconnect_phy(priv->phylink);
	return ret;
}

/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	return 0;
}

/**
 * stmmac_vlan_insert - request HW VLAN tag insertion via descriptor
 * @priv: driver private structure
 * @skb: socket buffer carrying the VLAN tag
 * @tx_q: TX queue whose current descriptor is used
 * Description: writes the skb's VLAN tag (and the inner tag for 802.1AD)
 * into the current TX descriptor and advances the queue's cur_tx index.
 * Return: true when a descriptor was consumed for the tag, false when HW
 * insertion is unavailable, no tag is present, or programming failed.
 */
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
	return true;
}

/**
 * stmmac_tso_allocator - fill TX descriptors for a TSO payload buffer
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * @last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description:
 * This function fills descriptor and request new descriptors according to
 * buffer length to fill
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	/* Consume one descriptor per TSO_MAX_BUFF_SIZE chunk of the buffer */
	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		/* <= 32-bit DMA: write des0 directly; otherwise use the
		 * HW helper which also programs the high address bits.
		 */
		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

/**
 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 * @skb : the socket buffer
 * @dev : device pointer
 * Description: this is the transmit function that is called on TSO frames
 * (support available on GMAC4 and newer chips).
 * Diagram below show the ring programming in case of TSO frames:
 *
 * First Descriptor
 *  --------
 * | DES0 |---> buffer1 = L2/L3/L4 header
 * | DES1 |---> TCP Payload (can continue on next descr...)
 * | DES2 |---> buffer 1 and 2 len
 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *  --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int desc_size, tmp_pay_len = 0, first_tx;
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, tx_packets;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	u8 proto_hdr_len, hdr;
	u32 pay_len, mss;
	dma_addr_t des;
	int i;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths (UDP GSO vs TCP TSO) */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
		hdr = sizeof(struct udphdr);
	} else {
		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr = tcp_hdrlen(skb);
	}

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed: a dedicated context descriptor is
	 * consumed before the first data descriptor.
	 */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);

	if (priv->dma_cap.addr64 <= 32) {
		first->des0 = cpu_to_le32(des);

		/* Fill start of payload in buff2 of first descriptor */
		if (pay_len)
			first->des1 = cpu_to_le32(des + proto_hdr_len);

		/* If needed take extra descriptors to fill the remaining payload */
		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
		/* NOTE(review): in this branch `des` is not advanced past the
		 * header before stmmac_tso_allocator() below, unlike the
		 * addr64 > 32 branch — confirm this is intended for <=32-bit
		 * DMA (buff2 of the first descriptor already covers the start
		 * of the payload).
		 */
	} else {
		stmmac_set_desc_addr(priv, first, des);
		tmp_pay_len = pay_len;
		des += proto_hdr_len;
		pay_len = 0;
	}

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;

	/* Manage tx mitigation: decide whether this batch gets an
	 * interrupt-on-completion bit or relies on the coalescing timer.
	 */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames)
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames)
		set_ic = true;
	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
			proto_hdr_len,
			pay_len,
			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
			hdr / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	/* Descriptor stride depends on whether the ring holds enhanced
	 * (TBS) or basic descriptors.
	 */
	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int first_entry, tx_packets, enh_desc;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int gso = skb_shinfo(skb)->gso_type;
	struct dma_edesc *tbs_desc = NULL;
	int entry, desc_size, first_tx;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	dma_addr_t des;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;
	/* Leave LPI before touching the TX path (SW-timer EEE mode only) */
	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
			return stmmac_tso_xmit(skb, dev);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[entry].basic;
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	/* Map and program one descriptor per page fragment */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_packets = (entry + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames)
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames)
		set_ic = true;
	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (likely(priv->extend_desc))
			desc = &tx_q->dma_etx[entry].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = &tx_q->dma_tx[entry];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 0, last_segment,
				skb->len);
	}

	/* Program the launch time when TBS is enabled on this queue */
	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		tbs_desc = &tx_q->dma_entx[first_entry];
		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
	}

	stmmac_set_tx_owner(priv, first);

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	/* Descriptor stride depends on the descriptor flavour in this ring */
	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* Strip a leading 802.1Q/802.1ad VLAN header from a received frame and
 * record the tag in the skb, but only when the corresponding RX VLAN
 * offload feature is enabled on the netdev.
 */
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth;
	__be16 vlan_proto;
	u16 vlanid;
3678b9381985SVince Bridgers 3679ab188e8fSElad Nachman veth = (struct vlan_ethhdr *)skb->data; 3680ab188e8fSElad Nachman vlan_proto = veth->h_vlan_proto; 3681ab188e8fSElad Nachman 3682ab188e8fSElad Nachman if ((vlan_proto == htons(ETH_P_8021Q) && 3683ab188e8fSElad Nachman dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 3684ab188e8fSElad Nachman (vlan_proto == htons(ETH_P_8021AD) && 3685ab188e8fSElad Nachman dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 3686b9381985SVince Bridgers /* pop the vlan tag */ 3687ab188e8fSElad Nachman vlanid = ntohs(veth->h_vlan_TCI); 3688ab188e8fSElad Nachman memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 3689b9381985SVince Bridgers skb_pull(skb, VLAN_HLEN); 3690ab188e8fSElad Nachman __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 3691b9381985SVince Bridgers } 3692b9381985SVince Bridgers } 3693b9381985SVince Bridgers 369432ceabcaSGiuseppe CAVALLARO /** 3695732fdf0eSGiuseppe CAVALLARO * stmmac_rx_refill - refill used skb preallocated buffers 369632ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 369754139cf3SJoao Pinto * @queue: RX queue index 369832ceabcaSGiuseppe CAVALLARO * Description : this is to reallocate the skb for the reception process 369932ceabcaSGiuseppe CAVALLARO * that is based on zero-copy. 
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int len, dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	/* Sync length: buffer size rounded up to whole pages */
	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;

	/* Re-arm every descriptor consumed since the last refill */
	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* Allocate a fresh page from the page pool only if the slot
		 * was actually consumed; on failure stop refilling here.
		 */
		if (!buf->page) {
			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->page)
				break;
		}

		/* Second buffer, only used when Split Header is enabled */
		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);

			dma_sync_single_for_device(priv->device, buf->sec_addr,
						   len, DMA_FROM_DEVICE);
		}

		buf->addr = page_pool_get_dma_addr(buf->page);

		/* Sync whole allocation to device. This will invalidate old
		 * data.
		 */
		dma_sync_single_for_device(priv->device, buf->addr, len,
					   DMA_FROM_DEVICE);

		/* Program buffer addresses into the descriptor */
		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		/* RX interrupt coalescing bookkeeping; decides whether the
		 * RX watchdog (RIWT) should cover this descriptor.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames;
		if (rx_q->rx_count_frames > priv->rx_coal_frames)
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames;
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Barrier before handing the descriptor back to the DMA:
		 * all fields above must be visible before the own bit.
		 */
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	/* Kick the DMA by moving the RX tail pointer past the refilled area */
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}

/* Compute how many bytes of the received frame live in buffer 1 of this
 * descriptor (first buffer, or the header part when Split Header is on).
 */
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_buf_sz, plen);
}

/* Compute how many bytes of the received frame live in buffer 2 of this
 * descriptor. Only non-zero when Split Header is enabled; @len is the
 * byte count already accumulated for this frame.
 */
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor: total frame length minus what buffer 1 holds */
	return plen - len;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int count = 0, error = 0, len = 0;
	int status = 0, coe = priv->hw->rx_csum;
	unsigned int next_entry = rx_q->cur_rx;
	unsigned int desc_size;
	struct sk_buff *skb = NULL;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		unsigned int buf1_len = 0, buf2_len = 0;
		enum pkt_hash_types hash_type;
		struct stmmac_rx_buffer *buf;
		struct dma_desc *np, *p;
		int entry;
		u32 hash;

		/* Resume a frame whose descriptors straddled the previous
		 * NAPI run; the partial skb/error/len were saved in rx_q.
		 */
		if (!count && rx_q->state_saved) {
			skb = rx_q->state.skb;
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			skb = NULL;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
		buf1_len = 0;
		buf2_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
				&priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
					&priv->xstats, rx_q->dma_erx + entry);
		/* Descriptor flagged bad: recycle its page and remember the
		 * error so the rest of this frame's descriptors get drained.
		 */
		if (unlikely(status == discard_frame)) {
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			dev_kfree_skb(skb);
			skb = NULL;
			count++;
			continue;
		}

		/* Buffer is good. Go on. */

		prefetch(page_address(buf->page));
		if (buf->sec_page)
			prefetch(page_address(buf->sec_page));

		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;
		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
		len += buf2_len;

		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
		 * Type frames (LLC/LLC-SNAP)
		 *
		 * llc_snap is never checked in GMAC >= 4, so this ACS
		 * feature is always disabled and packets need to be
		 * stripped manually.
		 */
		if (likely(!(status & rx_not_ls)) &&
		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
		     unlikely(status != llc_snap))) {
			if (buf2_len)
				buf2_len -= ETH_FCS_LEN;
			else
				buf1_len -= ETH_FCS_LEN;

			len -= ETH_FCS_LEN;
		}

		if (!skb) {
			/* First buffer of the frame: copy buffer 1 (header)
			 * into the linear part of a fresh skb and recycle
			 * the page immediately.
			 */
			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
			if (!skb) {
				priv->dev->stats.rx_dropped++;
				count++;
				goto drain_data;
			}

			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, page_address(buf->page),
						buf1_len);
			skb_put(skb, buf1_len);

			/* Data payload copied into SKB, page ready for recycle */
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
		} else if (buf1_len) {
			/* Continuation buffer: attach the page as a fragment
			 * (zero-copy); ownership moves to the skb.
			 */
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, 0, buf1_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->page);
			buf->page = NULL;
		}

		if (buf2_len) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
						buf2_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, buf2_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->sec_page);
			buf->sec_page = NULL;
		}

drain_data:
		if (likely(status & rx_not_ls))
			goto read_again;
		if (!skb)
			continue;

		/* Got entire packet into SKB. Finish it. */

		stmmac_get_rx_hwtstamp(priv, p, np, skb);
		stmmac_rx_vlan(priv->dev, skb);
		skb->protocol = eth_type_trans(skb, priv->dev);

		if (unlikely(!coe))
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
			skb_set_hash(skb, hash, hash_type);

		skb_record_rx_queue(skb, queue);
		napi_gro_receive(&ch->rx_napi, skb);
		skb = NULL;

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += len;
		count++;
	}

	/* Frame still in flight when the budget ran out: stash the partial
	 * state so the next poll can resume it.
	 */
	if (status & rx_not_ls || skb) {
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;

	return count;
}

/* NAPI poll handler for the RX half of a DMA channel; re-enables the RX
 * interrupt only when the budget was not exhausted.
 */
static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int
 work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_rx(priv, budget, chan);
	/* Done under budget: complete NAPI and re-enable the RX DMA IRQ
	 * for this channel (under ch->lock, shared with the ISR path).
	 */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}

/* NAPI poll handler for the TX half of a DMA channel: reclaim completed
 * TX descriptors and re-enable the TX interrupt when under budget.
 */
static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	/* tx_clean may report more than budget; clamp before comparing */
	work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}

/**
 * stmmac_tx_timeout
 * @dev : Pointer to
 net device structure
 * @txqueue: the index of the hanging transmit queue
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Hand off to the driver's global error/reset machinery */
	stmmac_global_err(priv);
}

/**
 * stmmac_set_rx_mode - entry point for multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled.
 * Return value:
 * void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Program the MAC's RX filter from dev's address lists/flags */
	stmmac_set_filter(priv, priv->hw, dev);
}

/**
 * stmmac_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	const int mtu = new_mtu;	/* keep the unaligned value for dev->mtu */

	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* The TX FIFO is shared between queues; validate per-queue share */
	txfifosz /= priv->plat->tx_queues_to_use;

	/* This driver only changes MTU on a stopped interface */
	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	/* Aligned value used only for buffer-size validation below */
	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	dev->mtu = mtu;

	netdev_update_features(dev);

	return 0;
}

/* ndo_fix_features: mask out offload features the platform cannot do */
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* No RX checksum engine: the stack must verify checksums */
	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	/* No TX checksum engine: drop all TX csum offload bits */
	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

/* ndo_set_features: push the negotiated feature set down to the HW
 * (RX checksum offload and Split Header per RX channel).
 */
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	bool sph_en;
	u32 chan;

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	/* Split Header only makes sense together with RX checksum offload */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

	return 0;
}

/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer (must be valid).
 * Description: this is the main driver interrupt service routine.
 * It can call:
 * o DMA service routine (to manage incoming frame reception and transmission
 *   status)
 * o Core interrupts to manage: remote wake-up, management counter, LPI
 *   interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	/* GMAC4 and XGMAC cores share the "xmac" IRQ handling path */
	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;
	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
		int mtl_status;

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		/* Per-queue MTL interrupts; on RX FIFO overflow re-write the
		 * tail pointer to restart the RX DMA for that queue.
		 */
		for (queue = 0; queue < queues_count; queue++) {
			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
								queue);
			if (mtl_status != -EINVAL)
				status |= mtl_status;

			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
						       rx_q->rx_tail_addr,
						       queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	/* Run the ISR synchronously with the device IRQ masked */
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * stmmac_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv (dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* MII register access goes through phylink */
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

/* Flow-block callback for TC classifier offload (cls_u32 / cls_flower).
 * All queues are quiesced while the HW filter tables are reprogrammed.
 */
static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);

/* ndo_setup_tc: dispatch TC offload requests (block callbacks and the
 * CBS/TAPRIO/ETF qdisc offloads) to the stmmac TC layer.
 */
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv,
true); 43731f705bc6SJose Abreu case TC_SETUP_QDISC_CBS: 43741f705bc6SJose Abreu return stmmac_tc_setup_cbs(priv, priv, type_data); 4375b60189e0SJose Abreu case TC_SETUP_QDISC_TAPRIO: 4376b60189e0SJose Abreu return stmmac_tc_setup_taprio(priv, priv, type_data); 4377430b383cSJose Abreu case TC_SETUP_QDISC_ETF: 4378430b383cSJose Abreu return stmmac_tc_setup_etf(priv, priv, type_data); 43794dbbe8ddSJose Abreu default: 43804dbbe8ddSJose Abreu return -EOPNOTSUPP; 43814dbbe8ddSJose Abreu } 43824dbbe8ddSJose Abreu } 43834dbbe8ddSJose Abreu 43844993e5b3SJose Abreu static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, 43854993e5b3SJose Abreu struct net_device *sb_dev) 43864993e5b3SJose Abreu { 4387b7766206SJose Abreu int gso = skb_shinfo(skb)->gso_type; 4388b7766206SJose Abreu 4389b7766206SJose Abreu if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { 43904993e5b3SJose Abreu /* 4391b7766206SJose Abreu * There is no way to determine the number of TSO/USO 43924993e5b3SJose Abreu * capable Queues. Let's use always the Queue 0 4393b7766206SJose Abreu * because if TSO/USO is supported then at least this 43944993e5b3SJose Abreu * one will be capable. 
43954993e5b3SJose Abreu */ 43964993e5b3SJose Abreu return 0; 43974993e5b3SJose Abreu } 43984993e5b3SJose Abreu 43994993e5b3SJose Abreu return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; 44004993e5b3SJose Abreu } 44014993e5b3SJose Abreu 4402a830405eSBhadram Varka static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 4403a830405eSBhadram Varka { 4404a830405eSBhadram Varka struct stmmac_priv *priv = netdev_priv(ndev); 4405a830405eSBhadram Varka int ret = 0; 4406a830405eSBhadram Varka 4407a830405eSBhadram Varka ret = eth_mac_addr(ndev, addr); 4408a830405eSBhadram Varka if (ret) 4409a830405eSBhadram Varka return ret; 4410a830405eSBhadram Varka 4411c10d4c82SJose Abreu stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 4412a830405eSBhadram Varka 4413a830405eSBhadram Varka return ret; 4414a830405eSBhadram Varka } 4415a830405eSBhadram Varka 441650fb4f74SGiuseppe CAVALLARO #ifdef CONFIG_DEBUG_FS 44177ac29055SGiuseppe CAVALLARO static struct dentry *stmmac_fs_dir; 44187ac29055SGiuseppe CAVALLARO 4419c24602efSGiuseppe CAVALLARO static void sysfs_display_ring(void *head, int size, int extend_desc, 4420bfaf91caSJoakim Zhang struct seq_file *seq, dma_addr_t dma_phy_addr) 44217ac29055SGiuseppe CAVALLARO { 44227ac29055SGiuseppe CAVALLARO int i; 4423c24602efSGiuseppe CAVALLARO struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 4424c24602efSGiuseppe CAVALLARO struct dma_desc *p = (struct dma_desc *)head; 4425bfaf91caSJoakim Zhang dma_addr_t dma_addr; 44267ac29055SGiuseppe CAVALLARO 4427c24602efSGiuseppe CAVALLARO for (i = 0; i < size; i++) { 4428c24602efSGiuseppe CAVALLARO if (extend_desc) { 4429bfaf91caSJoakim Zhang dma_addr = dma_phy_addr + i * sizeof(*ep); 4430bfaf91caSJoakim Zhang seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 4431bfaf91caSJoakim Zhang i, &dma_addr, 4432f8be0d78SMichael Weiser le32_to_cpu(ep->basic.des0), 4433f8be0d78SMichael Weiser le32_to_cpu(ep->basic.des1), 4434f8be0d78SMichael Weiser 
le32_to_cpu(ep->basic.des2), 4435f8be0d78SMichael Weiser le32_to_cpu(ep->basic.des3)); 4436c24602efSGiuseppe CAVALLARO ep++; 4437c24602efSGiuseppe CAVALLARO } else { 4438bfaf91caSJoakim Zhang dma_addr = dma_phy_addr + i * sizeof(*p); 4439bfaf91caSJoakim Zhang seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 4440bfaf91caSJoakim Zhang i, &dma_addr, 4441f8be0d78SMichael Weiser le32_to_cpu(p->des0), le32_to_cpu(p->des1), 4442f8be0d78SMichael Weiser le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 4443c24602efSGiuseppe CAVALLARO p++; 4444c24602efSGiuseppe CAVALLARO } 44457ac29055SGiuseppe CAVALLARO seq_printf(seq, "\n"); 44467ac29055SGiuseppe CAVALLARO } 4447c24602efSGiuseppe CAVALLARO } 44487ac29055SGiuseppe CAVALLARO 4449fb0d9c63SYangtao Li static int stmmac_rings_status_show(struct seq_file *seq, void *v) 4450c24602efSGiuseppe CAVALLARO { 4451c24602efSGiuseppe CAVALLARO struct net_device *dev = seq->private; 4452c24602efSGiuseppe CAVALLARO struct stmmac_priv *priv = netdev_priv(dev); 445354139cf3SJoao Pinto u32 rx_count = priv->plat->rx_queues_to_use; 4454ce736788SJoao Pinto u32 tx_count = priv->plat->tx_queues_to_use; 445554139cf3SJoao Pinto u32 queue; 445654139cf3SJoao Pinto 44575f2b8b62SThierry Reding if ((dev->flags & IFF_UP) == 0) 44585f2b8b62SThierry Reding return 0; 44595f2b8b62SThierry Reding 446054139cf3SJoao Pinto for (queue = 0; queue < rx_count; queue++) { 446154139cf3SJoao Pinto struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 446254139cf3SJoao Pinto 446354139cf3SJoao Pinto seq_printf(seq, "RX Queue %d:\n", queue); 44647ac29055SGiuseppe CAVALLARO 4465c24602efSGiuseppe CAVALLARO if (priv->extend_desc) { 446654139cf3SJoao Pinto seq_printf(seq, "Extended descriptor ring:\n"); 446754139cf3SJoao Pinto sysfs_display_ring((void *)rx_q->dma_erx, 4468bfaf91caSJoakim Zhang priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); 446954139cf3SJoao Pinto } else { 447054139cf3SJoao Pinto seq_printf(seq, "Descriptor ring:\n"); 447154139cf3SJoao Pinto 
sysfs_display_ring((void *)rx_q->dma_rx, 4472bfaf91caSJoakim Zhang priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); 447354139cf3SJoao Pinto } 447454139cf3SJoao Pinto } 447554139cf3SJoao Pinto 4476ce736788SJoao Pinto for (queue = 0; queue < tx_count; queue++) { 4477ce736788SJoao Pinto struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 4478ce736788SJoao Pinto 4479ce736788SJoao Pinto seq_printf(seq, "TX Queue %d:\n", queue); 4480ce736788SJoao Pinto 448154139cf3SJoao Pinto if (priv->extend_desc) { 4482ce736788SJoao Pinto seq_printf(seq, "Extended descriptor ring:\n"); 4483ce736788SJoao Pinto sysfs_display_ring((void *)tx_q->dma_etx, 4484bfaf91caSJoakim Zhang priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); 4485579a25a8SJose Abreu } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { 4486ce736788SJoao Pinto seq_printf(seq, "Descriptor ring:\n"); 4487ce736788SJoao Pinto sysfs_display_ring((void *)tx_q->dma_tx, 4488bfaf91caSJoakim Zhang priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); 4489ce736788SJoao Pinto } 44907ac29055SGiuseppe CAVALLARO } 44917ac29055SGiuseppe CAVALLARO 44927ac29055SGiuseppe CAVALLARO return 0; 44937ac29055SGiuseppe CAVALLARO } 4494fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 44957ac29055SGiuseppe CAVALLARO 4496fb0d9c63SYangtao Li static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 4497e7434821SGiuseppe CAVALLARO { 4498e7434821SGiuseppe CAVALLARO struct net_device *dev = seq->private; 4499e7434821SGiuseppe CAVALLARO struct stmmac_priv *priv = netdev_priv(dev); 4500e7434821SGiuseppe CAVALLARO 450119e30c14SGiuseppe CAVALLARO if (!priv->hw_cap_support) { 4502e7434821SGiuseppe CAVALLARO seq_printf(seq, "DMA HW features not supported\n"); 4503e7434821SGiuseppe CAVALLARO return 0; 4504e7434821SGiuseppe CAVALLARO } 4505e7434821SGiuseppe CAVALLARO 4506e7434821SGiuseppe CAVALLARO seq_printf(seq, "==============================\n"); 4507e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tDMA HW features\n"); 4508e7434821SGiuseppe CAVALLARO 
seq_printf(seq, "==============================\n"); 4509e7434821SGiuseppe CAVALLARO 451022d3efe5SPavel Machek seq_printf(seq, "\t10/100 Mbps: %s\n", 4511e7434821SGiuseppe CAVALLARO (priv->dma_cap.mbps_10_100) ? "Y" : "N"); 451222d3efe5SPavel Machek seq_printf(seq, "\t1000 Mbps: %s\n", 4513e7434821SGiuseppe CAVALLARO (priv->dma_cap.mbps_1000) ? "Y" : "N"); 451422d3efe5SPavel Machek seq_printf(seq, "\tHalf duplex: %s\n", 4515e7434821SGiuseppe CAVALLARO (priv->dma_cap.half_duplex) ? "Y" : "N"); 4516e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tHash Filter: %s\n", 4517e7434821SGiuseppe CAVALLARO (priv->dma_cap.hash_filter) ? "Y" : "N"); 4518e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tMultiple MAC address registers: %s\n", 4519e7434821SGiuseppe CAVALLARO (priv->dma_cap.multi_addr) ? "Y" : "N"); 45208d45e42bSLABBE Corentin seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 4521e7434821SGiuseppe CAVALLARO (priv->dma_cap.pcs) ? "Y" : "N"); 4522e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 4523e7434821SGiuseppe CAVALLARO (priv->dma_cap.sma_mdio) ? "Y" : "N"); 4524e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tPMT Remote wake up: %s\n", 4525e7434821SGiuseppe CAVALLARO (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 4526e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tPMT Magic Frame: %s\n", 4527e7434821SGiuseppe CAVALLARO (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 4528e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tRMON module: %s\n", 4529e7434821SGiuseppe CAVALLARO (priv->dma_cap.rmon) ? "Y" : "N"); 4530e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 4531e7434821SGiuseppe CAVALLARO (priv->dma_cap.time_stamp) ? "Y" : "N"); 4532e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 4533e7434821SGiuseppe CAVALLARO (priv->dma_cap.atime_stamp) ? 
"Y" : "N"); 453422d3efe5SPavel Machek seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 4535e7434821SGiuseppe CAVALLARO (priv->dma_cap.eee) ? "Y" : "N"); 4536e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 4537e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tChecksum Offload in TX: %s\n", 4538e7434821SGiuseppe CAVALLARO (priv->dma_cap.tx_coe) ? "Y" : "N"); 4539f748be53SAlexandre TORGUE if (priv->synopsys_id >= DWMAC_CORE_4_00) { 4540f748be53SAlexandre TORGUE seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 4541f748be53SAlexandre TORGUE (priv->dma_cap.rx_coe) ? "Y" : "N"); 4542f748be53SAlexandre TORGUE } else { 4543e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 4544e7434821SGiuseppe CAVALLARO (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 4545e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 4546e7434821SGiuseppe CAVALLARO (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 4547f748be53SAlexandre TORGUE } 4548e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 4549e7434821SGiuseppe CAVALLARO (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 4550e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 4551e7434821SGiuseppe CAVALLARO priv->dma_cap.number_rx_channel); 4552e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 4553e7434821SGiuseppe CAVALLARO priv->dma_cap.number_tx_channel); 45547d0b447aSJose Abreu seq_printf(seq, "\tNumber of Additional RX queues: %d\n", 45557d0b447aSJose Abreu priv->dma_cap.number_rx_queues); 45567d0b447aSJose Abreu seq_printf(seq, "\tNumber of Additional TX queues: %d\n", 45577d0b447aSJose Abreu priv->dma_cap.number_tx_queues); 4558e7434821SGiuseppe CAVALLARO seq_printf(seq, "\tEnhanced descriptors: %s\n", 4559e7434821SGiuseppe CAVALLARO (priv->dma_cap.enh_desc) ? 
"Y" : "N"); 45607d0b447aSJose Abreu seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); 45617d0b447aSJose Abreu seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); 45627d0b447aSJose Abreu seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); 45637d0b447aSJose Abreu seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); 45647d0b447aSJose Abreu seq_printf(seq, "\tNumber of PPS Outputs: %d\n", 45657d0b447aSJose Abreu priv->dma_cap.pps_out_num); 45667d0b447aSJose Abreu seq_printf(seq, "\tSafety Features: %s\n", 45677d0b447aSJose Abreu priv->dma_cap.asp ? "Y" : "N"); 45687d0b447aSJose Abreu seq_printf(seq, "\tFlexible RX Parser: %s\n", 45697d0b447aSJose Abreu priv->dma_cap.frpsel ? "Y" : "N"); 45707d0b447aSJose Abreu seq_printf(seq, "\tEnhanced Addressing: %d\n", 45717d0b447aSJose Abreu priv->dma_cap.addr64); 45727d0b447aSJose Abreu seq_printf(seq, "\tReceive Side Scaling: %s\n", 45737d0b447aSJose Abreu priv->dma_cap.rssen ? "Y" : "N"); 45747d0b447aSJose Abreu seq_printf(seq, "\tVLAN Hash Filtering: %s\n", 45757d0b447aSJose Abreu priv->dma_cap.vlhash ? "Y" : "N"); 45767d0b447aSJose Abreu seq_printf(seq, "\tSplit Header: %s\n", 45777d0b447aSJose Abreu priv->dma_cap.sphen ? "Y" : "N"); 45787d0b447aSJose Abreu seq_printf(seq, "\tVLAN TX Insertion: %s\n", 45797d0b447aSJose Abreu priv->dma_cap.vlins ? "Y" : "N"); 45807d0b447aSJose Abreu seq_printf(seq, "\tDouble VLAN: %s\n", 45817d0b447aSJose Abreu priv->dma_cap.dvlan ? "Y" : "N"); 45827d0b447aSJose Abreu seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", 45837d0b447aSJose Abreu priv->dma_cap.l3l4fnum); 45847d0b447aSJose Abreu seq_printf(seq, "\tARP Offloading: %s\n", 45857d0b447aSJose Abreu priv->dma_cap.arpoffsel ? "Y" : "N"); 458644e65475SJose Abreu seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", 458744e65475SJose Abreu priv->dma_cap.estsel ? 
"Y" : "N"); 458844e65475SJose Abreu seq_printf(seq, "\tFrame Preemption (FPE): %s\n", 458944e65475SJose Abreu priv->dma_cap.fpesel ? "Y" : "N"); 459044e65475SJose Abreu seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", 459144e65475SJose Abreu priv->dma_cap.tbssel ? "Y" : "N"); 4592e7434821SGiuseppe CAVALLARO return 0; 4593e7434821SGiuseppe CAVALLARO } 4594fb0d9c63SYangtao Li DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 4595e7434821SGiuseppe CAVALLARO 4596481a7d15SJiping Ma /* Use network device events to rename debugfs file entries. 4597481a7d15SJiping Ma */ 4598481a7d15SJiping Ma static int stmmac_device_event(struct notifier_block *unused, 4599481a7d15SJiping Ma unsigned long event, void *ptr) 4600481a7d15SJiping Ma { 4601481a7d15SJiping Ma struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4602481a7d15SJiping Ma struct stmmac_priv *priv = netdev_priv(dev); 4603481a7d15SJiping Ma 4604481a7d15SJiping Ma if (dev->netdev_ops != &stmmac_netdev_ops) 4605481a7d15SJiping Ma goto done; 4606481a7d15SJiping Ma 4607481a7d15SJiping Ma switch (event) { 4608481a7d15SJiping Ma case NETDEV_CHANGENAME: 4609481a7d15SJiping Ma if (priv->dbgfs_dir) 4610481a7d15SJiping Ma priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 4611481a7d15SJiping Ma priv->dbgfs_dir, 4612481a7d15SJiping Ma stmmac_fs_dir, 4613481a7d15SJiping Ma dev->name); 4614481a7d15SJiping Ma break; 4615481a7d15SJiping Ma } 4616481a7d15SJiping Ma done: 4617481a7d15SJiping Ma return NOTIFY_DONE; 4618481a7d15SJiping Ma } 4619481a7d15SJiping Ma 4620481a7d15SJiping Ma static struct notifier_block stmmac_notifier = { 4621481a7d15SJiping Ma .notifier_call = stmmac_device_event, 4622481a7d15SJiping Ma }; 4623481a7d15SJiping Ma 46248d72ab11SGreg Kroah-Hartman static void stmmac_init_fs(struct net_device *dev) 46257ac29055SGiuseppe CAVALLARO { 4626466c5ac8SMathieu Olivari struct stmmac_priv *priv = netdev_priv(dev); 46277ac29055SGiuseppe CAVALLARO 4628474a31e1SAaro Koskinen rtnl_lock(); 4629474a31e1SAaro Koskinen 
4630466c5ac8SMathieu Olivari /* Create per netdev entries */ 4631466c5ac8SMathieu Olivari priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 4632466c5ac8SMathieu Olivari 46337ac29055SGiuseppe CAVALLARO /* Entry to report DMA RX/TX rings */ 46348d72ab11SGreg Kroah-Hartman debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, 46357ac29055SGiuseppe CAVALLARO &stmmac_rings_status_fops); 46367ac29055SGiuseppe CAVALLARO 4637e7434821SGiuseppe CAVALLARO /* Entry to report the DMA HW features */ 46388d72ab11SGreg Kroah-Hartman debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 46398d72ab11SGreg Kroah-Hartman &stmmac_dma_cap_fops); 4640481a7d15SJiping Ma 4641474a31e1SAaro Koskinen rtnl_unlock(); 46427ac29055SGiuseppe CAVALLARO } 46437ac29055SGiuseppe CAVALLARO 4644466c5ac8SMathieu Olivari static void stmmac_exit_fs(struct net_device *dev) 46457ac29055SGiuseppe CAVALLARO { 4646466c5ac8SMathieu Olivari struct stmmac_priv *priv = netdev_priv(dev); 4647466c5ac8SMathieu Olivari 4648466c5ac8SMathieu Olivari debugfs_remove_recursive(priv->dbgfs_dir); 46497ac29055SGiuseppe CAVALLARO } 465050fb4f74SGiuseppe CAVALLARO #endif /* CONFIG_DEBUG_FS */ 46517ac29055SGiuseppe CAVALLARO 46523cd1cfcbSJose Abreu static u32 stmmac_vid_crc32_le(__le16 vid_le) 46533cd1cfcbSJose Abreu { 46543cd1cfcbSJose Abreu unsigned char *data = (unsigned char *)&vid_le; 46553cd1cfcbSJose Abreu unsigned char data_byte = 0; 46563cd1cfcbSJose Abreu u32 crc = ~0x0; 46573cd1cfcbSJose Abreu u32 temp = 0; 46583cd1cfcbSJose Abreu int i, bits; 46593cd1cfcbSJose Abreu 46603cd1cfcbSJose Abreu bits = get_bitmask_order(VLAN_VID_MASK); 46613cd1cfcbSJose Abreu for (i = 0; i < bits; i++) { 46623cd1cfcbSJose Abreu if ((i % 8) == 0) 46633cd1cfcbSJose Abreu data_byte = data[i / 8]; 46643cd1cfcbSJose Abreu 46653cd1cfcbSJose Abreu temp = ((crc & 1) ^ data_byte) & 1; 46663cd1cfcbSJose Abreu crc >>= 1; 46673cd1cfcbSJose Abreu data_byte >>= 1; 46683cd1cfcbSJose Abreu 46693cd1cfcbSJose Abreu if 
(temp) 46703cd1cfcbSJose Abreu crc ^= 0xedb88320; 46713cd1cfcbSJose Abreu } 46723cd1cfcbSJose Abreu 46733cd1cfcbSJose Abreu return crc; 46743cd1cfcbSJose Abreu } 46753cd1cfcbSJose Abreu 46763cd1cfcbSJose Abreu static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) 46773cd1cfcbSJose Abreu { 46783cd1cfcbSJose Abreu u32 crc, hash = 0; 4679a24cae70SJose Abreu __le16 pmatch = 0; 4680c7ab0b80SJose Abreu int count = 0; 4681c7ab0b80SJose Abreu u16 vid = 0; 46823cd1cfcbSJose Abreu 46833cd1cfcbSJose Abreu for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 46843cd1cfcbSJose Abreu __le16 vid_le = cpu_to_le16(vid); 46853cd1cfcbSJose Abreu crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; 46863cd1cfcbSJose Abreu hash |= (1 << crc); 4687c7ab0b80SJose Abreu count++; 46883cd1cfcbSJose Abreu } 46893cd1cfcbSJose Abreu 4690c7ab0b80SJose Abreu if (!priv->dma_cap.vlhash) { 4691c7ab0b80SJose Abreu if (count > 2) /* VID = 0 always passes filter */ 4692c7ab0b80SJose Abreu return -EOPNOTSUPP; 4693c7ab0b80SJose Abreu 4694a24cae70SJose Abreu pmatch = cpu_to_le16(vid); 4695c7ab0b80SJose Abreu hash = 0; 4696c7ab0b80SJose Abreu } 4697c7ab0b80SJose Abreu 4698a24cae70SJose Abreu return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); 46993cd1cfcbSJose Abreu } 47003cd1cfcbSJose Abreu 47013cd1cfcbSJose Abreu static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 47023cd1cfcbSJose Abreu { 47033cd1cfcbSJose Abreu struct stmmac_priv *priv = netdev_priv(ndev); 47043cd1cfcbSJose Abreu bool is_double = false; 47053cd1cfcbSJose Abreu int ret; 47063cd1cfcbSJose Abreu 47073cd1cfcbSJose Abreu if (be16_to_cpu(proto) == ETH_P_8021AD) 47083cd1cfcbSJose Abreu is_double = true; 47093cd1cfcbSJose Abreu 47103cd1cfcbSJose Abreu set_bit(vid, priv->active_vlans); 47113cd1cfcbSJose Abreu ret = stmmac_vlan_update(priv, is_double); 47123cd1cfcbSJose Abreu if (ret) { 47133cd1cfcbSJose Abreu clear_bit(vid, priv->active_vlans); 47143cd1cfcbSJose Abreu return 
ret; 47153cd1cfcbSJose Abreu } 47163cd1cfcbSJose Abreu 4717dd6a4998SJose Abreu if (priv->hw->num_vlan) { 4718ed64639bSWong Vee Khee ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 4719dd6a4998SJose Abreu if (ret) 47203cd1cfcbSJose Abreu return ret; 47213cd1cfcbSJose Abreu } 47223cd1cfcbSJose Abreu 4723dd6a4998SJose Abreu return 0; 4724dd6a4998SJose Abreu } 4725dd6a4998SJose Abreu 47263cd1cfcbSJose Abreu static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 47273cd1cfcbSJose Abreu { 47283cd1cfcbSJose Abreu struct stmmac_priv *priv = netdev_priv(ndev); 47293cd1cfcbSJose Abreu bool is_double = false; 4730ed64639bSWong Vee Khee int ret; 47313cd1cfcbSJose Abreu 47323cd1cfcbSJose Abreu if (be16_to_cpu(proto) == ETH_P_8021AD) 47333cd1cfcbSJose Abreu is_double = true; 47343cd1cfcbSJose Abreu 47353cd1cfcbSJose Abreu clear_bit(vid, priv->active_vlans); 4736dd6a4998SJose Abreu 4737dd6a4998SJose Abreu if (priv->hw->num_vlan) { 4738ed64639bSWong Vee Khee ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 4739ed64639bSWong Vee Khee if (ret) 4740ed64639bSWong Vee Khee return ret; 4741dd6a4998SJose Abreu } 4742ed64639bSWong Vee Khee 47433cd1cfcbSJose Abreu return stmmac_vlan_update(priv, is_double); 47443cd1cfcbSJose Abreu } 47453cd1cfcbSJose Abreu 47467ac6653aSJeff Kirsher static const struct net_device_ops stmmac_netdev_ops = { 47477ac6653aSJeff Kirsher .ndo_open = stmmac_open, 47487ac6653aSJeff Kirsher .ndo_start_xmit = stmmac_xmit, 47497ac6653aSJeff Kirsher .ndo_stop = stmmac_release, 47507ac6653aSJeff Kirsher .ndo_change_mtu = stmmac_change_mtu, 47517ac6653aSJeff Kirsher .ndo_fix_features = stmmac_fix_features, 4752d2afb5bdSGiuseppe CAVALLARO .ndo_set_features = stmmac_set_features, 475301789349SJiri Pirko .ndo_set_rx_mode = stmmac_set_rx_mode, 47547ac6653aSJeff Kirsher .ndo_tx_timeout = stmmac_tx_timeout, 47557ac6653aSJeff Kirsher .ndo_do_ioctl = stmmac_ioctl, 47564dbbe8ddSJose Abreu .ndo_setup_tc = 
stmmac_setup_tc, 47574993e5b3SJose Abreu .ndo_select_queue = stmmac_select_queue, 47587ac6653aSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER 47597ac6653aSJeff Kirsher .ndo_poll_controller = stmmac_poll_controller, 47607ac6653aSJeff Kirsher #endif 4761a830405eSBhadram Varka .ndo_set_mac_address = stmmac_set_mac_address, 47623cd1cfcbSJose Abreu .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 47633cd1cfcbSJose Abreu .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 47647ac6653aSJeff Kirsher }; 47657ac6653aSJeff Kirsher 476634877a15SJose Abreu static void stmmac_reset_subtask(struct stmmac_priv *priv) 476734877a15SJose Abreu { 476834877a15SJose Abreu if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 476934877a15SJose Abreu return; 477034877a15SJose Abreu if (test_bit(STMMAC_DOWN, &priv->state)) 477134877a15SJose Abreu return; 477234877a15SJose Abreu 477334877a15SJose Abreu netdev_err(priv->dev, "Reset adapter.\n"); 477434877a15SJose Abreu 477534877a15SJose Abreu rtnl_lock(); 477634877a15SJose Abreu netif_trans_update(priv->dev); 477734877a15SJose Abreu while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 477834877a15SJose Abreu usleep_range(1000, 2000); 477934877a15SJose Abreu 478034877a15SJose Abreu set_bit(STMMAC_DOWN, &priv->state); 478134877a15SJose Abreu dev_close(priv->dev); 478200f54e68SPetr Machata dev_open(priv->dev, NULL); 478334877a15SJose Abreu clear_bit(STMMAC_DOWN, &priv->state); 478434877a15SJose Abreu clear_bit(STMMAC_RESETING, &priv->state); 478534877a15SJose Abreu rtnl_unlock(); 478634877a15SJose Abreu } 478734877a15SJose Abreu 478834877a15SJose Abreu static void stmmac_service_task(struct work_struct *work) 478934877a15SJose Abreu { 479034877a15SJose Abreu struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 479134877a15SJose Abreu service_task); 479234877a15SJose Abreu 479334877a15SJose Abreu stmmac_reset_subtask(priv); 479434877a15SJose Abreu clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 479534877a15SJose Abreu } 
479634877a15SJose Abreu 47977ac6653aSJeff Kirsher /** 4798cf3f047bSGiuseppe CAVALLARO * stmmac_hw_init - Init the MAC device 479932ceabcaSGiuseppe CAVALLARO * @priv: driver private structure 4800732fdf0eSGiuseppe CAVALLARO * Description: this function is to configure the MAC device according to 4801732fdf0eSGiuseppe CAVALLARO * some platform parameters or the HW capability register. It prepares the 4802732fdf0eSGiuseppe CAVALLARO * driver to use either ring or chain modes and to setup either enhanced or 4803732fdf0eSGiuseppe CAVALLARO * normal descriptors. 4804cf3f047bSGiuseppe CAVALLARO */ 4805cf3f047bSGiuseppe CAVALLARO static int stmmac_hw_init(struct stmmac_priv *priv) 4806cf3f047bSGiuseppe CAVALLARO { 48075f0456b4SJose Abreu int ret; 4808cf3f047bSGiuseppe CAVALLARO 48099f93ac8dSLABBE Corentin /* dwmac-sun8i only work in chain mode */ 48109f93ac8dSLABBE Corentin if (priv->plat->has_sun8i) 48119f93ac8dSLABBE Corentin chain_mode = 1; 48125f0456b4SJose Abreu priv->chain_mode = chain_mode; 48139f93ac8dSLABBE Corentin 48145f0456b4SJose Abreu /* Initialize HW Interface */ 48155f0456b4SJose Abreu ret = stmmac_hwif_init(priv); 48165f0456b4SJose Abreu if (ret) 48175f0456b4SJose Abreu return ret; 48184a7d666aSGiuseppe CAVALLARO 4819cf3f047bSGiuseppe CAVALLARO /* Get the HW capability (new GMAC newer than 3.50a) */ 4820cf3f047bSGiuseppe CAVALLARO priv->hw_cap_support = stmmac_get_hw_features(priv); 4821cf3f047bSGiuseppe CAVALLARO if (priv->hw_cap_support) { 482238ddc59dSLABBE Corentin dev_info(priv->device, "DMA HW capability register supported\n"); 4823cf3f047bSGiuseppe CAVALLARO 4824cf3f047bSGiuseppe CAVALLARO /* We can override some gmac/dma configuration fields: e.g. 4825cf3f047bSGiuseppe CAVALLARO * enh_desc, tx_coe (e.g. that are passed through the 4826cf3f047bSGiuseppe CAVALLARO * platform) with the values from the HW capability 4827cf3f047bSGiuseppe CAVALLARO * register (if supported). 
4828cf3f047bSGiuseppe CAVALLARO */ 4829cf3f047bSGiuseppe CAVALLARO priv->plat->enh_desc = priv->dma_cap.enh_desc; 4830cf3f047bSGiuseppe CAVALLARO priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; 48313fe5cadbSGiuseppe CAVALLARO priv->hw->pmt = priv->plat->pmt; 4832b8ef7020SBiao Huang if (priv->dma_cap.hash_tb_sz) { 4833b8ef7020SBiao Huang priv->hw->multicast_filter_bins = 4834b8ef7020SBiao Huang (BIT(priv->dma_cap.hash_tb_sz) << 5); 4835b8ef7020SBiao Huang priv->hw->mcast_bits_log2 = 4836b8ef7020SBiao Huang ilog2(priv->hw->multicast_filter_bins); 4837b8ef7020SBiao Huang } 483838912bdbSDeepak SIKRI 4839a8df35d4SEzequiel Garcia /* TXCOE doesn't work in thresh DMA mode */ 4840a8df35d4SEzequiel Garcia if (priv->plat->force_thresh_dma_mode) 4841a8df35d4SEzequiel Garcia priv->plat->tx_coe = 0; 4842a8df35d4SEzequiel Garcia else 484338912bdbSDeepak SIKRI priv->plat->tx_coe = priv->dma_cap.tx_coe; 4844a8df35d4SEzequiel Garcia 4845f748be53SAlexandre TORGUE /* In case of GMAC4 rx_coe is from HW cap register. 
*/ 4846f748be53SAlexandre TORGUE priv->plat->rx_coe = priv->dma_cap.rx_coe; 484738912bdbSDeepak SIKRI 484838912bdbSDeepak SIKRI if (priv->dma_cap.rx_coe_type2) 484938912bdbSDeepak SIKRI priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 485038912bdbSDeepak SIKRI else if (priv->dma_cap.rx_coe_type1) 485138912bdbSDeepak SIKRI priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; 485238912bdbSDeepak SIKRI 485338ddc59dSLABBE Corentin } else { 485438ddc59dSLABBE Corentin dev_info(priv->device, "No HW DMA feature register supported\n"); 485538ddc59dSLABBE Corentin } 4856cf3f047bSGiuseppe CAVALLARO 4857d2afb5bdSGiuseppe CAVALLARO if (priv->plat->rx_coe) { 4858d2afb5bdSGiuseppe CAVALLARO priv->hw->rx_csum = priv->plat->rx_coe; 485938ddc59dSLABBE Corentin dev_info(priv->device, "RX Checksum Offload Engine supported\n"); 4860f748be53SAlexandre TORGUE if (priv->synopsys_id < DWMAC_CORE_4_00) 486138ddc59dSLABBE Corentin dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); 4862d2afb5bdSGiuseppe CAVALLARO } 4863cf3f047bSGiuseppe CAVALLARO if (priv->plat->tx_coe) 486438ddc59dSLABBE Corentin dev_info(priv->device, "TX Checksum insertion supported\n"); 4865cf3f047bSGiuseppe CAVALLARO 4866cf3f047bSGiuseppe CAVALLARO if (priv->plat->pmt) { 486738ddc59dSLABBE Corentin dev_info(priv->device, "Wake-Up On Lan supported\n"); 4868cf3f047bSGiuseppe CAVALLARO device_set_wakeup_capable(priv->device, 1); 4869cf3f047bSGiuseppe CAVALLARO } 4870cf3f047bSGiuseppe CAVALLARO 4871f748be53SAlexandre TORGUE if (priv->dma_cap.tsoen) 487238ddc59dSLABBE Corentin dev_info(priv->device, "TSO supported\n"); 4873f748be53SAlexandre TORGUE 4874e0f9956aSChuah, Kim Tatt priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; 4875e0f9956aSChuah, Kim Tatt priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; 4876e0f9956aSChuah, Kim Tatt 48777cfde0afSJose Abreu /* Run HW quirks, if any */ 48787cfde0afSJose Abreu if (priv->hwif_quirks) { 48797cfde0afSJose Abreu ret = priv->hwif_quirks(priv); 48807cfde0afSJose Abreu if (ret) 
48817cfde0afSJose Abreu return ret; 48827cfde0afSJose Abreu } 48837cfde0afSJose Abreu 48843b509466SJose Abreu /* Rx Watchdog is available in the COREs newer than the 3.40. 48853b509466SJose Abreu * In some case, for example on bugged HW this feature 48863b509466SJose Abreu * has to be disable and this can be done by passing the 48873b509466SJose Abreu * riwt_off field from the platform. 48883b509466SJose Abreu */ 48893b509466SJose Abreu if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 48903b509466SJose Abreu (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 48913b509466SJose Abreu priv->use_riwt = 1; 48923b509466SJose Abreu dev_info(priv->device, 48933b509466SJose Abreu "Enable RX Mitigation via HW Watchdog Timer\n"); 48943b509466SJose Abreu } 48953b509466SJose Abreu 4896c24602efSGiuseppe CAVALLARO return 0; 4897cf3f047bSGiuseppe CAVALLARO } 4898cf3f047bSGiuseppe CAVALLARO 48990366f7e0SOng Boon Leong static void stmmac_napi_add(struct net_device *dev) 49000366f7e0SOng Boon Leong { 49010366f7e0SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 49020366f7e0SOng Boon Leong u32 queue, maxq; 49030366f7e0SOng Boon Leong 49040366f7e0SOng Boon Leong maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 49050366f7e0SOng Boon Leong 49060366f7e0SOng Boon Leong for (queue = 0; queue < maxq; queue++) { 49070366f7e0SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 49080366f7e0SOng Boon Leong 49090366f7e0SOng Boon Leong ch->priv_data = priv; 49100366f7e0SOng Boon Leong ch->index = queue; 49112b94f526SMarek Szyprowski spin_lock_init(&ch->lock); 49120366f7e0SOng Boon Leong 49130366f7e0SOng Boon Leong if (queue < priv->plat->rx_queues_to_use) { 49140366f7e0SOng Boon Leong netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, 49150366f7e0SOng Boon Leong NAPI_POLL_WEIGHT); 49160366f7e0SOng Boon Leong } 49170366f7e0SOng Boon Leong if (queue < priv->plat->tx_queues_to_use) { 49180366f7e0SOng Boon Leong netif_tx_napi_add(dev, &ch->tx_napi, 
49190366f7e0SOng Boon Leong stmmac_napi_poll_tx, 49200366f7e0SOng Boon Leong NAPI_POLL_WEIGHT); 49210366f7e0SOng Boon Leong } 49220366f7e0SOng Boon Leong } 49230366f7e0SOng Boon Leong } 49240366f7e0SOng Boon Leong 49250366f7e0SOng Boon Leong static void stmmac_napi_del(struct net_device *dev) 49260366f7e0SOng Boon Leong { 49270366f7e0SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 49280366f7e0SOng Boon Leong u32 queue, maxq; 49290366f7e0SOng Boon Leong 49300366f7e0SOng Boon Leong maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 49310366f7e0SOng Boon Leong 49320366f7e0SOng Boon Leong for (queue = 0; queue < maxq; queue++) { 49330366f7e0SOng Boon Leong struct stmmac_channel *ch = &priv->channel[queue]; 49340366f7e0SOng Boon Leong 49350366f7e0SOng Boon Leong if (queue < priv->plat->rx_queues_to_use) 49360366f7e0SOng Boon Leong netif_napi_del(&ch->rx_napi); 49370366f7e0SOng Boon Leong if (queue < priv->plat->tx_queues_to_use) 49380366f7e0SOng Boon Leong netif_napi_del(&ch->tx_napi); 49390366f7e0SOng Boon Leong } 49400366f7e0SOng Boon Leong } 49410366f7e0SOng Boon Leong 49420366f7e0SOng Boon Leong int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 49430366f7e0SOng Boon Leong { 49440366f7e0SOng Boon Leong struct stmmac_priv *priv = netdev_priv(dev); 49450366f7e0SOng Boon Leong int ret = 0; 49460366f7e0SOng Boon Leong 49470366f7e0SOng Boon Leong if (netif_running(dev)) 49480366f7e0SOng Boon Leong stmmac_release(dev); 49490366f7e0SOng Boon Leong 49500366f7e0SOng Boon Leong stmmac_napi_del(dev); 49510366f7e0SOng Boon Leong 49520366f7e0SOng Boon Leong priv->plat->rx_queues_to_use = rx_cnt; 49530366f7e0SOng Boon Leong priv->plat->tx_queues_to_use = tx_cnt; 49540366f7e0SOng Boon Leong 49550366f7e0SOng Boon Leong stmmac_napi_add(dev); 49560366f7e0SOng Boon Leong 49570366f7e0SOng Boon Leong if (netif_running(dev)) 49580366f7e0SOng Boon Leong ret = stmmac_open(dev); 49590366f7e0SOng Boon Leong 49600366f7e0SOng Boon Leong 
return ret; 49610366f7e0SOng Boon Leong } 49620366f7e0SOng Boon Leong 4963aa042f60SSong, Yoong Siang int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 4964aa042f60SSong, Yoong Siang { 4965aa042f60SSong, Yoong Siang struct stmmac_priv *priv = netdev_priv(dev); 4966aa042f60SSong, Yoong Siang int ret = 0; 4967aa042f60SSong, Yoong Siang 4968aa042f60SSong, Yoong Siang if (netif_running(dev)) 4969aa042f60SSong, Yoong Siang stmmac_release(dev); 4970aa042f60SSong, Yoong Siang 4971aa042f60SSong, Yoong Siang priv->dma_rx_size = rx_size; 4972aa042f60SSong, Yoong Siang priv->dma_tx_size = tx_size; 4973aa042f60SSong, Yoong Siang 4974aa042f60SSong, Yoong Siang if (netif_running(dev)) 4975aa042f60SSong, Yoong Siang ret = stmmac_open(dev); 4976aa042f60SSong, Yoong Siang 4977aa042f60SSong, Yoong Siang return ret; 4978aa042f60SSong, Yoong Siang } 4979aa042f60SSong, Yoong Siang 4980cf3f047bSGiuseppe CAVALLARO /** 4981bfab27a1SGiuseppe CAVALLARO * stmmac_dvr_probe 4982bfab27a1SGiuseppe CAVALLARO * @device: device pointer 4983ff3dd78cSGiuseppe CAVALLARO * @plat_dat: platform data pointer 4984e56788cfSJoachim Eastwood * @res: stmmac resource pointer 4985bfab27a1SGiuseppe CAVALLARO * Description: this is the main probe function used to 4986bfab27a1SGiuseppe CAVALLARO * call the alloc_etherdev, allocate the priv structure. 49879afec6efSAndy Shevchenko * Return: 498815ffac73SJoachim Eastwood * returns 0 on success, otherwise errno. 
49897ac6653aSJeff Kirsher */ 499015ffac73SJoachim Eastwood int stmmac_dvr_probe(struct device *device, 4991cf3f047bSGiuseppe CAVALLARO struct plat_stmmacenet_data *plat_dat, 4992e56788cfSJoachim Eastwood struct stmmac_resources *res) 49937ac6653aSJeff Kirsher { 4994bfab27a1SGiuseppe CAVALLARO struct net_device *ndev = NULL; 4995bfab27a1SGiuseppe CAVALLARO struct stmmac_priv *priv; 49960366f7e0SOng Boon Leong u32 rxq; 499776067459SJose Abreu int i, ret = 0; 49987ac6653aSJeff Kirsher 49999737070cSJisheng Zhang ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 50009737070cSJisheng Zhang MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 500141de8d4cSJoe Perches if (!ndev) 500215ffac73SJoachim Eastwood return -ENOMEM; 50037ac6653aSJeff Kirsher 5004bfab27a1SGiuseppe CAVALLARO SET_NETDEV_DEV(ndev, device); 50057ac6653aSJeff Kirsher 5006bfab27a1SGiuseppe CAVALLARO priv = netdev_priv(ndev); 5007bfab27a1SGiuseppe CAVALLARO priv->device = device; 5008bfab27a1SGiuseppe CAVALLARO priv->dev = ndev; 5009bfab27a1SGiuseppe CAVALLARO 5010bfab27a1SGiuseppe CAVALLARO stmmac_set_ethtool_ops(ndev); 5011cf3f047bSGiuseppe CAVALLARO priv->pause = pause; 5012cf3f047bSGiuseppe CAVALLARO priv->plat = plat_dat; 5013e56788cfSJoachim Eastwood priv->ioaddr = res->addr; 5014e56788cfSJoachim Eastwood priv->dev->base_addr = (unsigned long)res->addr; 5015e56788cfSJoachim Eastwood 5016e56788cfSJoachim Eastwood priv->dev->irq = res->irq; 5017e56788cfSJoachim Eastwood priv->wol_irq = res->wol_irq; 5018e56788cfSJoachim Eastwood priv->lpi_irq = res->lpi_irq; 5019e56788cfSJoachim Eastwood 5020a51645f7SPetr Štetiar if (!IS_ERR_OR_NULL(res->mac)) 5021e56788cfSJoachim Eastwood memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 5022bfab27a1SGiuseppe CAVALLARO 5023a7a62685SJoachim Eastwood dev_set_drvdata(device, priv->dev); 5024803f8fc4SJoachim Eastwood 5025cf3f047bSGiuseppe CAVALLARO /* Verify driver arguments */ 5026cf3f047bSGiuseppe CAVALLARO stmmac_verify_args(); 5027cf3f047bSGiuseppe CAVALLARO 
502834877a15SJose Abreu /* Allocate workqueue */ 502934877a15SJose Abreu priv->wq = create_singlethread_workqueue("stmmac_wq"); 503034877a15SJose Abreu if (!priv->wq) { 503134877a15SJose Abreu dev_err(priv->device, "failed to create workqueue\n"); 50329737070cSJisheng Zhang return -ENOMEM; 503334877a15SJose Abreu } 503434877a15SJose Abreu 503534877a15SJose Abreu INIT_WORK(&priv->service_task, stmmac_service_task); 503634877a15SJose Abreu 5037cf3f047bSGiuseppe CAVALLARO /* Override with kernel parameters if supplied XXX CRS XXX 5038ceb69499SGiuseppe CAVALLARO * this needs to have multiple instances 5039ceb69499SGiuseppe CAVALLARO */ 5040cf3f047bSGiuseppe CAVALLARO if ((phyaddr >= 0) && (phyaddr <= 31)) 5041cf3f047bSGiuseppe CAVALLARO priv->plat->phy_addr = phyaddr; 5042cf3f047bSGiuseppe CAVALLARO 504390f522a2SEugeniy Paltsev if (priv->plat->stmmac_rst) { 504490f522a2SEugeniy Paltsev ret = reset_control_assert(priv->plat->stmmac_rst); 5045f573c0b9Sjpinto reset_control_deassert(priv->plat->stmmac_rst); 504690f522a2SEugeniy Paltsev /* Some reset controllers have only reset callback instead of 504790f522a2SEugeniy Paltsev * assert + deassert callbacks pair. 
504890f522a2SEugeniy Paltsev */ 504990f522a2SEugeniy Paltsev if (ret == -ENOTSUPP) 505090f522a2SEugeniy Paltsev reset_control_reset(priv->plat->stmmac_rst); 505190f522a2SEugeniy Paltsev } 5052c5e4ddbdSChen-Yu Tsai 5053cf3f047bSGiuseppe CAVALLARO /* Init MAC and get the capabilities */ 5054c24602efSGiuseppe CAVALLARO ret = stmmac_hw_init(priv); 5055c24602efSGiuseppe CAVALLARO if (ret) 505662866e98SChen-Yu Tsai goto error_hw_init; 5057cf3f047bSGiuseppe CAVALLARO 5058b561af36SVinod Koul stmmac_check_ether_addr(priv); 5059b561af36SVinod Koul 5060cf3f047bSGiuseppe CAVALLARO ndev->netdev_ops = &stmmac_netdev_ops; 5061cf3f047bSGiuseppe CAVALLARO 5062cf3f047bSGiuseppe CAVALLARO ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 5063cf3f047bSGiuseppe CAVALLARO NETIF_F_RXCSUM; 5064f748be53SAlexandre TORGUE 50654dbbe8ddSJose Abreu ret = stmmac_tc_init(priv, priv); 50664dbbe8ddSJose Abreu if (!ret) { 50674dbbe8ddSJose Abreu ndev->hw_features |= NETIF_F_HW_TC; 50684dbbe8ddSJose Abreu } 50694dbbe8ddSJose Abreu 5070f748be53SAlexandre TORGUE if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 50719edfa7daSNiklas Cassel ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 5072b7766206SJose Abreu if (priv->plat->has_gmac4) 5073b7766206SJose Abreu ndev->hw_features |= NETIF_F_GSO_UDP_L4; 5074f748be53SAlexandre TORGUE priv->tso = true; 507538ddc59dSLABBE Corentin dev_info(priv->device, "TSO feature enabled\n"); 5076f748be53SAlexandre TORGUE } 5077a993db88SJose Abreu 507867afd6d1SJose Abreu if (priv->dma_cap.sphen) { 507967afd6d1SJose Abreu ndev->hw_features |= NETIF_F_GRO; 508067afd6d1SJose Abreu priv->sph = true; 508167afd6d1SJose Abreu dev_info(priv->device, "SPH feature enabled\n"); 508267afd6d1SJose Abreu } 508367afd6d1SJose Abreu 5084f119cc98SFugang Duan /* The current IP register MAC_HW_Feature1[ADDR64] only define 5085f119cc98SFugang Duan * 32/40/64 bit width, but some SOC support others like i.MX8MP 5086f119cc98SFugang Duan * support 34 bits but it map to 40 
bits width in MAC_HW_Feature1[ADDR64]. 5087f119cc98SFugang Duan * So overwrite dma_cap.addr64 according to HW real design. 5088f119cc98SFugang Duan */ 5089f119cc98SFugang Duan if (priv->plat->addr64) 5090f119cc98SFugang Duan priv->dma_cap.addr64 = priv->plat->addr64; 5091f119cc98SFugang Duan 5092a993db88SJose Abreu if (priv->dma_cap.addr64) { 5093a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, 5094a993db88SJose Abreu DMA_BIT_MASK(priv->dma_cap.addr64)); 5095a993db88SJose Abreu if (!ret) { 5096a993db88SJose Abreu dev_info(priv->device, "Using %d bits DMA width\n", 5097a993db88SJose Abreu priv->dma_cap.addr64); 5098968a2978SThierry Reding 5099968a2978SThierry Reding /* 5100968a2978SThierry Reding * If more than 32 bits can be addressed, make sure to 5101968a2978SThierry Reding * enable enhanced addressing mode. 5102968a2978SThierry Reding */ 5103968a2978SThierry Reding if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 5104968a2978SThierry Reding priv->plat->dma_cfg->eame = true; 5105a993db88SJose Abreu } else { 5106a993db88SJose Abreu ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 5107a993db88SJose Abreu if (ret) { 5108a993db88SJose Abreu dev_err(priv->device, "Failed to set DMA Mask\n"); 5109a993db88SJose Abreu goto error_hw_init; 5110a993db88SJose Abreu } 5111a993db88SJose Abreu 5112a993db88SJose Abreu priv->dma_cap.addr64 = 32; 5113a993db88SJose Abreu } 5114a993db88SJose Abreu } 5115a993db88SJose Abreu 5116bfab27a1SGiuseppe CAVALLARO ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 5117bfab27a1SGiuseppe CAVALLARO ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 51187ac6653aSJeff Kirsher #ifdef STMMAC_VLAN_TAG_USED 51197ac6653aSJeff Kirsher /* Both mac100 and gmac support receive VLAN tag detection */ 5120ab188e8fSElad Nachman ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 51213cd1cfcbSJose Abreu if (priv->dma_cap.vlhash) { 51223cd1cfcbSJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 51233cd1cfcbSJose 
Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 51243cd1cfcbSJose Abreu } 512530d93227SJose Abreu if (priv->dma_cap.vlins) { 512630d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 512730d93227SJose Abreu if (priv->dma_cap.dvlan) 512830d93227SJose Abreu ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 512930d93227SJose Abreu } 51307ac6653aSJeff Kirsher #endif 51317ac6653aSJeff Kirsher priv->msg_enable = netif_msg_init(debug, default_msg_level); 51327ac6653aSJeff Kirsher 513376067459SJose Abreu /* Initialize RSS */ 513476067459SJose Abreu rxq = priv->plat->rx_queues_to_use; 513576067459SJose Abreu netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 513676067459SJose Abreu for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 513776067459SJose Abreu priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 513876067459SJose Abreu 513976067459SJose Abreu if (priv->dma_cap.rssen && priv->plat->rss_en) 514076067459SJose Abreu ndev->features |= NETIF_F_RXHASH; 514176067459SJose Abreu 514244770e11SJarod Wilson /* MTU range: 46 - hw-specific max */ 514344770e11SJarod Wilson ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 514456bcd591SJose Abreu if (priv->plat->has_xgmac) 51457d9e6c5aSJose Abreu ndev->max_mtu = XGMAC_JUMBO_LEN; 514656bcd591SJose Abreu else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 514756bcd591SJose Abreu ndev->max_mtu = JUMBO_LEN; 514844770e11SJarod Wilson else 514944770e11SJarod Wilson ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 5150a2cd64f3SKweh, Hock Leong /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 5151a2cd64f3SKweh, Hock Leong * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 
5152a2cd64f3SKweh, Hock Leong */ 5153a2cd64f3SKweh, Hock Leong if ((priv->plat->maxmtu < ndev->max_mtu) && 5154a2cd64f3SKweh, Hock Leong (priv->plat->maxmtu >= ndev->min_mtu)) 515544770e11SJarod Wilson ndev->max_mtu = priv->plat->maxmtu; 5156a2cd64f3SKweh, Hock Leong else if (priv->plat->maxmtu < ndev->min_mtu) 5157b618ab45SHeiner Kallweit dev_warn(priv->device, 5158a2cd64f3SKweh, Hock Leong "%s: warning: maxmtu having invalid value (%d)\n", 5159a2cd64f3SKweh, Hock Leong __func__, priv->plat->maxmtu); 516044770e11SJarod Wilson 51617ac6653aSJeff Kirsher if (flow_ctrl) 51627ac6653aSJeff Kirsher priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 51637ac6653aSJeff Kirsher 51648fce3331SJose Abreu /* Setup channels NAPI */ 51650366f7e0SOng Boon Leong stmmac_napi_add(ndev); 51667ac6653aSJeff Kirsher 516729555fa3SThierry Reding mutex_init(&priv->lock); 51687ac6653aSJeff Kirsher 5169cd7201f4SGiuseppe CAVALLARO /* If a specific clk_csr value is passed from the platform 5170cd7201f4SGiuseppe CAVALLARO * this means that the CSR Clock Range selection cannot be 5171cd7201f4SGiuseppe CAVALLARO * changed at run-time and it is fixed. Viceversa the driver'll try to 5172cd7201f4SGiuseppe CAVALLARO * set the MDC clock dynamically according to the csr actual 5173cd7201f4SGiuseppe CAVALLARO * clock input. 
5174cd7201f4SGiuseppe CAVALLARO */ 51755e7f7fc5SBiao Huang if (priv->plat->clk_csr >= 0) 5176cd7201f4SGiuseppe CAVALLARO priv->clk_csr = priv->plat->clk_csr; 51775e7f7fc5SBiao Huang else 51785e7f7fc5SBiao Huang stmmac_clk_csr_set(priv); 5179cd7201f4SGiuseppe CAVALLARO 5180e58bb43fSGiuseppe CAVALLARO stmmac_check_pcs_mode(priv); 5181e58bb43fSGiuseppe CAVALLARO 5182a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 51833fe5cadbSGiuseppe CAVALLARO priv->hw->pcs != STMMAC_PCS_RTBI) { 51844bfcbd7aSFrancesco Virlinzi /* MDIO bus Registration */ 51854bfcbd7aSFrancesco Virlinzi ret = stmmac_mdio_register(ndev); 51864bfcbd7aSFrancesco Virlinzi if (ret < 0) { 5187b618ab45SHeiner Kallweit dev_err(priv->device, 518838ddc59dSLABBE Corentin "%s: MDIO bus (id: %d) registration failed", 51894bfcbd7aSFrancesco Virlinzi __func__, priv->plat->bus_id); 51906a81c26fSViresh Kumar goto error_mdio_register; 51914bfcbd7aSFrancesco Virlinzi } 5192e58bb43fSGiuseppe CAVALLARO } 51934bfcbd7aSFrancesco Virlinzi 519474371272SJose Abreu ret = stmmac_phy_setup(priv); 519574371272SJose Abreu if (ret) { 519674371272SJose Abreu netdev_err(ndev, "failed to setup phy (%d)\n", ret); 519774371272SJose Abreu goto error_phy_setup; 519874371272SJose Abreu } 519974371272SJose Abreu 520057016590SFlorian Fainelli ret = register_netdev(ndev); 5201b2eb09afSFlorian Fainelli if (ret) { 5202b618ab45SHeiner Kallweit dev_err(priv->device, "%s: ERROR %i registering the device\n", 520357016590SFlorian Fainelli __func__, ret); 5204b2eb09afSFlorian Fainelli goto error_netdev_register; 5205b2eb09afSFlorian Fainelli } 52067ac6653aSJeff Kirsher 5207b9663b7cSVoon Weifeng if (priv->plat->serdes_powerup) { 5208b9663b7cSVoon Weifeng ret = priv->plat->serdes_powerup(ndev, 5209b9663b7cSVoon Weifeng priv->plat->bsp_priv); 5210b9663b7cSVoon Weifeng 5211b9663b7cSVoon Weifeng if (ret < 0) 5212801eb050SAndy Shevchenko goto error_serdes_powerup; 5213b9663b7cSVoon Weifeng } 5214b9663b7cSVoon Weifeng 52155f2b8b62SThierry Reding 
#ifdef CONFIG_DEBUG_FS 52168d72ab11SGreg Kroah-Hartman stmmac_init_fs(ndev); 52175f2b8b62SThierry Reding #endif 52185f2b8b62SThierry Reding 521957016590SFlorian Fainelli return ret; 52207ac6653aSJeff Kirsher 5221801eb050SAndy Shevchenko error_serdes_powerup: 5222801eb050SAndy Shevchenko unregister_netdev(ndev); 52236a81c26fSViresh Kumar error_netdev_register: 522474371272SJose Abreu phylink_destroy(priv->phylink); 522574371272SJose Abreu error_phy_setup: 5226a47b9e15SDejin Zheng if (priv->hw->pcs != STMMAC_PCS_TBI && 5227b2eb09afSFlorian Fainelli priv->hw->pcs != STMMAC_PCS_RTBI) 5228b2eb09afSFlorian Fainelli stmmac_mdio_unregister(ndev); 52297ac6653aSJeff Kirsher error_mdio_register: 52300366f7e0SOng Boon Leong stmmac_napi_del(ndev); 523162866e98SChen-Yu Tsai error_hw_init: 523234877a15SJose Abreu destroy_workqueue(priv->wq); 52337ac6653aSJeff Kirsher 523415ffac73SJoachim Eastwood return ret; 52357ac6653aSJeff Kirsher } 5236b2e2f0c7SAndy Shevchenko EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 52377ac6653aSJeff Kirsher 52387ac6653aSJeff Kirsher /** 52397ac6653aSJeff Kirsher * stmmac_dvr_remove 5240f4e7bd81SJoachim Eastwood * @dev: device pointer 52417ac6653aSJeff Kirsher * Description: this function resets the TX/RX processes, disables the MAC RX/TX 5242bfab27a1SGiuseppe CAVALLARO * changes the link status, releases the DMA descriptor rings. 
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	/* Quiesce all DMA channels before tearing anything down */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Disable the MAC core, then detach from the network stack */
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	/* Put the IP back into reset and gate its clocks */
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it
 is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	phylink_mac_change(priv->phylink, false);

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-TX-queue coalescing timers */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		/* WoL path: arm the PMT block with the configured wake
		 * options and keep clocks running so wake events are seen.
		 */
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		/* Non-WoL path: phylink_stop() needs the RTNL lock, and
		 * RTNL must not be taken while holding priv->lock, hence
		 * the unlock/relock dance around it.
		 */
		mutex_unlock(&priv->lock);
		rtnl_lock();
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_stop(priv->phylink);
		rtnl_unlock();
		mutex_lock(&priv->lock);

		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clock in case of PWM is off */
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
		clk_disable_unprepare(priv->plat->pclk);
		clk_disable_unprepare(priv->plat->stmmac_clk);
	}
	mutex_unlock(&priv->lock);

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Rewind every RX ring's producer/consumer indices */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	/* Rewind every TX ring's indices, clear the cached TSO MSS and
	 * reset the BQL state of the corresponding netdev TX queue.
	 */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clk previously disabled */
		clk_prepare_enable(priv->plat->stmmac_clk);
		clk_prepare_enable(priv->plat->pclk);
		if (priv->plat->clk_ptp_ref)
			clk_prepare_enable(priv->plat->clk_ptp_ref);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	/* Restart phylink only on the non-WoL path; on the WoL path the
	 * link was left up during suspend.
	 */
	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
		rtnl_lock();
		phylink_start(priv->phylink);
		/* We may have called phylink_speed_down before */
		phylink_speed_up(priv->phylink);
		rtnl_unlock();
	}

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);
	/* NOTE(review): presumably re-populates the RX rings' buffers
	 * before the descriptors are cleared below — confirm against the
	 * stmmac_reinit_rx_buffers() definition (not visible here).
	 */
	stmmac_reinit_rx_buffers(priv);
	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	phylink_mac_change(priv->phylink, true);

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	/* Parse comma-separated "key:value" pairs from stmmaceth= */
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

/* Module init: only sets up the shared debugfs root and the netdevice
 * notifier; per-device setup happens in stmmac_dvr_probe().
 */
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

/* Module exit: undoes stmmac_init() in reverse order */
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");