// SPDX-License-Identifier: GPL-2.0-only
/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/sxgbe_platform.h>

#include "sxgbe_common.h"
#include "sxgbe_desc.h"
#include "sxgbe_dma.h"
#include "sxgbe_mtl.h"
#include "sxgbe_reg.h"

#define SXGBE_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

/* Module parameters */
#define TX_TIMEO	5000
#define DMA_TX_SIZE	512
#define DMA_RX_SIZE	1024
#define TC_DEFAULT	64
#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
/* The default timer value as per the sxgbe specification: 1 sec (1000 ms) */
#define SXGBE_DEFAULT_LPI_TIMER	1000

static int debug = -1;
static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;

module_param(eee_timer, int, 0644);

module_param(debug, int, 0644);
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);

#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))

/**
 * sxgbe_verify_args - verify the driver parameters.
 * Description: it verifies that no invalid parameter was passed to the
 * driver. Note that wrong parameters are replaced with the default values.
 */
static void sxgbe_verify_args(void)
{
	if (unlikely(eee_timer < 0))
		eee_timer = SXGBE_DEFAULT_LPI_TIMER;
}

static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
{
	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->ioaddr);
}

void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
	/* Exit and disable EEE in case we are in LPI state. */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * sxgbe_eee_ctrl_timer
 * @t: timer entry embedded in the driver private data
 * Description:
 * If there is no data transfer and if we are not in LPI state,
 * then the MAC transmitter can be moved to LPI state.
 */
static void sxgbe_eee_ctrl_timer(struct timer_list *t)
{
	struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);

	sxgbe_enable_eee_mode(priv);
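	/* Re-arm the timer so that LPI entry is re-evaluated after another
	 * eee_timer interval of TX inactivity.
	 */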
	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}

/**
 * sxgbe_eee_init
 * @priv: private device pointer
 * Description:
 * If EEE support has been enabled while configuring the driver, and if
 * the GMAC actually supports EEE (from the HW cap register) and the PHY
 * can also manage EEE, then enable the LPI state and start the timer
 * to verify whether the TX path can enter the LPI state.
 */
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
	struct net_device *ndev = priv->dev;
	bool ret = false;

	/* MAC core supports the EEE feature. */
	if (priv->hw_cap.eee) {
		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, true))
			return false;

		priv->eee_active = 1;
		timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0);
		priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
		add_timer(&priv->eee_ctrl_timer);

		priv->hw->mac->set_eee_timer(priv->ioaddr,
					     SXGBE_DEFAULT_LPI_TIMER,
					     priv->tx_lpi_timer);

		pr_info("Energy-Efficient Ethernet initialized\n");

		ret = true;
	}

	return ret;
}

static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
	struct net_device *ndev = priv->dev;

	/* When EEE has already been initialised, we have to modify the
	 * PLS bit in the LPI ctrl & status register according to the
	 * PHY link status.
	 */
	if (priv->eee_enabled)
		priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link);
}

/**
 * sxgbe_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 */
static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
{
	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);

	/* assign the proper divider, this will be used during
	 * mdio communication
	 */
	if (clk_rate < SXGBE_CSR_F_150M)
		priv->clk_csr = SXGBE_CSR_100_150M;
	else if (clk_rate <= SXGBE_CSR_F_250M)
		priv->clk_csr = SXGBE_CSR_150_250M;
	else if (clk_rate <= SXGBE_CSR_F_300M)
		priv->clk_csr = SXGBE_CSR_250_300M;
	else if (clk_rate <= SXGBE_CSR_F_350M)
		priv->clk_csr = SXGBE_CSR_300_350M;
	else if (clk_rate <= SXGBE_CSR_F_400M)
		priv->clk_csr = SXGBE_CSR_350_400M;
	else if (clk_rate <= SXGBE_CSR_F_500M)
		priv->clk_csr = SXGBE_CSR_400_500M;
}

/* minimum number of free TX descriptors required to wake up TX process */
#define SXGBE_TX_THRESH(x)	(x->dma_tx_size / 4)

static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
{
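	/* Free slots left in the ring: the distance from the last cleaned
	 * entry (dirty_tx) to the next entry to be used (cur_tx) in a ring
	 * of tx_qsize descriptors, keeping one slot unused so that a full
	 * ring can be told apart from an empty one.
	 */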
	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
}

/**
 * sxgbe_adjust_link
 * @dev: net device structure
 * Description: it adjusts the link parameters.
 */
static void sxgbe_adjust_link(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u8 new_state = 0;
	u8 speed = 0xff;

	if (!phydev)
		return;

	/* SXGBE does not support auto-negotiation or half-duplex mode,
	 * so duplex changes are not handled in this function; only the
	 * speed and link status are.
	 */
	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_10000:
				speed = SXGBE_SPEED_10G;
				break;
			case SPEED_2500:
				speed = SXGBE_SPEED_2_5G;
				break;
			case SPEED_1000:
				speed = SXGBE_SPEED_1G;
				break;
			default:
				netif_err(priv, link, dev,
					  "Speed (%d) not supported\n",
					  phydev->speed);
			}

			priv->speed = phydev->speed;
			priv->hw->mac->set_speed(priv->ioaddr, speed);
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	/* Alter the MAC settings for EEE */
	sxgbe_eee_adjust(priv);
}

/**
 * sxgbe_init_phy - PHY initialization
 * @ndev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int sxgbe_init_phy(struct net_device *ndev)
{
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	struct phy_device *phydev;
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	int phy_iface = priv->plat->interface;

	/* assign default link status */
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);

	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Stop advertising 1000BASE capability if interface is not GMII */
	if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (phy_iface == PHY_INTERFACE_MODE_RMII))
		phy_set_max_speed(phydev, SPEED_1000);

	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
		   __func__, phydev->phy_id, phydev->link);

	return 0;
}

/**
 * sxgbe_clear_descriptors: clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
{
	int i, j;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	/* Clear the Rx/Tx descriptors */
	for (j = 0; j < SXGBE_RX_QUEUES; j++) {
		for (i = 0; i < rxsize; i++)
			priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
	}

	for (j = 0; j < SXGBE_TX_QUEUES; j++) {
		for (i = 0; i < txsize; i++)
			priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
	}
}

static int sxgbe_init_rx_buffers(struct net_device *dev,
				 struct sxgbe_rx_norm_desc *p, int i,
				 unsigned int dma_buf_sz,
				 struct sxgbe_rx_queue *rx_ring)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	rx_ring->rx_skbuff[i] = skb;
	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						   dma_buf_sz, DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
		netdev_err(dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];

	return 0;
}

/**
 * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
 * @dev: net device structure
 * @p: descriptor pointer
 * @i: index
 * @dma_buf_sz: buffer size
 * @rx_ring: ring to be freed
 *
 * Description: this function frees the RX buffer that was allocated for
 * the given descriptor.
 */
static void sxgbe_free_rx_buffers(struct net_device *dev,
				  struct sxgbe_rx_norm_desc *p, int i,
				  unsigned int dma_buf_sz,
				  struct sxgbe_rx_queue *rx_ring)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	kfree_skb(rx_ring->rx_skbuff[i]);
	dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
			 dma_buf_sz, DMA_FROM_DEVICE);
}

/**
 * init_tx_ring - init the TX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @tx_ring: ring to be initialised
 * @tx_rsize: ring size
 * Description: this function initializes the DMA TX descriptor ring
 */
static int init_tx_ring(struct device *dev, u8 queue_no,
			struct sxgbe_tx_queue *tx_ring, int tx_rsize)
{
	/* TX ring is not allocated */
	if (!tx_ring) {
		dev_err(dev, "No memory for TX queue of SXGBE\n");
		return -ENOMEM;
	}

	/* allocate memory for TX descriptors */
	tx_ring->dma_tx = dma_alloc_coherent(dev,
					     tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
					     &tx_ring->dma_tx_phy, GFP_KERNEL);
	if (!tx_ring->dma_tx)
		return -ENOMEM;

	/* allocate memory for TX skbuff array */
	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
					      sizeof(dma_addr_t), GFP_KERNEL);
	if (!tx_ring->tx_skbuff_dma)
		goto dmamem_err;

	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
					  sizeof(struct sk_buff *), GFP_KERNEL);

	if (!tx_ring->tx_skbuff)
		goto dmamem_err;

	/* assign queue number */
	tx_ring->queue_no = queue_no;

	/* initialise counters */
	tx_ring->dirty_tx = 0;
	tx_ring->cur_tx = 0;

	return 0;

dmamem_err:
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
	return -ENOMEM;
}

/**
 * free_rx_ring - free the RX descriptor ring
 * @dev: net device structure
 * @rx_ring: ring to be freed
 * @rx_rsize: ring size
 * Description: this function frees the DMA RX descriptor ring
 */
static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
			 int rx_rsize)
{
	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
	kfree(rx_ring->rx_skbuff_dma);
	kfree(rx_ring->rx_skbuff);
}

/**
 * init_rx_ring - init the RX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @rx_ring: ring to be initialised
 * @rx_rsize: ring size
 * Description: this function initializes the DMA RX descriptor ring
 */
static int init_rx_ring(struct net_device *dev, u8 queue_no,
			struct sxgbe_rx_queue *rx_ring, int rx_rsize)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int desc_index;
	unsigned int bfsize = 0;
	unsigned int ret = 0;

	/* Set the max buffer size according to the MTU. */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
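	/* i.e. room for the MTU plus the Ethernet header, FCS and
	 * NET_IP_ALIGN padding, rounded up to an 8-byte boundary.
	 */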

	netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);

	/* RX ring is not allocated */
	if (rx_ring == NULL) {
		netdev_err(dev, "No memory for RX queue\n");
		return -ENOMEM;
	}

	/* assign queue number */
	rx_ring->queue_no = queue_no;

	/* allocate memory for RX descriptors */
	rx_ring->dma_rx = dma_alloc_coherent(priv->device,
					     rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
					     &rx_ring->dma_rx_phy, GFP_KERNEL);

	if (rx_ring->dma_rx == NULL)
		return -ENOMEM;

	/* allocate memory for RX skbuff array */
	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
					       sizeof(dma_addr_t), GFP_KERNEL);
	if (!rx_ring->rx_skbuff_dma) {
		ret = -ENOMEM;
		goto err_free_dma_rx;
	}

	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
					   sizeof(struct sk_buff *), GFP_KERNEL);
	if (!rx_ring->rx_skbuff) {
		ret = -ENOMEM;
		goto err_free_skbuff_dma;
	}

	/* initialise the buffers */
	for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
		struct sxgbe_rx_norm_desc *p;

		p = rx_ring->dma_rx + desc_index;
		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
					    bfsize, rx_ring);
		if (ret)
			goto err_free_rx_buffers;
	}

	/* initialise counters */
	rx_ring->cur_rx = 0;
	rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
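	/* desc_index equals rx_rsize when the loop above completes, so
	 * dirty_rx starts at zero.
	 */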
	priv->dma_buf_sz = bfsize;

	return 0;

err_free_rx_buffers:
	while (--desc_index >= 0) {
		struct sxgbe_rx_norm_desc *p;

		p = rx_ring->dma_rx + desc_index;
		sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
	}
	kfree(rx_ring->rx_skbuff);
err_free_skbuff_dma:
	kfree(rx_ring->rx_skbuff_dma);
err_free_dma_rx:
	dma_free_coherent(priv->device,
			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);

	return ret;
}

/**
 * free_tx_ring - free the TX descriptor ring
 * @dev: net device structure
 * @tx_ring: ring to be freed
 * @tx_rsize: ring size
 * Description: this function frees the DMA TX descriptor ring
 */
static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
			 int tx_rsize)
{
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @netd: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *netd)
{
	int queue_num, ret;
	struct sxgbe_priv_data *priv = netdev_priv(netd);
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Allocate memory for queue structures and TX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = init_tx_ring(priv->device, queue_num,
				   priv->txq[queue_num], tx_rsize);
		if (ret) {
			dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
			goto txalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed when cleaning the TX queue
		 */
		priv->txq[queue_num]->priv_ptr = priv;
	}

	/* Allocate memory for queue structures and RX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = init_rx_ring(netd, queue_num,
				   priv->rxq[queue_num], rx_rsize);
		if (ret) {
			netdev_err(netd, "RX DMA ring allocation failed!\n");
			goto rxalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed when cleaning the RX queue
		 */
		priv->rxq[queue_num]->priv_ptr = priv;
	}

	sxgbe_clear_descriptors(priv);

	return 0;

txalloc_err:
	while (queue_num--)
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	return ret;

rxalloc_err:
	while (queue_num--)
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	return ret;
}

static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
{
	int dma_desc;
	struct sxgbe_priv_data *priv = txqueue->priv_ptr;
	int tx_rsize = priv->dma_tx_size;

	for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
		struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;

		if (txqueue->tx_skbuff_dma[dma_desc])
			dma_unmap_single(priv->device,
					 txqueue->tx_skbuff_dma[dma_desc],
					 priv->hw->desc->get_tx_len(tdesc),
					 DMA_TO_DEVICE);

		dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
		txqueue->tx_skbuff[dma_desc] = NULL;
		txqueue->tx_skbuff_dma[dma_desc] = 0;
	}
}

static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		tx_free_ring_skbufs(tqueue);
	}
}

static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
{
	int queue_num;
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Release the DMA TX buffers */
	dma_free_tx_skbufs(priv);

	/* Release the TX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	}

	/* Release the RX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	}
}

static int txring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->txq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_tx_queue),
						    GFP_KERNEL);
		if (!priv->txq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->rxq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_rx_queue),
						    GFP_KERNEL);
		if (!priv->rxq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

/**
 * sxgbe_mtl_operation_mode - HW MTL operation mode
 * @priv: driver private structure
 * Description: it sets the MTL operation mode: tx/rx MTL thresholds
 * or Store-And-Forward capability.
 */
static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* TX/RX threshold control */
	if (likely(priv->plat->force_sf_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->tx_tc = SXGBE_MTL_SFMODE;

		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->rx_tc = SXGBE_MTL_SFMODE;
	} else if (unlikely(priv->plat->force_thresh_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       priv->tx_tc);
		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       priv->rx_tc);
	} else {
		pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
	}
}

/**
 * sxgbe_tx_queue_clean:
 * @tqueue: queue pointer
 * Description: it reclaims resources after transmission completes.
 */
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
{
	struct sxgbe_priv_data *priv = tqueue->priv_ptr;
	unsigned int tx_rsize = priv->dma_tx_size;
	struct netdev_queue *dev_txq;
	u8 queue_no = tqueue->queue_no;

	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);

	__netif_tx_lock(dev_txq, smp_processor_id());

	priv->xstats.tx_clean++;
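	/* Walk the ring from the last cleaned entry (dirty_tx) towards the
	 * producer index (cur_tx), releasing every descriptor that the DMA
	 * engine has handed back.
	 */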
	while (tqueue->dirty_tx != tqueue->cur_tx) {
		unsigned int entry = tqueue->dirty_tx % tx_rsize;
		struct sk_buff *skb = tqueue->tx_skbuff[entry];
		struct sxgbe_tx_norm_desc *p;

		p = tqueue->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n",
				 __func__, tqueue->cur_tx, tqueue->dirty_tx);

		if (likely(tqueue->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 tqueue->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			tqueue->tx_skbuff_dma[entry] = 0;
		}

		if (likely(skb)) {
			dev_kfree_skb(skb);
			tqueue->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		tqueue->dirty_tx++;
	}

	/* wake up queue */
	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
		if (netif_msg_tx_done(priv))
			pr_debug("%s: restart transmit\n", __func__);
		netif_tx_wake_queue(dev_txq);
	}

	__netif_tx_unlock(dev_txq);
}

/**
 * sxgbe_tx_all_clean:
 * @priv: driver private structure
 * Description: it reclaims resources after transmission completes.
 */
static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		sxgbe_tx_queue_clean(tqueue);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		sxgbe_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
	}
}

/**
 * sxgbe_restart_tx_queue: irq tx error mng function
 * @priv: driver private structure
 * @queue_num: queue number
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
{
	struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
	struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
							   queue_num);

	/* stop the queue */
	netif_tx_stop_queue(dev_txq);

	/* stop the tx dma */
	priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);

	/* free the skbuffs of the ring */
	tx_free_ring_skbufs(tx_ring);

	/* initialise counters */
	tx_ring->cur_tx = 0;
	tx_ring->dirty_tx = 0;

	/* start the tx dma */
	priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);

	priv->dev->stats.tx_errors++;

	/* wakeup the queue */
	netif_tx_wake_queue(dev_txq);
}

/**
 * sxgbe_reset_all_tx_queues: irq tx error mng function
 * @priv: driver private structure
 * Description: it cleans all the descriptors and
 * restarts the transmission on all queues in case of errors.
 */
static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* On TX timeout of net device, resetting of all queues
	 * may not be proper way, revisit this later if needed
	 */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		sxgbe_restart_tx_queue(priv, queue_num);
}

/**
 * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 * new GMAC chip generations have a new register to indicate the
 * presence of the optional features/functions.
 * This can also be used to override the value passed through the
 * platform and is necessary for old MAC10/100 and GMAC chips.
 */
static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
{
	int rval = 0;
	struct sxgbe_hw_features *features = &priv->hw_cap;

	/* Read First Capability Register CAP[0] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
	if (rval) {
		features->pmt_remote_wake_up =
			SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
		features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
		features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
		features->tx_csum_offload =
			SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
		features->rx_csum_offload =
			SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
		features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
		features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
		features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
		features->eee = SXGBE_HW_FEAT_EEE(rval);
	}

	/* Read Second Capability Register CAP[1] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
	if (rval) {
		features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
		features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
		features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
		features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
		features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
		features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
		features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
		features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
	}

	/* Read Third Capability Register CAP[2] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
	if (rval) {
		features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
		features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
		features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
		features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
		features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
		features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
	}

	return rval;
}

/**
 * sxgbe_check_ether_addr: check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it verifies that the MAC address is valid; if it is not, a valid
 * address is read from the hardware, and as a last resort a random
 * MAC address is generated.
 */
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		u8 addr[ETH_ALEN];

		priv->hw->mac->get_umac_addr((void __iomem *)
					     priv->ioaddr, addr, 0);
		if (is_valid_ether_addr(addr))
			eth_hw_addr_set(priv->dev, addr);
		else
			eth_hw_addr_random(priv->dev);
	}
	dev_info(priv->device, "device MAC address %pM\n",
		 priv->dev->dev_addr);
}

/**
 * sxgbe_init_dma_engine: DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific SXGBE callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
 */
static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
	int queue_num;

	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		burst_map = priv->plat->dma_cfg->burst_map;
	}

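	/* Program each DMA channel with its TX/RX descriptor base addresses
	 * and ring sizes, then perform the common DMA initialisation.
	 */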
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->dma->cha_init(priv->ioaddr, queue_num,
					fixed_burst, pbl,
					(priv->txq[queue_num])->dma_tx_phy,
					(priv->rxq[queue_num])->dma_rx_phy,
					priv->dma_tx_size, priv->dma_rx_size);

	return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
}

/**
 * sxgbe_init_mtl_engine: MTL init.
 * @priv: driver private structure
 * Description:
 * It inits the MTL invoking the specific SXGBE callback.
 */
static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
						  priv->hw_cap.tx_mtl_qsize);
		priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
	}
}

/**
 * sxgbe_disable_mtl_engine: MTL disable.
 * @priv: driver private structure
 * Description:
 * It disables the MTL queues by invoking the specific SXGBE callback.
 */
static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
}

/**
 * sxgbe_tx_timer: mitigation sw timer for tx.
 * @t: timer pointer
 * Description:
 * This is the timer handler that directly invokes sxgbe_tx_queue_clean().
 */
static void sxgbe_tx_timer(struct timer_list *t)
{
	struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);

	sxgbe_tx_queue_clean(p);
}

/**
 * sxgbe_tx_init_coalesce: init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		p->tx_coal_frames = SXGBE_TX_FRAMES;
		p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
		timer_setup(&p->txtimer, sxgbe_tx_timer, 0);
		p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
		add_timer(&p->txtimer);
	}
}

static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];

		del_timer_sync(&p->txtimer);
	}
}

/**
 * sxgbe_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate negative integer, as defined in errno.h,
 * on failure.
 */
static int sxgbe_open(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret, queue_num;

	clk_prepare_enable(priv->sxgbe_clk);

	sxgbe_check_ether_addr(priv);

	/* Init the phy */
	ret = sxgbe_init_phy(dev);
	if (ret) {
		netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
			   __func__, ret);
		goto phy_error;
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
	priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
	priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
	priv->tx_tc = TC_DEFAULT;
	priv->rx_tc = TC_DEFAULT;
	init_dma_desc_rings(dev);

	/* DMA initialization and SW reset */
	ret = sxgbe_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA initialization failed\n", __func__);
		goto init_error;
	}

	/* MTL initialization */
	sxgbe_init_mtl_engine(priv);

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
	}

	/* Request the IRQ lines */
	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
			       IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, priv->irq, ret);
		goto init_error;
	}

1114acc18c14SGirish K S /* If the LPI irq is different from the mac irq
1115acc18c14SGirish K S * register a dedicated handler
1116acc18c14SGirish K S */
1117acc18c14SGirish K S if (priv->lpi_irq != dev->irq) {
1118acc18c14SGirish K S ret = devm_request_irq(priv->device, priv->lpi_irq,
1119acc18c14SGirish K S sxgbe_common_interrupt,
1120acc18c14SGirish K S IRQF_SHARED, dev->name, dev);
1121acc18c14SGirish K S if (unlikely(ret < 0)) {
1122acc18c14SGirish K S netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1123acc18c14SGirish K S __func__, priv->lpi_irq, ret);
1124acc18c14SGirish K S goto init_error;
1125acc18c14SGirish K S }
1126acc18c14SGirish K S }
1127acc18c14SGirish K S
11281edb9ca6SSiva Reddy /* Request TX DMA irq lines */
11291edb9ca6SSiva Reddy SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
11301edb9ca6SSiva Reddy ret = devm_request_irq(priv->device,
11311edb9ca6SSiva Reddy (priv->txq[queue_num])->irq_no,
11321edb9ca6SSiva Reddy sxgbe_tx_interrupt, 0,
11331edb9ca6SSiva Reddy dev->name, priv->txq[queue_num]);
11341edb9ca6SSiva Reddy if (unlikely(ret < 0)) {
11351edb9ca6SSiva Reddy netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
11361edb9ca6SSiva Reddy __func__, (priv->txq[queue_num])->irq_no, ret);
11371edb9ca6SSiva Reddy goto init_error;
11381edb9ca6SSiva Reddy }
11391edb9ca6SSiva Reddy }
11401edb9ca6SSiva Reddy
11411edb9ca6SSiva Reddy /* Request RX DMA irq lines */
11421edb9ca6SSiva Reddy SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
11431edb9ca6SSiva Reddy ret = devm_request_irq(priv->device,
11441edb9ca6SSiva Reddy (priv->rxq[queue_num])->irq_no,
11451edb9ca6SSiva Reddy sxgbe_rx_interrupt, 0,
11461edb9ca6SSiva Reddy dev->name, priv->rxq[queue_num]);
11471edb9ca6SSiva Reddy if (unlikely(ret < 0)) {
11481edb9ca6SSiva Reddy netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
11491edb9ca6SSiva Reddy __func__, (priv->rxq[queue_num])->irq_no, ret);
11501edb9ca6SSiva Reddy goto init_error;
11511edb9ca6SSiva Reddy }
11521edb9ca6SSiva Reddy }
11531edb9ca6SSiva Reddy
11541edb9ca6SSiva Reddy /* Enable the MAC Rx/Tx */
11551edb9ca6SSiva Reddy priv->hw->mac->enable_tx(priv->ioaddr, true);
11561edb9ca6SSiva Reddy priv->hw->mac->enable_rx(priv->ioaddr, true);
11571edb9ca6SSiva Reddy
11581edb9ca6SSiva Reddy /* Set the HW DMA mode and the COE */
11591edb9ca6SSiva Reddy sxgbe_mtl_operation_mode(priv);
11601edb9ca6SSiva Reddy
11611edb9ca6SSiva Reddy /* Extra statistics */
11621edb9ca6SSiva Reddy memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));
11631edb9ca6SSiva Reddy
11641edb9ca6SSiva Reddy priv->xstats.tx_threshold = priv->tx_tc;
11651edb9ca6SSiva Reddy priv->xstats.rx_threshold = priv->rx_tc;
11661edb9ca6SSiva Reddy
11671edb9ca6SSiva Reddy /* Start the ball rolling... */
11681edb9ca6SSiva Reddy netdev_dbg(dev, "DMA RX/TX processes started...\n");
11691edb9ca6SSiva Reddy priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
11701edb9ca6SSiva Reddy priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
11711edb9ca6SSiva Reddy
11722ebc440aSPhilippe Reynes if (dev->phydev)
11732ebc440aSPhilippe Reynes phy_start(dev->phydev);
11741edb9ca6SSiva Reddy
1175dbedd44eSJoe Perches /* initialise TX coalesce parameters */
11761edb9ca6SSiva Reddy sxgbe_tx_init_coalesce(priv);
11771edb9ca6SSiva Reddy
11781edb9ca6SSiva Reddy if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
11791edb9ca6SSiva Reddy priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
11801edb9ca6SSiva Reddy priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
11811edb9ca6SSiva Reddy }
11821edb9ca6SSiva Reddy
1183acc18c14SGirish K S priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
1184acc18c14SGirish K S priv->eee_enabled = sxgbe_eee_init(priv);
1185acc18c14SGirish K S
11861edb9ca6SSiva Reddy napi_enable(&priv->napi);
11871edb9ca6SSiva Reddy netif_start_queue(dev);
11881edb9ca6SSiva Reddy
11891edb9ca6SSiva Reddy return 0;
11901edb9ca6SSiva Reddy
11911edb9ca6SSiva Reddy init_error:
11921edb9ca6SSiva Reddy free_dma_desc_resources(priv);
11932ebc440aSPhilippe Reynes if (dev->phydev)
11942ebc440aSPhilippe Reynes phy_disconnect(dev->phydev);
11951edb9ca6SSiva Reddy phy_error:
11961edb9ca6SSiva Reddy clk_disable_unprepare(priv->sxgbe_clk);
11971edb9ca6SSiva Reddy
11981edb9ca6SSiva Reddy return ret;
11991edb9ca6SSiva Reddy }
12001edb9ca6SSiva Reddy
12011edb9ca6SSiva Reddy /**
12021edb9ca6SSiva Reddy * sxgbe_release - close entry point of the driver
12031edb9ca6SSiva Reddy * @dev : device pointer.
12041edb9ca6SSiva Reddy * Description:
12051edb9ca6SSiva Reddy * This is the stop entry point of the driver.
12061edb9ca6SSiva Reddy */
12071edb9ca6SSiva Reddy static int sxgbe_release(struct net_device *dev)
12081edb9ca6SSiva Reddy {
12091edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = netdev_priv(dev);
12101edb9ca6SSiva Reddy
1211acc18c14SGirish K S if (priv->eee_enabled)
1212acc18c14SGirish K S del_timer_sync(&priv->eee_ctrl_timer);
1213acc18c14SGirish K S
12141edb9ca6SSiva Reddy /* Stop and disconnect the PHY */
12152ebc440aSPhilippe Reynes if (dev->phydev) {
12162ebc440aSPhilippe Reynes phy_stop(dev->phydev);
12172ebc440aSPhilippe Reynes phy_disconnect(dev->phydev);
12181edb9ca6SSiva Reddy }
12191edb9ca6SSiva Reddy
12201edb9ca6SSiva Reddy netif_tx_stop_all_queues(dev);
12211edb9ca6SSiva Reddy
12221edb9ca6SSiva Reddy napi_disable(&priv->napi);
12231edb9ca6SSiva Reddy
12241edb9ca6SSiva Reddy /* delete TX timers */
12251edb9ca6SSiva Reddy sxgbe_tx_del_timer(priv);
12261edb9ca6SSiva Reddy
12271edb9ca6SSiva Reddy /* Stop TX/RX DMA and clear the descriptors */
12281edb9ca6SSiva Reddy priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
12291edb9ca6SSiva Reddy priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
12301edb9ca6SSiva Reddy
12311edb9ca6SSiva Reddy /* disable MTL queue */
12321edb9ca6SSiva Reddy sxgbe_disable_mtl_engine(priv);
12331edb9ca6SSiva Reddy
12341edb9ca6SSiva Reddy /* Release and free the Rx/Tx resources */
12351edb9ca6SSiva Reddy free_dma_desc_resources(priv);
12361edb9ca6SSiva Reddy
12371edb9ca6SSiva Reddy /* Disable the MAC Rx/Tx */
12381edb9ca6SSiva Reddy priv->hw->mac->enable_tx(priv->ioaddr, false);
12391edb9ca6SSiva Reddy priv->hw->mac->enable_rx(priv->ioaddr, false);
12401edb9ca6SSiva Reddy
12411edb9ca6SSiva Reddy clk_disable_unprepare(priv->sxgbe_clk);
12421edb9ca6SSiva Reddy
12431edb9ca6SSiva Reddy return 0;
12441edb9ca6SSiva Reddy }
12451051125dSVipul Pandya /* Prepare first Tx descriptor for doing TSO operation */
124685da101fSJingoo Han static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
12471051125dSVipul Pandya struct sxgbe_tx_norm_desc *first_desc,
12481051125dSVipul Pandya struct sk_buff *skb)
12491051125dSVipul Pandya {
12501051125dSVipul Pandya unsigned int total_hdr_len, tcp_hdr_len;
12511051125dSVipul Pandya
12521051125dSVipul Pandya /* Write first Tx descriptor with appropriate value */
12531051125dSVipul Pandya tcp_hdr_len = tcp_hdrlen(skb);
12541051125dSVipul Pandya total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;
12551051125dSVipul Pandya
12561051125dSVipul Pandya first_desc->tdes01 = dma_map_single(priv->device, skb->data,
12571051125dSVipul Pandya total_hdr_len, DMA_TO_DEVICE);
12581051125dSVipul Pandya if (dma_mapping_error(priv->device, first_desc->tdes01))
12591051125dSVipul Pandya pr_err("%s: TX dma mapping failed!!\n", __func__);
12601051125dSVipul Pandya
12611051125dSVipul Pandya first_desc->tdes23.tx_rd_des23.first_desc = 1;
12621051125dSVipul Pandya priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
12631051125dSVipul Pandya tcp_hdr_len,
12641051125dSVipul Pandya skb->len - total_hdr_len);
12651051125dSVipul Pandya }
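/* Worked example (hypothetical frame): for a TCP/IPv4 skb with a
 * 14-byte Ethernet + 20-byte IP + 20-byte TCP header,
 * skb_transport_offset() is 34 and tcp_hdrlen() is 20, so
 * total_hdr_len = 54. The first descriptor then maps only those 54
 * header bytes; the TSE-enabled hardware segments the remaining
 * skb->len - 54 payload bytes into MSS-sized frames.
 */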
12661051125dSVipul Pandya
12671edb9ca6SSiva Reddy /**
12681edb9ca6SSiva Reddy * sxgbe_xmit: Tx entry point of the driver
12691edb9ca6SSiva Reddy * @skb : the socket buffer
12701edb9ca6SSiva Reddy * @dev : device pointer
12711edb9ca6SSiva Reddy * Description : this is the tx entry point of the driver.
12721edb9ca6SSiva Reddy * It programs the chain or the ring and supports oversized frames
12731edb9ca6SSiva Reddy * and the SG feature.
12741edb9ca6SSiva Reddy */
12751edb9ca6SSiva Reddy static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
12761edb9ca6SSiva Reddy {
12771edb9ca6SSiva Reddy unsigned int entry, frag_num;
12788f7807aeSVipul Pandya int cksum_flag = 0;
12791edb9ca6SSiva Reddy struct netdev_queue *dev_txq;
12801edb9ca6SSiva Reddy unsigned txq_index = skb_get_queue_mapping(skb);
12811edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = netdev_priv(dev);
12821edb9ca6SSiva Reddy unsigned int tx_rsize = priv->dma_tx_size;
12831edb9ca6SSiva Reddy struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
12841edb9ca6SSiva Reddy struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
12851051125dSVipul Pandya struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
12861edb9ca6SSiva Reddy int nr_frags = skb_shinfo(skb)->nr_frags;
12871edb9ca6SSiva Reddy int no_pagedlen = skb_headlen(skb);
12881edb9ca6SSiva Reddy int is_jumbo = 0;
12891051125dSVipul Pandya u16 cur_mss = skb_shinfo(skb)->gso_size;
12901051125dSVipul Pandya u32 ctxt_desc_req = 0;
12911edb9ca6SSiva Reddy
12921edb9ca6SSiva Reddy /* get the TX queue handle */
12931edb9ca6SSiva Reddy dev_txq = netdev_get_tx_queue(dev, txq_index);
12941edb9ca6SSiva Reddy
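/* Descriptive note on the checks below: a context descriptor is placed
 * ahead of the data descriptors when the GSO MSS differs from the one
 * last programmed, a VLAN tag is present, or a HW TX timestamp is
 * requested.
 */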
12951051125dSVipul Pandya if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
12961051125dSVipul Pandya ctxt_desc_req = 1;
12971051125dSVipul Pandya
1298df8a39deSJiri Pirko if (unlikely(skb_vlan_tag_present(skb) ||
12991051125dSVipul Pandya ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
13001051125dSVipul Pandya tqueue->hwts_tx_en)))
13011051125dSVipul Pandya ctxt_desc_req = 1;
13021051125dSVipul Pandya
1303acc18c14SGirish K S if (priv->tx_path_in_lpi_mode)
1304acc18c14SGirish K S sxgbe_disable_eee_mode(priv);
1305acc18c14SGirish K S
13061edb9ca6SSiva Reddy if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
13071edb9ca6SSiva Reddy if (!netif_tx_queue_stopped(dev_txq)) {
13081edb9ca6SSiva Reddy netif_tx_stop_queue(dev_txq);
13091edb9ca6SSiva Reddy netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
13101edb9ca6SSiva Reddy __func__, txq_index);
13111edb9ca6SSiva Reddy }
13121edb9ca6SSiva Reddy return NETDEV_TX_BUSY;
13131edb9ca6SSiva Reddy }
13141edb9ca6SSiva Reddy
13151edb9ca6SSiva Reddy entry = tqueue->cur_tx % tx_rsize;
13161edb9ca6SSiva Reddy tx_desc = tqueue->dma_tx + entry;
13171edb9ca6SSiva Reddy
13181edb9ca6SSiva Reddy first_desc = tx_desc;
13191051125dSVipul Pandya if (ctxt_desc_req)
13201051125dSVipul Pandya ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;
13211edb9ca6SSiva Reddy
13221edb9ca6SSiva Reddy /* save the skb address */
13231edb9ca6SSiva Reddy tqueue->tx_skbuff[entry] = skb;
13241edb9ca6SSiva Reddy
13251edb9ca6SSiva Reddy if (!is_jumbo) {
13261051125dSVipul Pandya if (likely(skb_is_gso(skb))) {
13271051125dSVipul Pandya /* TSO support */
13281051125dSVipul Pandya if (unlikely(tqueue->prev_mss != cur_mss)) {
13291051125dSVipul Pandya priv->hw->desc->tx_ctxt_desc_set_mss(
13301051125dSVipul Pandya ctxt_desc, cur_mss);
13311051125dSVipul Pandya priv->hw->desc->tx_ctxt_desc_set_tcmssv(
13321051125dSVipul Pandya ctxt_desc);
13331051125dSVipul Pandya priv->hw->desc->tx_ctxt_desc_reset_ostc(
13341051125dSVipul Pandya ctxt_desc);
13351051125dSVipul Pandya priv->hw->desc->tx_ctxt_desc_set_ctxt(
13361051125dSVipul Pandya ctxt_desc);
13371051125dSVipul Pandya priv->hw->desc->tx_ctxt_desc_set_owner(
13381051125dSVipul Pandya ctxt_desc);
13391051125dSVipul Pandya
13401051125dSVipul Pandya entry = (++tqueue->cur_tx) % tx_rsize;
13411051125dSVipul Pandya first_desc = tqueue->dma_tx + entry;
13421051125dSVipul Pandya
13431051125dSVipul Pandya tqueue->prev_mss = cur_mss;
13441051125dSVipul Pandya }
13451051125dSVipul Pandya sxgbe_tso_prepare(priv, first_desc, skb);
13461051125dSVipul Pandya } else {
13471051125dSVipul Pandya tx_desc->tdes01 = dma_map_single(priv->device,
13481051125dSVipul Pandya skb->data, no_pagedlen, DMA_TO_DEVICE);
13491edb9ca6SSiva Reddy if (dma_mapping_error(priv->device, tx_desc->tdes01))
13501051125dSVipul Pandya netdev_err(dev, "%s: TX dma mapping failed!!\n",
13511051125dSVipul Pandya __func__);
13521edb9ca6SSiva Reddy
13531edb9ca6SSiva Reddy priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
13548f7807aeSVipul Pandya no_pagedlen, cksum_flag);
13551edb9ca6SSiva Reddy }
13561051125dSVipul Pandya }
13571edb9ca6SSiva Reddy
13581edb9ca6SSiva Reddy for (frag_num = 0; frag_num < nr_frags; frag_num++) {
13591edb9ca6SSiva Reddy const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
13601edb9ca6SSiva Reddy int len = skb_frag_size(frag);
13611edb9ca6SSiva Reddy
13621edb9ca6SSiva Reddy entry = (++tqueue->cur_tx) % tx_rsize;
13631edb9ca6SSiva Reddy tx_desc = tqueue->dma_tx + entry;
13641edb9ca6SSiva Reddy tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
13651edb9ca6SSiva Reddy DMA_TO_DEVICE);
13661edb9ca6SSiva Reddy
13671edb9ca6SSiva Reddy tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
13681edb9ca6SSiva Reddy tqueue->tx_skbuff[entry] = NULL;
13691edb9ca6SSiva Reddy
13701edb9ca6SSiva Reddy /* prepare the descriptor */
13711edb9ca6SSiva Reddy priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
13728f7807aeSVipul Pandya len, cksum_flag);
13731edb9ca6SSiva Reddy /* memory barrier to flush descriptor */
13741edb9ca6SSiva Reddy wmb();
13751edb9ca6SSiva Reddy
13761edb9ca6SSiva Reddy /* set the owner */
13771edb9ca6SSiva Reddy priv->hw->desc->set_tx_owner(tx_desc);
13781edb9ca6SSiva Reddy }
13791edb9ca6SSiva Reddy
13801edb9ca6SSiva Reddy /* close the descriptors */
13811edb9ca6SSiva Reddy priv->hw->desc->close_tx_desc(tx_desc);
13821edb9ca6SSiva Reddy
13831edb9ca6SSiva Reddy /* memory barrier to flush descriptor */
13841edb9ca6SSiva Reddy wmb();
13851edb9ca6SSiva Reddy
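/* TX interrupt coalescing, a sketch of the logic as written: each
 * transmitted packet adds nr_frags + 1 to tx_count_frames; while that
 * count exceeds tx_coal_frames the IC bit is cleared and the coalescing
 * timer re-armed, deferring completion interrupts to the timer.
 */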
13861edb9ca6SSiva Reddy tqueue->tx_count_frames += nr_frags + 1;
13871edb9ca6SSiva Reddy if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
13881edb9ca6SSiva Reddy priv->hw->desc->clear_tx_ic(tx_desc);
13891edb9ca6SSiva Reddy priv->xstats.tx_reset_ic_bit++;
13901edb9ca6SSiva Reddy mod_timer(&tqueue->txtimer,
13911edb9ca6SSiva Reddy SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
13921edb9ca6SSiva Reddy } else {
13931edb9ca6SSiva Reddy tqueue->tx_count_frames = 0;
13941edb9ca6SSiva Reddy }
13951edb9ca6SSiva Reddy
13961edb9ca6SSiva Reddy /* set owner for first desc */
13971edb9ca6SSiva Reddy priv->hw->desc->set_tx_owner(first_desc);
13981edb9ca6SSiva Reddy
13991edb9ca6SSiva Reddy /* memory barrier to flush descriptor */
14001edb9ca6SSiva Reddy wmb();
14011edb9ca6SSiva Reddy
14021edb9ca6SSiva Reddy tqueue->cur_tx++;
14031edb9ca6SSiva Reddy
14041edb9ca6SSiva Reddy /* display current ring */
14051edb9ca6SSiva Reddy netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
14061edb9ca6SSiva Reddy __func__, tqueue->cur_tx % tx_rsize,
14071edb9ca6SSiva Reddy tqueue->dirty_tx % tx_rsize, entry,
14081edb9ca6SSiva Reddy first_desc, nr_frags);
14091edb9ca6SSiva Reddy
14101edb9ca6SSiva Reddy if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
14111edb9ca6SSiva Reddy netif_dbg(priv, hw, dev, "%s: stop transmitting packets\n",
14121edb9ca6SSiva Reddy __func__);
14131edb9ca6SSiva Reddy netif_tx_stop_queue(dev_txq);
14141edb9ca6SSiva Reddy }
14151edb9ca6SSiva Reddy
14161edb9ca6SSiva Reddy dev->stats.tx_bytes += skb->len;
14171edb9ca6SSiva Reddy
14181edb9ca6SSiva Reddy if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
14191edb9ca6SSiva Reddy tqueue->hwts_tx_en)) {
14201edb9ca6SSiva Reddy /* declare that device is doing timestamping */
14211edb9ca6SSiva Reddy skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
14221edb9ca6SSiva Reddy priv->hw->desc->tx_enable_tstamp(first_desc);
14231edb9ca6SSiva Reddy }
14241edb9ca6SSiva Reddy
14251edb9ca6SSiva Reddy skb_tx_timestamp(skb);
14261edb9ca6SSiva Reddy
14271edb9ca6SSiva Reddy priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
14281edb9ca6SSiva Reddy
14291edb9ca6SSiva Reddy return NETDEV_TX_OK;
14301edb9ca6SSiva Reddy }
14311edb9ca6SSiva Reddy
14321edb9ca6SSiva Reddy /**
14331edb9ca6SSiva Reddy * sxgbe_rx_refill: refill used skb preallocated buffers
14341edb9ca6SSiva Reddy * @priv: driver private structure
14351edb9ca6SSiva Reddy * Description : this is to reallocate the skb for the reception process
14361edb9ca6SSiva Reddy * that is based on zero-copy.
14371edb9ca6SSiva Reddy */
14381edb9ca6SSiva Reddy static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
14391edb9ca6SSiva Reddy {
14401edb9ca6SSiva Reddy unsigned int rxsize = priv->dma_rx_size;
14411edb9ca6SSiva Reddy int bfsize = priv->dma_buf_sz;
14421edb9ca6SSiva Reddy u8 qnum = priv->cur_rx_qnum;
14431edb9ca6SSiva Reddy
14441edb9ca6SSiva Reddy for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
14451edb9ca6SSiva Reddy priv->rxq[qnum]->dirty_rx++) {
14461edb9ca6SSiva Reddy unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
14471edb9ca6SSiva Reddy struct sxgbe_rx_norm_desc *p;
14481edb9ca6SSiva Reddy
14491edb9ca6SSiva Reddy p = priv->rxq[qnum]->dma_rx + entry;
14501edb9ca6SSiva Reddy
14511edb9ca6SSiva Reddy if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
14521edb9ca6SSiva Reddy struct sk_buff *skb;
14531edb9ca6SSiva Reddy
14541edb9ca6SSiva Reddy skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
14551edb9ca6SSiva Reddy
14561edb9ca6SSiva Reddy if (unlikely(skb == NULL))
14571edb9ca6SSiva Reddy break;
14581edb9ca6SSiva Reddy
14591edb9ca6SSiva Reddy priv->rxq[qnum]->rx_skbuff[entry] = skb;
14601edb9ca6SSiva Reddy priv->rxq[qnum]->rx_skbuff_dma[entry] =
14611edb9ca6SSiva Reddy dma_map_single(priv->device, skb->data, bfsize,
14621edb9ca6SSiva Reddy DMA_FROM_DEVICE);
14631edb9ca6SSiva Reddy
14641edb9ca6SSiva Reddy p->rdes23.rx_rd_des23.buf2_addr =
14651edb9ca6SSiva Reddy priv->rxq[qnum]->rx_skbuff_dma[entry];
14661edb9ca6SSiva Reddy }
14671edb9ca6SSiva Reddy
14681edb9ca6SSiva Reddy /* Added memory barrier for RX descriptor modification */
14691edb9ca6SSiva Reddy wmb();
14701edb9ca6SSiva Reddy priv->hw->desc->set_rx_owner(p);
14713dc638d1SByungho An priv->hw->desc->set_rx_int_on_com(p);
14721edb9ca6SSiva Reddy /* Added memory barrier for RX descriptor modification */
14731edb9ca6SSiva Reddy wmb();
14741edb9ca6SSiva Reddy }
14751edb9ca6SSiva Reddy }
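/* Ring bookkeeping example (hypothetical counter values): with
 * rxsize = 1024, cur_rx = 1030 and dirty_rx = 1027, the loop above
 * refills the three consumed entries at ring indices 1027 % 1024 ..
 * 1029 % 1024, i.e. 3, 4 and 5, before handing them back to the
 * hardware.
 */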
14761edb9ca6SSiva Reddy
14771edb9ca6SSiva Reddy /**
14781edb9ca6SSiva Reddy * sxgbe_rx: receive the frames from the remote host
14791edb9ca6SSiva Reddy * @priv: driver private structure
14801edb9ca6SSiva Reddy * @limit: napi budget.
14811edb9ca6SSiva Reddy * Description: this is the function called by the napi poll method.
14821edb9ca6SSiva Reddy * It gets all the frames inside the ring.
14831edb9ca6SSiva Reddy */
14841edb9ca6SSiva Reddy static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
14851edb9ca6SSiva Reddy {
14861edb9ca6SSiva Reddy u8 qnum = priv->cur_rx_qnum;
14871edb9ca6SSiva Reddy unsigned int rxsize = priv->dma_rx_size;
14881edb9ca6SSiva Reddy unsigned int entry = priv->rxq[qnum]->cur_rx;
14891edb9ca6SSiva Reddy unsigned int next_entry = 0;
14901edb9ca6SSiva Reddy unsigned int count = 0;
14918f7807aeSVipul Pandya int checksum;
14928f7807aeSVipul Pandya int status;
14931edb9ca6SSiva Reddy
14941edb9ca6SSiva Reddy while (count < limit) {
14951edb9ca6SSiva Reddy struct sxgbe_rx_norm_desc *p;
14961edb9ca6SSiva Reddy struct sk_buff *skb;
14971edb9ca6SSiva Reddy int frame_len;
14981edb9ca6SSiva Reddy
14991edb9ca6SSiva Reddy p = priv->rxq[qnum]->dma_rx + entry;
15001edb9ca6SSiva Reddy
15011edb9ca6SSiva Reddy if (priv->hw->desc->get_rx_owner(p))
15021edb9ca6SSiva Reddy break;
15031edb9ca6SSiva Reddy
15041edb9ca6SSiva Reddy count++;
15051edb9ca6SSiva Reddy
15061edb9ca6SSiva Reddy next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
15071edb9ca6SSiva Reddy prefetch(priv->rxq[qnum]->dma_rx + next_entry);
15081edb9ca6SSiva Reddy
15098f7807aeSVipul Pandya /* Read the status of the incoming frame and also get checksum
15108f7807aeSVipul Pandya * value based on whether it is enabled in SXGBE hardware or
15118f7807aeSVipul Pandya * not.
15128f7807aeSVipul Pandya */
15138f7807aeSVipul Pandya status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
15148f7807aeSVipul Pandya &checksum);
15158f7807aeSVipul Pandya if (unlikely(status < 0)) {
15168f7807aeSVipul Pandya entry = next_entry;
15178f7807aeSVipul Pandya continue;
15188f7807aeSVipul Pandya }
15198f7807aeSVipul Pandya if (unlikely(!priv->rxcsum_insertion))
15208f7807aeSVipul Pandya checksum = CHECKSUM_NONE;
15211edb9ca6SSiva Reddy
15221edb9ca6SSiva Reddy skb = priv->rxq[qnum]->rx_skbuff[entry];
15231edb9ca6SSiva Reddy
15241edb9ca6SSiva Reddy if (unlikely(!skb))
15251edb9ca6SSiva Reddy netdev_err(priv->dev, "rx descriptor is not consistent\n");
15261edb9ca6SSiva Reddy
15271edb9ca6SSiva Reddy prefetch(skb->data - NET_IP_ALIGN);
15281edb9ca6SSiva Reddy priv->rxq[qnum]->rx_skbuff[entry] = NULL;
15291edb9ca6SSiva Reddy
15301edb9ca6SSiva Reddy frame_len = priv->hw->desc->get_rx_frame_len(p);
15311edb9ca6SSiva Reddy
15321edb9ca6SSiva Reddy skb_put(skb, frame_len);
15331edb9ca6SSiva Reddy
15348f7807aeSVipul Pandya skb->ip_summed = checksum;
15358f7807aeSVipul Pandya if (checksum == CHECKSUM_NONE)
15361edb9ca6SSiva Reddy netif_receive_skb(skb);
15378f7807aeSVipul Pandya else
15388f7807aeSVipul Pandya napi_gro_receive(&priv->napi, skb);
15391edb9ca6SSiva Reddy
15401edb9ca6SSiva Reddy entry = next_entry;
15411edb9ca6SSiva Reddy }
15421edb9ca6SSiva Reddy
15431edb9ca6SSiva Reddy sxgbe_rx_refill(priv);
15441edb9ca6SSiva Reddy
15451edb9ca6SSiva Reddy return count;
15461edb9ca6SSiva Reddy }
15471edb9ca6SSiva Reddy
15481edb9ca6SSiva Reddy /**
15491edb9ca6SSiva Reddy * sxgbe_poll - sxgbe poll method (NAPI)
15501edb9ca6SSiva Reddy * @napi : pointer to the napi structure.
15511edb9ca6SSiva Reddy * @budget : maximum number of packets that the current CPU can receive from
15521edb9ca6SSiva Reddy * all interfaces.
15531edb9ca6SSiva Reddy * Description :
15541edb9ca6SSiva Reddy * To look at the incoming frames and clear the tx resources.
15551edb9ca6SSiva Reddy */
15561edb9ca6SSiva Reddy static int sxgbe_poll(struct napi_struct *napi, int budget)
15571edb9ca6SSiva Reddy {
15581edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = container_of(napi,
15591edb9ca6SSiva Reddy struct sxgbe_priv_data, napi);
15601edb9ca6SSiva Reddy int work_done = 0;
15611edb9ca6SSiva Reddy u8 qnum = priv->cur_rx_qnum;
15621edb9ca6SSiva Reddy
15631edb9ca6SSiva Reddy priv->xstats.napi_poll++;
15641edb9ca6SSiva Reddy /* first, clean the tx queues */
15651edb9ca6SSiva Reddy sxgbe_tx_all_clean(priv);
15661edb9ca6SSiva Reddy
15671edb9ca6SSiva Reddy work_done = sxgbe_rx(priv, budget);
15681edb9ca6SSiva Reddy if (work_done < budget) {
15696ad20165SEric Dumazet napi_complete_done(napi, work_done);
15701edb9ca6SSiva Reddy priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
15711edb9ca6SSiva Reddy }
15721edb9ca6SSiva Reddy
15731edb9ca6SSiva Reddy return work_done;
15741edb9ca6SSiva Reddy }
15751edb9ca6SSiva Reddy
15761edb9ca6SSiva Reddy /**
15771edb9ca6SSiva Reddy * sxgbe_tx_timeout
15781edb9ca6SSiva Reddy * @dev : Pointer to net device structure
1579d0ea5cbdSJesse Brandeburg * @txqueue: index of the hanging queue
15801edb9ca6SSiva Reddy * Description: this function is called when a packet transmission fails to
15811edb9ca6SSiva Reddy * complete within a reasonable time. The driver will mark the error in the
15821edb9ca6SSiva Reddy * netdev structure and arrange for the device to be reset to a sane state
15831edb9ca6SSiva Reddy * in order to transmit a new packet.
15841edb9ca6SSiva Reddy */
15850290bd29SMichael S. Tsirkin static void sxgbe_tx_timeout(struct net_device *dev, unsigned int txqueue)
15861edb9ca6SSiva Reddy {
15871edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = netdev_priv(dev);
15881edb9ca6SSiva Reddy
15891edb9ca6SSiva Reddy sxgbe_reset_all_tx_queues(priv);
15901edb9ca6SSiva Reddy }
15911edb9ca6SSiva Reddy
15921edb9ca6SSiva Reddy /**
15931edb9ca6SSiva Reddy * sxgbe_common_interrupt - main ISR
15941edb9ca6SSiva Reddy * @irq: interrupt number.
15951edb9ca6SSiva Reddy * @dev_id: to pass the net device pointer.
15961edb9ca6SSiva Reddy * Description: this is the main driver interrupt service routine.
15971edb9ca6SSiva Reddy * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
15981edb9ca6SSiva Reddy * interrupts.
15991edb9ca6SSiva Reddy */
16001edb9ca6SSiva Reddy static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
16011edb9ca6SSiva Reddy {
1602acc18c14SGirish K S struct net_device *netdev = (struct net_device *)dev_id;
1603acc18c14SGirish K S struct sxgbe_priv_data *priv = netdev_priv(netdev);
1604acc18c14SGirish K S int status;
1605acc18c14SGirish K S
1606acc18c14SGirish K S status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
1607acc18c14SGirish K S /* For LPI we need to save the tx status */
1608acc18c14SGirish K S if (status & TX_ENTRY_LPI_MODE) {
1609acc18c14SGirish K S priv->xstats.tx_lpi_entry_n++;
1610acc18c14SGirish K S priv->tx_path_in_lpi_mode = true;
1611acc18c14SGirish K S }
1612acc18c14SGirish K S if (status & TX_EXIT_LPI_MODE) {
1613acc18c14SGirish K S priv->xstats.tx_lpi_exit_n++;
1614acc18c14SGirish K S priv->tx_path_in_lpi_mode = false;
1615acc18c14SGirish K S }
1616acc18c14SGirish K S if (status & RX_ENTRY_LPI_MODE)
1617acc18c14SGirish K S priv->xstats.rx_lpi_entry_n++;
1618acc18c14SGirish K S if (status & RX_EXIT_LPI_MODE)
1619acc18c14SGirish K S priv->xstats.rx_lpi_exit_n++;
1620acc18c14SGirish K S
16211edb9ca6SSiva Reddy return IRQ_HANDLED;
16221edb9ca6SSiva Reddy }
16231edb9ca6SSiva Reddy
16241edb9ca6SSiva Reddy /**
16251edb9ca6SSiva Reddy * sxgbe_tx_interrupt - TX DMA ISR
16261edb9ca6SSiva Reddy * @irq: interrupt number.
16271edb9ca6SSiva Reddy * @dev_id: to pass the net device pointer.
16281edb9ca6SSiva Reddy * Description: this is the tx dma interrupt service routine.
16291edb9ca6SSiva Reddy */
16301edb9ca6SSiva Reddy static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
16311edb9ca6SSiva Reddy {
16321edb9ca6SSiva Reddy int status;
16331edb9ca6SSiva Reddy struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
16341edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = txq->priv_ptr;
16351edb9ca6SSiva Reddy
16361edb9ca6SSiva Reddy /* get the channel status */
16371edb9ca6SSiva Reddy status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
16381edb9ca6SSiva Reddy &priv->xstats);
16391edb9ca6SSiva Reddy /* check for normal path */
16401edb9ca6SSiva Reddy if (likely((status & handle_tx)))
16411edb9ca6SSiva Reddy napi_schedule(&priv->napi);
16421edb9ca6SSiva Reddy
16431edb9ca6SSiva Reddy /* check for unrecoverable error */
16441edb9ca6SSiva Reddy if (unlikely((status & tx_hard_error)))
16451edb9ca6SSiva Reddy sxgbe_restart_tx_queue(priv, txq->queue_no);
16461edb9ca6SSiva Reddy
16471edb9ca6SSiva Reddy /* check for TC configuration change */
16481edb9ca6SSiva Reddy if (unlikely((status & tx_bump_tc) &&
16491edb9ca6SSiva Reddy (priv->tx_tc != SXGBE_MTL_SFMODE) &&
16501edb9ca6SSiva Reddy (priv->tx_tc < 512))) {
16511edb9ca6SSiva Reddy /* step of TX TC is 32 till 128, otherwise 64 */
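/* e.g. starting from TC_DEFAULT (64):
 * 64 -> 96 -> 128 -> 192 -> 256 -> ... -> 512, where the
 * tx_tc < 512 check above stops further bumps
 */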
16521edb9ca6SSiva Reddy priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
16531edb9ca6SSiva Reddy priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
16541edb9ca6SSiva Reddy txq->queue_no, priv->tx_tc);
16551edb9ca6SSiva Reddy priv->xstats.tx_threshold = priv->tx_tc;
16561edb9ca6SSiva Reddy }
16571edb9ca6SSiva Reddy
16581edb9ca6SSiva Reddy return IRQ_HANDLED;
16591edb9ca6SSiva Reddy }
16601edb9ca6SSiva Reddy
16611edb9ca6SSiva Reddy /**
16621edb9ca6SSiva Reddy * sxgbe_rx_interrupt - RX DMA ISR
16631edb9ca6SSiva Reddy * @irq: interrupt number.
16641edb9ca6SSiva Reddy * @dev_id: to pass the net device pointer.
16651edb9ca6SSiva Reddy * Description: this is the rx dma interrupt service routine.
16661edb9ca6SSiva Reddy */
16671edb9ca6SSiva Reddy static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
16681edb9ca6SSiva Reddy {
16691edb9ca6SSiva Reddy int status;
16701edb9ca6SSiva Reddy struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
16711edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = rxq->priv_ptr;
16721edb9ca6SSiva Reddy
16731edb9ca6SSiva Reddy /* get the channel status */
16741edb9ca6SSiva Reddy status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
16751edb9ca6SSiva Reddy &priv->xstats);
16761edb9ca6SSiva Reddy
16771edb9ca6SSiva Reddy if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
16781edb9ca6SSiva Reddy priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
16791edb9ca6SSiva Reddy __napi_schedule(&priv->napi);
16801edb9ca6SSiva Reddy }
16811edb9ca6SSiva Reddy
16821edb9ca6SSiva Reddy /* check for TC configuration change */
16831edb9ca6SSiva Reddy if (unlikely((status & rx_bump_tc) &&
16841edb9ca6SSiva Reddy (priv->rx_tc != SXGBE_MTL_SFMODE) &&
16851edb9ca6SSiva Reddy (priv->rx_tc < 128))) {
16861edb9ca6SSiva Reddy /* step of TC is 32 */
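/* e.g. starting from TC_DEFAULT (64): 64 -> 96 -> 128, where the
 * rx_tc < 128 check above stops further bumps
 */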
16871edb9ca6SSiva Reddy priv->rx_tc += 32;
16881edb9ca6SSiva Reddy priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
16891edb9ca6SSiva Reddy rxq->queue_no, priv->rx_tc);
16901edb9ca6SSiva Reddy priv->xstats.rx_threshold = priv->rx_tc;
16911edb9ca6SSiva Reddy }
16921edb9ca6SSiva Reddy
16931edb9ca6SSiva Reddy return IRQ_HANDLED;
16941edb9ca6SSiva Reddy }
16951edb9ca6SSiva Reddy
16961edb9ca6SSiva Reddy static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
16971edb9ca6SSiva Reddy {
16981edb9ca6SSiva Reddy u64 val = readl(ioaddr + reg_lo);
16991edb9ca6SSiva Reddy
17001edb9ca6SSiva Reddy val |= ((u64)readl(ioaddr + reg_hi)) << 32;
17011edb9ca6SSiva Reddy
17021edb9ca6SSiva Reddy return val;
17031edb9ca6SSiva Reddy }
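/* Example (hypothetical register values): if the low register reads
 * 0x00001234 and the high register reads 0x00000001, the returned
 * 64-bit counter value is 0x0000000100001234.
 */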
17041edb9ca6SSiva Reddy
17051edb9ca6SSiva Reddy
17061edb9ca6SSiva Reddy /* sxgbe_get_stats64 - entry point to see statistical information of device
17071edb9ca6SSiva Reddy * @dev : device pointer.
17081edb9ca6SSiva Reddy * @stats : pointer to hold all the statistical information of device.
17091edb9ca6SSiva Reddy * Description:
17101edb9ca6SSiva Reddy * This function is a driver entry point called whenever the ifconfig
17111edb9ca6SSiva Reddy * command is executed to read device statistics. Statistics are the number of
1712dbedd44eSJoe Perches * bytes sent or received, errors occurred etc.
17131edb9ca6SSiva Reddy */
1714bc1f4470Sstephen hemminger static void sxgbe_get_stats64(struct net_device *dev,
17151edb9ca6SSiva Reddy struct rtnl_link_stats64 *stats)
17161edb9ca6SSiva Reddy {
17171edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = netdev_priv(dev);
17181edb9ca6SSiva Reddy void __iomem *ioaddr = priv->ioaddr;
17191edb9ca6SSiva Reddy u64 count;
17201edb9ca6SSiva Reddy
17211edb9ca6SSiva Reddy spin_lock(&priv->stats_lock);
17221edb9ca6SSiva Reddy /* Freeze the counter registers before reading value otherwise it may
17231edb9ca6SSiva Reddy * get updated by hardware while we are reading them
17241edb9ca6SSiva Reddy */
17251edb9ca6SSiva Reddy writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
17261edb9ca6SSiva Reddy
17271edb9ca6SSiva Reddy stats->rx_bytes = sxgbe_get_stat64(ioaddr,
17281edb9ca6SSiva Reddy SXGBE_MMC_RXOCTETLO_GCNT_REG,
17291edb9ca6SSiva Reddy SXGBE_MMC_RXOCTETHI_GCNT_REG);
17301edb9ca6SSiva Reddy
17311edb9ca6SSiva Reddy stats->rx_packets = sxgbe_get_stat64(ioaddr,
17321edb9ca6SSiva Reddy SXGBE_MMC_RXFRAMELO_GBCNT_REG,
17331edb9ca6SSiva Reddy SXGBE_MMC_RXFRAMEHI_GBCNT_REG);
17341edb9ca6SSiva Reddy
17351edb9ca6SSiva Reddy stats->multicast = sxgbe_get_stat64(ioaddr,
17361edb9ca6SSiva Reddy SXGBE_MMC_RXMULTILO_GCNT_REG,
17371edb9ca6SSiva Reddy SXGBE_MMC_RXMULTIHI_GCNT_REG);
17381edb9ca6SSiva Reddy
17391edb9ca6SSiva Reddy stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
17401edb9ca6SSiva Reddy SXGBE_MMC_RXCRCERRLO_REG,
17411edb9ca6SSiva Reddy SXGBE_MMC_RXCRCERRHI_REG);
17421edb9ca6SSiva Reddy
17431edb9ca6SSiva Reddy stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
17441edb9ca6SSiva Reddy SXGBE_MMC_RXLENERRLO_REG,
17451edb9ca6SSiva Reddy SXGBE_MMC_RXLENERRHI_REG);
17461edb9ca6SSiva Reddy
17471edb9ca6SSiva Reddy stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
17481edb9ca6SSiva Reddy SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
17491edb9ca6SSiva Reddy SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);
17501edb9ca6SSiva Reddy
17511edb9ca6SSiva Reddy stats->tx_bytes = sxgbe_get_stat64(ioaddr,
17521edb9ca6SSiva Reddy SXGBE_MMC_TXOCTETLO_GCNT_REG,
17531edb9ca6SSiva Reddy SXGBE_MMC_TXOCTETHI_GCNT_REG);
17541edb9ca6SSiva Reddy
17551edb9ca6SSiva Reddy count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
17561edb9ca6SSiva Reddy SXGBE_MMC_TXFRAMEHI_GBCNT_REG);
17571edb9ca6SSiva Reddy
17581edb9ca6SSiva Reddy stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
17591edb9ca6SSiva Reddy SXGBE_MMC_TXFRAMEHI_GCNT_REG);
17601edb9ca6SSiva Reddy stats->tx_errors = count - stats->tx_errors;
17611edb9ca6SSiva Reddy stats->tx_packets = count;
17621edb9ca6SSiva Reddy stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
17631edb9ca6SSiva Reddy SXGBE_MMC_TXUFLWHI_GBCNT_REG);
17641edb9ca6SSiva Reddy writel(0, ioaddr + SXGBE_MMC_CTL_REG);
17651edb9ca6SSiva Reddy spin_unlock(&priv->stats_lock);
17661edb9ca6SSiva Reddy }
17671edb9ca6SSiva Reddy
17681edb9ca6SSiva Reddy /* sxgbe_set_features - entry point to set offload features of the device.
17691edb9ca6SSiva Reddy * @dev : device pointer.
17701edb9ca6SSiva Reddy * @features : features which are required to be set.
17711edb9ca6SSiva Reddy * Description:
17721edb9ca6SSiva Reddy * This function is a driver entry point and called by Linux kernel whenever
17731edb9ca6SSiva Reddy * any device features are set or reset by user.
17741edb9ca6SSiva Reddy * Return value:
17751edb9ca6SSiva Reddy * This function returns 0 after setting or resetting device features.
17761edb9ca6SSiva Reddy */
17771edb9ca6SSiva Reddy static int sxgbe_set_features(struct net_device *dev,
17781edb9ca6SSiva Reddy netdev_features_t features)
17791edb9ca6SSiva Reddy {
17801edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = netdev_priv(dev);
17811edb9ca6SSiva Reddy netdev_features_t changed = dev->features ^ features;
17821edb9ca6SSiva Reddy
17831edb9ca6SSiva Reddy if (changed & NETIF_F_RXCSUM) {
17848f7807aeSVipul Pandya if (features & NETIF_F_RXCSUM) {
17858f7807aeSVipul Pandya priv->hw->mac->enable_rx_csum(priv->ioaddr);
17868f7807aeSVipul Pandya priv->rxcsum_insertion = true;
17878f7807aeSVipul Pandya } else {
17888f7807aeSVipul Pandya priv->hw->mac->disable_rx_csum(priv->ioaddr);
17898f7807aeSVipul Pandya priv->rxcsum_insertion = false;
17908f7807aeSVipul Pandya }
17911edb9ca6SSiva Reddy }
17921edb9ca6SSiva Reddy
17931edb9ca6SSiva Reddy return 0;
17941edb9ca6SSiva Reddy }
17951edb9ca6SSiva Reddy
17961edb9ca6SSiva Reddy /* sxgbe_change_mtu - entry point to change MTU size for the device.
17971edb9ca6SSiva Reddy * @dev : device pointer.
17981edb9ca6SSiva Reddy * @new_mtu : the new MTU size for the device.
17991edb9ca6SSiva Reddy * Description: the Maximum Transfer Unit (MTU) is used by the network layer
18001edb9ca6SSiva Reddy * to drive packet transmission. Ethernet has an MTU of 1500 octets
18011edb9ca6SSiva Reddy * (ETH_DATA_LEN). This value can be changed with ifconfig.
18021edb9ca6SSiva Reddy * Return value:
18031edb9ca6SSiva Reddy * 0 on success and a negative errno value on failure.
18051edb9ca6SSiva Reddy */
18061edb9ca6SSiva Reddy static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
18071edb9ca6SSiva Reddy {
18081edb9ca6SSiva Reddy dev->mtu = new_mtu;
18091edb9ca6SSiva Reddy
18101edb9ca6SSiva Reddy if (!netif_running(dev))
18111edb9ca6SSiva Reddy return 0;
18121edb9ca6SSiva Reddy
18131edb9ca6SSiva Reddy /* The receive ring buffer size must be set based on the MTU. If the
18141edb9ca6SSiva Reddy * MTU is changed, the receive ring buffers need to be reinitialised,
18151edb9ca6SSiva Reddy * so bring the interface down and then back up
18161edb9ca6SSiva Reddy */
18171edb9ca6SSiva Reddy sxgbe_release(dev);
18181edb9ca6SSiva Reddy return sxgbe_open(dev);
18191edb9ca6SSiva Reddy }
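/* Usage sketch (hypothetical interface name): "ip link set dev eth0
 * mtu 9000" reaches this handler via ndo_change_mtu; on a running
 * interface the release/open cycle above rebuilds the RX rings for
 * the new size.
 */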
18201edb9ca6SSiva Reddy
18211edb9ca6SSiva Reddy static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
18221edb9ca6SSiva Reddy unsigned int reg_n)
18231edb9ca6SSiva Reddy {
18241edb9ca6SSiva Reddy unsigned long data;
18251edb9ca6SSiva Reddy
18261edb9ca6SSiva Reddy data = (addr[5] << 8) | addr[4];
18271edb9ca6SSiva Reddy /* For MAC Addr registers we have to set the Address Enable (AE)
18281edb9ca6SSiva Reddy * bit that has no effect on the High Reg 0 where the bit 31 (MO)
18291edb9ca6SSiva Reddy * is RO.
18301edb9ca6SSiva Reddy */
18311edb9ca6SSiva Reddy writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
18321edb9ca6SSiva Reddy data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
18331edb9ca6SSiva Reddy writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
18341edb9ca6SSiva Reddy }
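/* Worked example (hypothetical address 00:11:22:33:44:55): the high
 * register is written with 0x5544 | SXGBE_HI_REG_AE and the low
 * register with 0x33221100, i.e. the address is packed little-endian
 * across the LOW/HIGH register pair.
 */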
18351edb9ca6SSiva Reddy
18361edb9ca6SSiva Reddy /**
18371edb9ca6SSiva Reddy * sxgbe_set_rx_mode - entry point for setting the receive mode of
18381edb9ca6SSiva Reddy * a device (unicast, multicast addressing)
18391edb9ca6SSiva Reddy * @dev : pointer to the device structure
18401edb9ca6SSiva Reddy * Description:
18411edb9ca6SSiva Reddy * This function is a driver entry point which gets called by the kernel
18421edb9ca6SSiva Reddy * whenever different receive mode like unicast, multicast and promiscuous
18431edb9ca6SSiva Reddy * must be enabled/disabled.
18441edb9ca6SSiva Reddy * Return value:
18451edb9ca6SSiva Reddy * void.
18461edb9ca6SSiva Reddy */
18471edb9ca6SSiva Reddy static void sxgbe_set_rx_mode(struct net_device *dev)
18481edb9ca6SSiva Reddy {
18491edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = netdev_priv(dev);
18501edb9ca6SSiva Reddy void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
18511edb9ca6SSiva Reddy unsigned int value = 0;
18521edb9ca6SSiva Reddy u32 mc_filter[2];
18531edb9ca6SSiva Reddy struct netdev_hw_addr *ha;
18541edb9ca6SSiva Reddy int reg = 1;
18551edb9ca6SSiva Reddy
18561edb9ca6SSiva Reddy netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
18571edb9ca6SSiva Reddy __func__, netdev_mc_count(dev), netdev_uc_count(dev));
18581edb9ca6SSiva Reddy
18591edb9ca6SSiva Reddy if (dev->flags & IFF_PROMISC) {
18601edb9ca6SSiva Reddy value = SXGBE_FRAME_FILTER_PR;
18611edb9ca6SSiva Reddy
18621edb9ca6SSiva Reddy } else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
18631edb9ca6SSiva Reddy (dev->flags & IFF_ALLMULTI)) {
18641edb9ca6SSiva Reddy value = SXGBE_FRAME_FILTER_PM; /* pass all multi */
18651edb9ca6SSiva Reddy writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
18661edb9ca6SSiva Reddy writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
18671edb9ca6SSiva Reddy
18681edb9ca6SSiva Reddy } else if (!netdev_mc_empty(dev)) {
18691edb9ca6SSiva Reddy /* Hash filter for multicast */
18701edb9ca6SSiva Reddy value = SXGBE_FRAME_FILTER_HMC;
18711edb9ca6SSiva Reddy
18721edb9ca6SSiva Reddy memset(mc_filter, 0, sizeof(mc_filter));
18731edb9ca6SSiva Reddy netdev_for_each_mc_addr(ha, dev) {
18741edb9ca6SSiva Reddy /* The upper 6 bits of the calculated CRC are used to
18751edb9ca6SSiva Reddy * index the contents of the hash table
18761edb9ca6SSiva Reddy */
18771edb9ca6SSiva Reddy int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
18781edb9ca6SSiva Reddy
18791edb9ca6SSiva Reddy /* The most significant bit determines the register to
18801edb9ca6SSiva Reddy * use (H/L) while the other 5 bits determine the bit
18811edb9ca6SSiva Reddy * within the register.
18821edb9ca6SSiva Reddy */
18831edb9ca6SSiva Reddy mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
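/* e.g. (hypothetical) bit_nr = 35: 35 >> 5 = 1 selects
 * mc_filter[1] (written to SXGBE_HASH_HIGH below) and
 * 35 & 31 = 3 sets bit 3 within that register.
 */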
18841edb9ca6SSiva Reddy }
18851edb9ca6SSiva Reddy writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
18861edb9ca6SSiva Reddy writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
18871edb9ca6SSiva Reddy }
18881edb9ca6SSiva Reddy
18891edb9ca6SSiva Reddy /* Handle multiple unicast addresses (perfect filtering) */
18901edb9ca6SSiva Reddy if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES)
18911edb9ca6SSiva Reddy /* Switch to promiscuous mode if more than 16 addrs
18921edb9ca6SSiva Reddy * are required
18931edb9ca6SSiva Reddy */
18941edb9ca6SSiva Reddy value |= SXGBE_FRAME_FILTER_PR;
18951edb9ca6SSiva Reddy else {
18961edb9ca6SSiva Reddy netdev_for_each_uc_addr(ha, dev) {
18971edb9ca6SSiva Reddy sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
18981edb9ca6SSiva Reddy reg++;
18991edb9ca6SSiva Reddy }
19001edb9ca6SSiva Reddy }
19011edb9ca6SSiva Reddy #ifdef FRAME_FILTER_DEBUG
19021edb9ca6SSiva Reddy /* Enable Receive all mode (to debug filtering_fail errors) */
19031edb9ca6SSiva Reddy value |= SXGBE_FRAME_FILTER_RA;
19041edb9ca6SSiva Reddy #endif
19051edb9ca6SSiva Reddy writel(value, ioaddr + SXGBE_FRAME_FILTER);
19061edb9ca6SSiva Reddy
19071edb9ca6SSiva Reddy netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
19081edb9ca6SSiva Reddy readl(ioaddr + SXGBE_FRAME_FILTER),
19091edb9ca6SSiva Reddy readl(ioaddr + SXGBE_HASH_HIGH),
19101edb9ca6SSiva Reddy readl(ioaddr + SXGBE_HASH_LOW));
19111edb9ca6SSiva Reddy }
19121edb9ca6SSiva Reddy
19131edb9ca6SSiva Reddy #ifdef CONFIG_NET_POLL_CONTROLLER
19141edb9ca6SSiva Reddy /**
19151edb9ca6SSiva Reddy * sxgbe_poll_controller - entry point for polling receive by device
19161edb9ca6SSiva Reddy * @dev : pointer to the device structure
19171edb9ca6SSiva Reddy * Description:
19181edb9ca6SSiva Reddy * This function is used by NETCONSOLE and other diagnostic tools
19191edb9ca6SSiva Reddy * to allow network I/O with interrupts disabled.
19201edb9ca6SSiva Reddy * Return value:
19211edb9ca6SSiva Reddy * Void.
19221edb9ca6SSiva Reddy */
19231edb9ca6SSiva Reddy static void sxgbe_poll_controller(struct net_device *dev)
19241edb9ca6SSiva Reddy {
19251edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = netdev_priv(dev);
19261edb9ca6SSiva Reddy
19271edb9ca6SSiva Reddy disable_irq(priv->irq);
19281edb9ca6SSiva Reddy sxgbe_rx_interrupt(priv->irq, dev);
19291edb9ca6SSiva Reddy enable_irq(priv->irq);
19301edb9ca6SSiva Reddy }
19311edb9ca6SSiva Reddy #endif
19321edb9ca6SSiva Reddy
19331edb9ca6SSiva Reddy /* sxgbe_ioctl - Entry point for the Ioctl
19341edb9ca6SSiva Reddy * @dev: Device pointer.
19351edb9ca6SSiva Reddy * @rq: An IOCTL-specific structure that can contain a pointer to
19361edb9ca6SSiva Reddy * a proprietary structure used to pass information to the driver.
19371edb9ca6SSiva Reddy * @cmd: IOCTL command
19381edb9ca6SSiva Reddy * Description:
19391edb9ca6SSiva Reddy * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
19401edb9ca6SSiva Reddy */
19411edb9ca6SSiva Reddy static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
19421edb9ca6SSiva Reddy {
19431edb9ca6SSiva Reddy int ret = -EOPNOTSUPP;
19441edb9ca6SSiva Reddy
19451edb9ca6SSiva Reddy if (!netif_running(dev))
19461edb9ca6SSiva Reddy return -EINVAL;
19471edb9ca6SSiva Reddy
19481edb9ca6SSiva Reddy switch (cmd) {
19491edb9ca6SSiva Reddy case SIOCGMIIPHY:
19501edb9ca6SSiva Reddy case SIOCGMIIREG:
19511edb9ca6SSiva Reddy case SIOCSMIIREG:
1952c5d19a6eSHeiner Kallweit ret = phy_do_ioctl(dev, rq, cmd);
19531edb9ca6SSiva Reddy break;
19541edb9ca6SSiva Reddy default:
19551edb9ca6SSiva Reddy break;
19561edb9ca6SSiva Reddy }
19571edb9ca6SSiva Reddy
19581edb9ca6SSiva Reddy return ret;
19591edb9ca6SSiva Reddy }
19601edb9ca6SSiva Reddy
19611edb9ca6SSiva Reddy static const struct net_device_ops sxgbe_netdev_ops = {
19621edb9ca6SSiva Reddy .ndo_open = sxgbe_open,
19631edb9ca6SSiva Reddy .ndo_start_xmit = sxgbe_xmit,
19641edb9ca6SSiva Reddy .ndo_stop = sxgbe_release,
19651edb9ca6SSiva Reddy .ndo_get_stats64 = sxgbe_get_stats64,
19661edb9ca6SSiva Reddy .ndo_change_mtu = sxgbe_change_mtu,
19671edb9ca6SSiva Reddy .ndo_set_features = sxgbe_set_features,
19681edb9ca6SSiva Reddy .ndo_set_rx_mode = sxgbe_set_rx_mode,
19691edb9ca6SSiva Reddy .ndo_tx_timeout = sxgbe_tx_timeout,
1970a7605370SArnd Bergmann .ndo_eth_ioctl = sxgbe_ioctl,
19711edb9ca6SSiva Reddy #ifdef CONFIG_NET_POLL_CONTROLLER
19721edb9ca6SSiva Reddy .ndo_poll_controller = sxgbe_poll_controller,
19731edb9ca6SSiva Reddy #endif
19741edb9ca6SSiva Reddy .ndo_set_mac_address = eth_mac_addr,
19751edb9ca6SSiva Reddy };
19761edb9ca6SSiva Reddy
19771edb9ca6SSiva Reddy /* Get the hardware ops */
197840b92cadSByungho An static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
19791edb9ca6SSiva Reddy {
19801edb9ca6SSiva Reddy ops_ptr->mac = sxgbe_get_core_ops();
19811edb9ca6SSiva Reddy ops_ptr->desc = sxgbe_get_desc_ops();
19821edb9ca6SSiva Reddy ops_ptr->dma = sxgbe_get_dma_ops();
19831edb9ca6SSiva Reddy ops_ptr->mtl = sxgbe_get_mtl_ops();
19841edb9ca6SSiva Reddy
19851edb9ca6SSiva Reddy /* set the MDIO communication Address/Data registers */
19861edb9ca6SSiva Reddy ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG;
19871edb9ca6SSiva Reddy ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG;
19881edb9ca6SSiva Reddy
19891edb9ca6SSiva Reddy /* Assign the default link settings:
19901edb9ca6SSiva Reddy * no SXGBE-defined default values are to be set in the registers,
19911edb9ca6SSiva Reddy * so assign 0 for port and duplex
19921edb9ca6SSiva Reddy */
19931edb9ca6SSiva Reddy ops_ptr->link.port = 0;
19941edb9ca6SSiva Reddy ops_ptr->link.duplex = 0;
19951edb9ca6SSiva Reddy ops_ptr->link.speed = SXGBE_SPEED_10G;
19961edb9ca6SSiva Reddy }
19971edb9ca6SSiva Reddy
19981edb9ca6SSiva Reddy /**
19991edb9ca6SSiva Reddy * sxgbe_hw_init - Init the GMAC device
20001edb9ca6SSiva Reddy * @priv: driver private structure
20011edb9ca6SSiva Reddy * Description: this function checks the HW capability
20021edb9ca6SSiva Reddy * (if supported) and sets the driver's features.
20031edb9ca6SSiva Reddy */
20042405e8f6SByungho An static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
20051edb9ca6SSiva Reddy {
20061edb9ca6SSiva Reddy u32 ctrl_ids;
20071edb9ca6SSiva Reddy
20081edb9ca6SSiva Reddy priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
20092405e8f6SByungho An if (!priv->hw)
20102405e8f6SByungho An return -ENOMEM;
20111edb9ca6SSiva Reddy
20121edb9ca6SSiva Reddy /* get the hardware ops */
20131edb9ca6SSiva Reddy sxgbe_get_ops(priv->hw);
20141edb9ca6SSiva Reddy
20151edb9ca6SSiva Reddy /* get the controller id */
20161edb9ca6SSiva Reddy ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
20171edb9ca6SSiva Reddy priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
20181edb9ca6SSiva Reddy priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
20191edb9ca6SSiva Reddy pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
20201edb9ca6SSiva Reddy priv->hw->ctrl_uid, priv->hw->ctrl_id);
20211edb9ca6SSiva Reddy
20221edb9ca6SSiva Reddy /* get the H/W features */
20231edb9ca6SSiva Reddy if (!sxgbe_get_hw_features(priv))
20241edb9ca6SSiva Reddy pr_info("Hardware features not found\n");
20251edb9ca6SSiva Reddy
20261edb9ca6SSiva Reddy if (priv->hw_cap.tx_csum_offload)
20271edb9ca6SSiva Reddy pr_info("TX Checksum offload supported\n");
20281edb9ca6SSiva Reddy
20291edb9ca6SSiva Reddy if (priv->hw_cap.rx_csum_offload)
20301edb9ca6SSiva Reddy pr_info("RX Checksum offload supported\n");
20312405e8f6SByungho An
20322405e8f6SByungho An return 0;
20331edb9ca6SSiva Reddy }
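/* Example (hypothetical ID register value): ctrl_ids = 0x00210040
 * yields ctrl_uid = 0x21 (bits 23:16) and ctrl_id = 0x40 (bits 7:0).
 */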
20341edb9ca6SSiva Reddy
20350a0347b1SByungho An static int sxgbe_sw_reset(void __iomem *addr)
20360a0347b1SByungho An {
20370a0347b1SByungho An int retry_count = 10;
20380a0347b1SByungho An
20390a0347b1SByungho An writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
20400a0347b1SByungho An while (retry_count--) {
20410a0347b1SByungho An if (!(readl(addr + SXGBE_DMA_MODE_REG) &
20420a0347b1SByungho An SXGBE_DMA_SOFT_RESET))
20430a0347b1SByungho An break;
20440a0347b1SByungho An mdelay(10);
20450a0347b1SByungho An }
20460a0347b1SByungho An
20470a0347b1SByungho An if (retry_count < 0)
20480a0347b1SByungho An return -EBUSY;
20490a0347b1SByungho An
20500a0347b1SByungho An return 0;
20510a0347b1SByungho An }
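/* Timing sketch: the loop above polls the DMA mode register up to 10
 * times with a 10 ms delay between reads, so a stuck soft reset is
 * reported as -EBUSY after roughly 100 ms.
 */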
20520a0347b1SByungho An
20531edb9ca6SSiva Reddy /**
20541edb9ca6SSiva Reddy * sxgbe_drv_probe
20551edb9ca6SSiva Reddy * @device: device pointer
20561edb9ca6SSiva Reddy * @plat_dat: platform data pointer
20571edb9ca6SSiva Reddy * @addr: iobase memory address
20581edb9ca6SSiva Reddy * Description: this is the main probe function, used to call
20591edb9ca6SSiva Reddy * alloc_etherdev and allocate the private structure.
20601edb9ca6SSiva Reddy */
20611edb9ca6SSiva Reddy struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
20621edb9ca6SSiva Reddy struct sxgbe_plat_data *plat_dat,
20631edb9ca6SSiva Reddy void __iomem *addr)
20641edb9ca6SSiva Reddy {
20651edb9ca6SSiva Reddy struct sxgbe_priv_data *priv;
20661edb9ca6SSiva Reddy struct net_device *ndev;
20671edb9ca6SSiva Reddy int ret;
20681051125dSVipul Pandya u8 queue_num;
20691edb9ca6SSiva Reddy
20701edb9ca6SSiva Reddy ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
20711edb9ca6SSiva Reddy SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
20721edb9ca6SSiva Reddy if (!ndev)
20731edb9ca6SSiva Reddy return NULL;
20741edb9ca6SSiva Reddy
20751edb9ca6SSiva Reddy SET_NETDEV_DEV(ndev, device);
20761edb9ca6SSiva Reddy
20771edb9ca6SSiva Reddy priv = netdev_priv(ndev);
20781edb9ca6SSiva Reddy priv->device = device;
20791edb9ca6SSiva Reddy priv->dev = ndev;
20801edb9ca6SSiva Reddy
20811edb9ca6SSiva Reddy sxgbe_set_ethtool_ops(ndev);
20821edb9ca6SSiva Reddy priv->plat = plat_dat;
20831edb9ca6SSiva Reddy priv->ioaddr = addr;
20841edb9ca6SSiva Reddy
20850a0347b1SByungho An ret = sxgbe_sw_reset(priv->ioaddr);
20860a0347b1SByungho An if (ret)
20870a0347b1SByungho An goto error_free_netdev;
20880a0347b1SByungho An
2089acc18c14SGirish K S /* Verify driver arguments */
2090acc18c14SGirish K S sxgbe_verify_args();
2091acc18c14SGirish K S
20921edb9ca6SSiva Reddy /* Init MAC and get the capabilities */
20932405e8f6SByungho An ret = sxgbe_hw_init(priv);
20942405e8f6SByungho An if (ret)
20952405e8f6SByungho An goto error_free_netdev;
20961edb9ca6SSiva Reddy
20971edb9ca6SSiva Reddy /* allocate memory resources for Descriptor rings */
20981edb9ca6SSiva Reddy ret = txring_mem_alloc(priv);
20991edb9ca6SSiva Reddy if (ret)
2100d9bd6461Sfrançois romieu goto error_free_hw;
21011edb9ca6SSiva Reddy
21021edb9ca6SSiva Reddy ret = rxring_mem_alloc(priv);
21031edb9ca6SSiva Reddy if (ret)
2104d9bd6461Sfrançois romieu goto error_free_hw;
21051edb9ca6SSiva Reddy
21061edb9ca6SSiva Reddy ndev->netdev_ops = &sxgbe_netdev_ops;
21071edb9ca6SSiva Reddy
21081051125dSVipul Pandya ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
21091051125dSVipul Pandya NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
21101051125dSVipul Pandya NETIF_F_GRO;
21111edb9ca6SSiva Reddy ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
21121edb9ca6SSiva Reddy ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
21131edb9ca6SSiva Reddy
21141edb9ca6SSiva Reddy /* assign filtering support */
21151edb9ca6SSiva Reddy ndev->priv_flags |= IFF_UNICAST_FLT;
21161edb9ca6SSiva Reddy
211744770e11SJarod Wilson /* MTU range: 68 - 9000 */
211844770e11SJarod Wilson ndev->min_mtu = MIN_MTU;
211944770e11SJarod Wilson ndev->max_mtu = MAX_MTU;
212044770e11SJarod Wilson
21211edb9ca6SSiva Reddy priv->msg_enable = netif_msg_init(debug, default_msg_level);
21221edb9ca6SSiva Reddy
21231051125dSVipul Pandya /* Enable TCP segmentation offload for all DMA channels */
21241051125dSVipul Pandya if (priv->hw_cap.tcpseg_offload) {
21251051125dSVipul Pandya SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
21261051125dSVipul Pandya priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
21271051125dSVipul Pandya }
21281051125dSVipul Pandya }
21291051125dSVipul Pandya
21308f7807aeSVipul Pandya /* Enable Rx checksum offload */
21318f7807aeSVipul Pandya if (priv->hw_cap.rx_csum_offload) {
21328f7807aeSVipul Pandya priv->hw->mac->enable_rx_csum(priv->ioaddr);
21338f7807aeSVipul Pandya priv->rxcsum_insertion = true;
21348f7807aeSVipul Pandya }
21358f7807aeSVipul Pandya
213625f72a74SVipul Pandya /* Initialise pause frame settings */
213725f72a74SVipul Pandya priv->rx_pause = 1;
213825f72a74SVipul Pandya priv->tx_pause = 1;
213925f72a74SVipul Pandya
21401edb9ca6SSiva Reddy /* Rx Watchdog is available; enabling it depends on the platform data */
21411edb9ca6SSiva Reddy if (!priv->plat->riwt_off) {
21421edb9ca6SSiva Reddy priv->use_riwt = 1;
21431edb9ca6SSiva Reddy pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
21441edb9ca6SSiva Reddy }
21451edb9ca6SSiva Reddy
2146b48b89f9SJakub Kicinski netif_napi_add(ndev, &priv->napi, sxgbe_poll);
21471edb9ca6SSiva Reddy
21481edb9ca6SSiva Reddy spin_lock_init(&priv->stats_lock);
21491edb9ca6SSiva Reddy
21501edb9ca6SSiva Reddy priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
21511edb9ca6SSiva Reddy if (IS_ERR(priv->sxgbe_clk)) {
21521edb9ca6SSiva Reddy netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
21531edb9ca6SSiva Reddy __func__);
2154d9bd6461Sfrançois romieu goto error_napi_del;
21551edb9ca6SSiva Reddy }
21561edb9ca6SSiva Reddy
21571edb9ca6SSiva Reddy /* If a specific clk_csr value is passed from the platform
21581edb9ca6SSiva Reddy * this means that the CSR Clock Range selection cannot be
21591edb9ca6SSiva Reddy * changed at run-time and is fixed. Otherwise the driver will try to
21601edb9ca6SSiva Reddy * set the MDC clock dynamically according to the actual CSR
21611edb9ca6SSiva Reddy * clock input.
21621edb9ca6SSiva Reddy */
21631edb9ca6SSiva Reddy if (!priv->plat->clk_csr)
21641edb9ca6SSiva Reddy sxgbe_clk_csr_set(priv);
21651edb9ca6SSiva Reddy else
21661edb9ca6SSiva Reddy priv->clk_csr = priv->plat->clk_csr;
21671edb9ca6SSiva Reddy
21681edb9ca6SSiva Reddy /* MDIO bus Registration */
21691edb9ca6SSiva Reddy ret = sxgbe_mdio_register(ndev);
21701edb9ca6SSiva Reddy if (ret < 0) {
21711edb9ca6SSiva Reddy netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
21721edb9ca6SSiva Reddy __func__, priv->plat->bus_id);
2173d9bd6461Sfrançois romieu goto error_clk_put;
21741edb9ca6SSiva Reddy }
21751edb9ca6SSiva Reddy
21761edb9ca6SSiva Reddy ret = register_netdev(ndev);
21771edb9ca6SSiva Reddy if (ret) {
21781edb9ca6SSiva Reddy pr_err("%s: ERROR %i registering the device\n", __func__, ret);
2179d9bd6461Sfrançois romieu goto error_mdio_unregister;
21801edb9ca6SSiva Reddy }
21811edb9ca6SSiva Reddy
21821edb9ca6SSiva Reddy sxgbe_check_ether_addr(priv);
21831edb9ca6SSiva Reddy
21841edb9ca6SSiva Reddy return priv;
21851edb9ca6SSiva Reddy
2186d9bd6461Sfrançois romieu error_mdio_unregister:
2187d9bd6461Sfrançois romieu sxgbe_mdio_unregister(ndev);
2188d9bd6461Sfrançois romieu error_clk_put:
21891edb9ca6SSiva Reddy clk_put(priv->sxgbe_clk);
2190d9bd6461Sfrançois romieu error_napi_del:
21911edb9ca6SSiva Reddy netif_napi_del(&priv->napi);
2192d9bd6461Sfrançois romieu error_free_hw:
2193d9bd6461Sfrançois romieu kfree(priv->hw);
21941edb9ca6SSiva Reddy error_free_netdev:
21951edb9ca6SSiva Reddy free_netdev(ndev);
21961edb9ca6SSiva Reddy
21971edb9ca6SSiva Reddy return NULL;
21981edb9ca6SSiva Reddy }
21991edb9ca6SSiva Reddy
22001edb9ca6SSiva Reddy /**
22011edb9ca6SSiva Reddy * sxgbe_drv_remove
22021edb9ca6SSiva Reddy * @ndev: net device pointer
22031edb9ca6SSiva Reddy * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
22041edb9ca6SSiva Reddy * changes the link status and releases the DMA descriptor rings.
22051edb9ca6SSiva Reddy */
2206*7f88efc8SUwe Kleine-König void sxgbe_drv_remove(struct net_device *ndev)
22071edb9ca6SSiva Reddy {
22081edb9ca6SSiva Reddy struct sxgbe_priv_data *priv = netdev_priv(ndev);
2209325b94f7SByungho An u8 queue_num;
22101edb9ca6SSiva Reddy
22111edb9ca6SSiva Reddy netdev_info(ndev, "%s: removing driver\n", __func__);
22121edb9ca6SSiva Reddy
2213325b94f7SByungho An SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
2214325b94f7SByungho An priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
2215325b94f7SByungho An }
2216325b94f7SByungho An
22171edb9ca6SSiva Reddy priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
22181edb9ca6SSiva Reddy priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
22191edb9ca6SSiva Reddy
22201edb9ca6SSiva Reddy priv->hw->mac->enable_tx(priv->ioaddr, false);
22211edb9ca6SSiva Reddy priv->hw->mac->enable_rx(priv->ioaddr, false);
22221edb9ca6SSiva Reddy
2223d9bd6461Sfrançois romieu unregister_netdev(ndev);
22241edb9ca6SSiva Reddy
22251edb9ca6SSiva Reddy sxgbe_mdio_unregister(ndev);
22261edb9ca6SSiva Reddy
2227d9bd6461Sfrançois romieu clk_put(priv->sxgbe_clk);
2228d9bd6461Sfrançois romieu
2229d9bd6461Sfrançois romieu netif_napi_del(&priv->napi);
2230d9bd6461Sfrançois romieu
2231d9bd6461Sfrançois romieu kfree(priv->hw);
22321edb9ca6SSiva Reddy
22331edb9ca6SSiva Reddy free_netdev(ndev);
22341edb9ca6SSiva Reddy }
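/* Illustrative sketch (hypothetical wrapper, not part of the original
 * file): the platform remove path is expected to funnel into
 * sxgbe_drv_remove() via the driver data set at probe time, e.g.:
 */
#if 0
static void sxgbe_remove_usage_sketch(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sxgbe_drv_remove(ndev);
}
#endif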
22351edb9ca6SSiva Reddy
22361edb9ca6SSiva Reddy #ifdef CONFIG_PM
22371edb9ca6SSiva Reddy int sxgbe_suspend(struct net_device *ndev)
22381edb9ca6SSiva Reddy {
22391edb9ca6SSiva Reddy return 0;
22401edb9ca6SSiva Reddy }
22411edb9ca6SSiva Reddy
22421edb9ca6SSiva Reddy int sxgbe_resume(struct net_device *ndev)
22431edb9ca6SSiva Reddy {
22441edb9ca6SSiva Reddy return 0;
22451edb9ca6SSiva Reddy }
22461edb9ca6SSiva Reddy
22471edb9ca6SSiva Reddy int sxgbe_freeze(struct net_device *ndev)
22481edb9ca6SSiva Reddy {
22491edb9ca6SSiva Reddy return -ENOSYS;
22501edb9ca6SSiva Reddy }
22511edb9ca6SSiva Reddy
22521edb9ca6SSiva Reddy int sxgbe_restore(struct net_device *ndev)
22531edb9ca6SSiva Reddy {
22541edb9ca6SSiva Reddy return -ENOSYS;
22551edb9ca6SSiva Reddy }
22561edb9ca6SSiva Reddy #endif /* CONFIG_PM */
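/* Illustrative sketch (assumed wiring, not part of the original file):
 * the stubs above are intended to be adapted into dev_pm_ops callbacks
 * by the platform layer, along these lines:
 */
#if 0
static int sxgbe_platform_suspend_sketch(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	return sxgbe_suspend(ndev);	/* currently a no-op returning 0 */
}
#endif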
22571edb9ca6SSiva Reddy
22581edb9ca6SSiva Reddy /* Driver is configured as Platform driver */
22591edb9ca6SSiva Reddy static int __init sxgbe_init(void)
22601edb9ca6SSiva Reddy {
22611edb9ca6SSiva Reddy int ret;
22621edb9ca6SSiva Reddy
22631edb9ca6SSiva Reddy ret = sxgbe_register_platform();
22641edb9ca6SSiva Reddy if (ret)
22651edb9ca6SSiva Reddy goto err;
22661edb9ca6SSiva Reddy return 0;
22671edb9ca6SSiva Reddy err:
22681edb9ca6SSiva Reddy pr_err("driver registration failed\n");
22691edb9ca6SSiva Reddy return ret;
22701edb9ca6SSiva Reddy }
22711edb9ca6SSiva Reddy
22721edb9ca6SSiva Reddy static void __exit sxgbe_exit(void)
22731edb9ca6SSiva Reddy {
22741edb9ca6SSiva Reddy sxgbe_unregister_platform();
22751edb9ca6SSiva Reddy }
22761edb9ca6SSiva Reddy
22771edb9ca6SSiva Reddy module_init(sxgbe_init);
22781edb9ca6SSiva Reddy module_exit(sxgbe_exit);
22791edb9ca6SSiva Reddy
22801edb9ca6SSiva Reddy #ifndef MODULE
22811edb9ca6SSiva Reddy static int __init sxgbe_cmdline_opt(char *str)
22821edb9ca6SSiva Reddy {
2283acc18c14SGirish K S char *opt;
2284acc18c14SGirish K S
2285acc18c14SGirish K S if (!str || !*str)
228650e06ddcSRandy Dunlap return 1;
2287acc18c14SGirish K S while ((opt = strsep(&str, ",")) != NULL) {
2288f3cc008bSDominik Czarnota if (!strncmp(opt, "eee_timer:", 10)) {
2289acc18c14SGirish K S if (kstrtoint(opt + 10, 0, &eee_timer))
2290acc18c14SGirish K S goto err;
2291acc18c14SGirish K S }
2292acc18c14SGirish K S }
229350e06ddcSRandy Dunlap return 1;
2294acc18c14SGirish K S
2295acc18c14SGirish K S err:
2296acc18c14SGirish K S pr_err("%s: ERROR: invalid eee_timer module parameter\n", __func__);
229750e06ddcSRandy Dunlap return 1;
22981edb9ca6SSiva Reddy }
22991edb9ca6SSiva Reddy
23001edb9ca6SSiva Reddy __setup("sxgbeeth=", sxgbe_cmdline_opt);
23011edb9ca6SSiva Reddy #endif /* MODULE */
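/* Usage note: when the driver is built in, eee_timer can be set from
 * the kernel command line, which sxgbe_cmdline_opt() parses above:
 *
 *	sxgbeeth=eee_timer:2000
 */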
23021edb9ca6SSiva Reddy
23031edb9ca6SSiva Reddy
230514a65084SKrzysztof Kozlowski MODULE_DESCRIPTION("Samsung 10G/2.5G/1G Ethernet PLATFORM driver");
23061edb9ca6SSiva Reddy
23071edb9ca6SSiva Reddy MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
2308acc18c14SGirish K S MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
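/* Usage note: when built as a module, the same knobs are ordinary
 * module parameters (module name assumed from the build setup):
 *
 *	modprobe samsung-sxgbe debug=16 eee_timer=2000
 */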
23091edb9ca6SSiva Reddy
23101edb9ca6SSiva Reddy MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
23111edb9ca6SSiva Reddy MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
23121edb9ca6SSiva Reddy MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
23131edb9ca6SSiva Reddy MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
23141edb9ca6SSiva Reddy
23151edb9ca6SSiva Reddy MODULE_LICENSE("GPL");