// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 *      engineered documentation written by Carl-Daniel Hailfinger
 *      and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION		"0.64"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>
#include <linux/io.h>

#include <asm/irq.h>

#define TX_WORK_PER_LOOP  NAPI_POLL_WEIGHT
#define RX_WORK_PER_LOOP  NAPI_POLL_WEIGHT

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and striping */
#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */
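
/*
 * These flags arrive in the PCI device table's driver_data, are cached in
 * np->driver_data, and gate features with plain bitmask tests. Illustrative
 * probe-time sketch (not a verbatim excerpt):
 *
 *	if (id->driver_data & DEV_HAS_MSI_X)
 *		np->msi_flags |= NV_MSI_X_CAPABLE;
 */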

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE		0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE		0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT			0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
#define NVREG_BKOFFCTRL_SELECT			24
#define NVREG_BKOFFCTRL_GEAR			12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};
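
/*
 * All of the registers above are accessed through the memory-mapped BAR
 * with readl()/writel() at these byte offsets. A minimal illustrative
 * sketch (not a verbatim excerpt from this driver): acknowledge all
 * pending interrupt sources and flush the posted write:
 *
 *	u8 __iomem *base = get_hwbase(dev);
 *
 *	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 *	readl(base + NvRegIrqStatus);	// flush the PCI posted write
 */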

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBTRACT1		(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY	10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
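
/*
 * DESC_VER_1 and DESC_VER_2 use the two-word struct ring_desc and are
 * reached through union ring_type's .orig member; DESC_VER_3 uses the
 * four-word struct ring_desc_ex (64-bit buffer address plus vlan word)
 * through .ex. nv_optimized() below encodes exactly this distinction.
 */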

/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE        0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE  0xffffffff

#define NV_RESTART_TX         0x1
#define NV_RESTART_RX         0x2

#define NV_TX_LIMIT_COUNT     16

#define NV_DYNAMIC_THRESHOLD        4
#define NV_DYNAMIC_MAX_QUIET_COUNT  2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets; /* should be ifconfig->rx_packets */
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets; /* should be ifconfig->tx_packets */
	u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
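
/*
 * sizeof(struct nv_ethtool_stats)/sizeof(u64) counts every u64 member
 * above, so with the current layout V3 = 33 counters, V2 = 33 - 3 = 30
 * (dropping the three version-3 tx_* counters) and V1 = 30 - 6 = 24.
 */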

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

struct nv_txrx_stats {
	u64 stat_rx_packets;
	u64 stat_rx_bytes; /* not always available in HW */
	u64 stat_rx_missed_errors;
	u64 stat_rx_dropped;
	u64 stat_tx_packets; /* not always available in HW */
	u64 stat_tx_bytes;
	u64 stat_tx_dropped;
};

#define nv_txrx_stats_inc(member) \
		__this_cpu_inc(np->txrx_stats->member)
#define nv_txrx_stats_add(member, count) \
		__this_cpu_add(np->txrx_stats->member, (count))
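
/*
 * Note that these macros expand to a __this_cpu access on np->txrx_stats,
 * so they rely on a variable named 'np' being in scope at the call site
 * and on the caller running where per-CPU access is safe (e.g. the NAPI
 * poll path). Illustrative use on receive:
 *
 *	nv_txrx_stats_inc(stat_rx_packets);
 *	nv_txrx_stats_add(stat_rx_bytes, len);
 */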

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 *
 * Hardware stats updates are protected by hwstats_lock:
 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 *   integer wraparound in the NIC stats registers, at low frequency
 *   (0.1 Hz)
 * - updated by nv_get_ethtool_stats + nv_get_stats64
 *
 * Software stats are accessed only through 64b synchronization points
 * and are not subject to other synchronization techniques (single
 * update thread on the TX or RX paths).
 */
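
/*
 * For reference, the "64b synchronization points" are the usual
 * u64_stats_sync pattern (illustrative sketch, not a verbatim excerpt):
 *
 *	// writer (single update thread per direction)
 *	u64_stats_update_begin(&np->swstats_rx_syncp);
 *	nv_txrx_stats_inc(stat_rx_packets);
 *	u64_stats_update_end(&np->swstats_rx_syncp);
 *
 *	// reader (e.g. a stats syscall)
 *	unsigned int start;
 *	do {
 *		start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
 *		packets = stats->stat_rx_packets;
 *	} while (u64_stats_fetch_retry(&np->swstats_rx_syncp, start));
 */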

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* hardware stats are updated in syscall and timer */
	spinlock_t hwstats_lock;
	struct nv_ethtool_stats estats;

	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* RX software stats */
	struct u64_stats_sync swstats_rx_syncp;
	struct nv_txrx_stats __percpu *txrx_stats;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* TX software stats */
	struct u64_stats_sync swstats_tx_syncp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];       /* -rx    */
	char name_tx[IFNAMSIZ + 3];       /* -tx    */
	char name_other[IFNAMSIZ + 6];    /* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)].
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
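
/*
 * Worked example: for a 1 ms period, (1000 us * 100) / 1024 ~= 97, which
 * matches the NVREG_POLL_DEFAULT comment above. The default of -1 leaves
 * the driver free to pick the NVREG_POLL_DEFAULT_* value that suits the
 * chosen optimization mode.
 */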

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Debug output control for tx_timeout
 */
static bool debug_tx_timeout = false;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
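
/*
 * Illustrative use: a writel() to a NIC register can sit in a PCI
 * posted-write buffer; reading any register back forces it out. Sketch:
 *
 *	writel(np->irqmask, base + NvRegIrqMask);
 *	pci_push(base);
 */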

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
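
/*
 * Typical caller pattern (illustrative sketch): poll every
 * NV_RXSTOP_DELAY1 us, for at most NV_RXSTOP_DELAY1MAX us, until the
 * receiver goes idle; a nonzero return means the bit never reached the
 * target value:
 *
 *	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
 *		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
 *		netdev_info(dev, "ReceiverStatus remained busy\n");
 */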
98269b4b095SJeff Kirsher 
98369b4b095SJeff Kirsher #define NV_SETUP_RX_RING 0x01
98469b4b095SJeff Kirsher #define NV_SETUP_TX_RING 0x02
98569b4b095SJeff Kirsher 
dma_low(dma_addr_t addr)98669b4b095SJeff Kirsher static inline u32 dma_low(dma_addr_t addr)
98769b4b095SJeff Kirsher {
98869b4b095SJeff Kirsher 	return addr;
98969b4b095SJeff Kirsher }
99069b4b095SJeff Kirsher 
dma_high(dma_addr_t addr)99169b4b095SJeff Kirsher static inline u32 dma_high(dma_addr_t addr)
99269b4b095SJeff Kirsher {
99369b4b095SJeff Kirsher 	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
99469b4b095SJeff Kirsher }
99569b4b095SJeff Kirsher 
setup_hw_rings(struct net_device * dev,int rxtx_flags)99669b4b095SJeff Kirsher static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
99769b4b095SJeff Kirsher {
99869b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
99969b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
100069b4b095SJeff Kirsher 
100169b4b095SJeff Kirsher 	if (!nv_optimized(np)) {
100269b4b095SJeff Kirsher 		if (rxtx_flags & NV_SETUP_RX_RING)
100369b4b095SJeff Kirsher 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
100469b4b095SJeff Kirsher 		if (rxtx_flags & NV_SETUP_TX_RING)
100569b4b095SJeff Kirsher 			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
100669b4b095SJeff Kirsher 	} else {
100769b4b095SJeff Kirsher 		if (rxtx_flags & NV_SETUP_RX_RING) {
100869b4b095SJeff Kirsher 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
100969b4b095SJeff Kirsher 			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
101069b4b095SJeff Kirsher 		}
101169b4b095SJeff Kirsher 		if (rxtx_flags & NV_SETUP_TX_RING) {
101269b4b095SJeff Kirsher 			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
101369b4b095SJeff Kirsher 			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
101469b4b095SJeff Kirsher 		}
101569b4b095SJeff Kirsher 	}
101669b4b095SJeff Kirsher }
101769b4b095SJeff Kirsher 
free_rings(struct net_device * dev)101869b4b095SJeff Kirsher static void free_rings(struct net_device *dev)
101969b4b095SJeff Kirsher {
102069b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
102169b4b095SJeff Kirsher 
102269b4b095SJeff Kirsher 	if (!nv_optimized(np)) {
102369b4b095SJeff Kirsher 		if (np->rx_ring.orig)
1024e8992e40SZhu Yanjun 			dma_free_coherent(&np->pci_dev->dev,
1025e8992e40SZhu Yanjun 					  sizeof(struct ring_desc) *
1026e8992e40SZhu Yanjun 					  (np->rx_ring_size +
1027e8992e40SZhu Yanjun 					  np->tx_ring_size),
102869b4b095SJeff Kirsher 					  np->rx_ring.orig, np->ring_addr);
102969b4b095SJeff Kirsher 	} else {
103069b4b095SJeff Kirsher 		if (np->rx_ring.ex)
1031e8992e40SZhu Yanjun 			dma_free_coherent(&np->pci_dev->dev,
1032e8992e40SZhu Yanjun 					  sizeof(struct ring_desc_ex) *
1033e8992e40SZhu Yanjun 					  (np->rx_ring_size +
1034e8992e40SZhu Yanjun 					  np->tx_ring_size),
103569b4b095SJeff Kirsher 					  np->rx_ring.ex, np->ring_addr);
103669b4b095SJeff Kirsher 	}
103769b4b095SJeff Kirsher 	kfree(np->rx_skb);
103869b4b095SJeff Kirsher 	kfree(np->tx_skb);
103969b4b095SJeff Kirsher }
104069b4b095SJeff Kirsher 
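/*
 * Separate rx/tx/other vectors are only in use when MSI-X is enabled
 * and more than one vector was actually allocated; in every other mode
 * all events share a single interrupt line.
 */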
104169b4b095SJeff Kirsher static int using_multi_irqs(struct net_device *dev)
104269b4b095SJeff Kirsher {
104369b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
104469b4b095SJeff Kirsher 
104569b4b095SJeff Kirsher 	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
10464c4ac831Skernel test robot 	    ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))
104769b4b095SJeff Kirsher 		return 0;
104869b4b095SJeff Kirsher 	else
104969b4b095SJeff Kirsher 		return 1;
105069b4b095SJeff Kirsher }
105169b4b095SJeff Kirsher 
105269b4b095SJeff Kirsher static void nv_txrx_gate(struct net_device *dev, bool gate)
105369b4b095SJeff Kirsher {
105469b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
105569b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
105669b4b095SJeff Kirsher 	u32 powerstate;
105769b4b095SJeff Kirsher 
105869b4b095SJeff Kirsher 	if (!np->mac_in_use &&
105969b4b095SJeff Kirsher 	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
106069b4b095SJeff Kirsher 		powerstate = readl(base + NvRegPowerState2);
106169b4b095SJeff Kirsher 		if (gate)
106269b4b095SJeff Kirsher 			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
106369b4b095SJeff Kirsher 		else
106469b4b095SJeff Kirsher 			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
106569b4b095SJeff Kirsher 		writel(powerstate, base + NvRegPowerState2);
106669b4b095SJeff Kirsher 	}
106769b4b095SJeff Kirsher }
106869b4b095SJeff Kirsher 
106969b4b095SJeff Kirsher static void nv_enable_irq(struct net_device *dev)
107069b4b095SJeff Kirsher {
107169b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
107269b4b095SJeff Kirsher 
107369b4b095SJeff Kirsher 	if (!using_multi_irqs(dev)) {
107469b4b095SJeff Kirsher 		if (np->msi_flags & NV_MSI_X_ENABLED)
107569b4b095SJeff Kirsher 			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
107669b4b095SJeff Kirsher 		else
107769b4b095SJeff Kirsher 			enable_irq(np->pci_dev->irq);
107869b4b095SJeff Kirsher 	} else {
107969b4b095SJeff Kirsher 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
108069b4b095SJeff Kirsher 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
108169b4b095SJeff Kirsher 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
108269b4b095SJeff Kirsher 	}
108369b4b095SJeff Kirsher }
108469b4b095SJeff Kirsher 
108569b4b095SJeff Kirsher static void nv_disable_irq(struct net_device *dev)
108669b4b095SJeff Kirsher {
108769b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
108869b4b095SJeff Kirsher 
108969b4b095SJeff Kirsher 	if (!using_multi_irqs(dev)) {
109069b4b095SJeff Kirsher 		if (np->msi_flags & NV_MSI_X_ENABLED)
109169b4b095SJeff Kirsher 			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
109269b4b095SJeff Kirsher 		else
109369b4b095SJeff Kirsher 			disable_irq(np->pci_dev->irq);
109469b4b095SJeff Kirsher 	} else {
109569b4b095SJeff Kirsher 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
109669b4b095SJeff Kirsher 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
109769b4b095SJeff Kirsher 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
109869b4b095SJeff Kirsher 	}
109969b4b095SJeff Kirsher }
110069b4b095SJeff Kirsher 
110169b4b095SJeff Kirsher /* In MSIX mode, a write to irqmask behaves as XOR */
110269b4b095SJeff Kirsher static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
110369b4b095SJeff Kirsher {
110469b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
110569b4b095SJeff Kirsher 
110669b4b095SJeff Kirsher 	writel(mask, base + NvRegIrqMask);
110769b4b095SJeff Kirsher }
110869b4b095SJeff Kirsher 
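/*
 * Given the XOR behaviour noted above, writing the currently enabled
 * mask back under MSI-X toggles those bits off; in the other modes the
 * mask registers are simply cleared.
 */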
110969b4b095SJeff Kirsher static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
111069b4b095SJeff Kirsher {
111169b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
111269b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
111369b4b095SJeff Kirsher 
111469b4b095SJeff Kirsher 	if (np->msi_flags & NV_MSI_X_ENABLED) {
111569b4b095SJeff Kirsher 		writel(mask, base + NvRegIrqMask);
111669b4b095SJeff Kirsher 	} else {
111769b4b095SJeff Kirsher 		if (np->msi_flags & NV_MSI_ENABLED)
111869b4b095SJeff Kirsher 			writel(0, base + NvRegMSIIrqMask);
111969b4b095SJeff Kirsher 		writel(0, base + NvRegIrqMask);
112069b4b095SJeff Kirsher 	}
112169b4b095SJeff Kirsher }
112269b4b095SJeff Kirsher 
112369b4b095SJeff Kirsher static void nv_napi_enable(struct net_device *dev)
112469b4b095SJeff Kirsher {
112569b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
112669b4b095SJeff Kirsher 
112769b4b095SJeff Kirsher 	napi_enable(&np->napi);
112869b4b095SJeff Kirsher }
112969b4b095SJeff Kirsher 
113069b4b095SJeff Kirsher static void nv_napi_disable(struct net_device *dev)
113169b4b095SJeff Kirsher {
113269b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
113369b4b095SJeff Kirsher 
113469b4b095SJeff Kirsher 	napi_disable(&np->napi);
113569b4b095SJeff Kirsher }
113669b4b095SJeff Kirsher 
113769b4b095SJeff Kirsher #define MII_READ	(-1)
113869b4b095SJeff Kirsher /* mii_rw: read/write a register on the PHY.
113969b4b095SJeff Kirsher  *
114069b4b095SJeff Kirsher  * Caller must guarantee serialization
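 * Returns the register contents for a read (value == MII_READ), 0 for
 * a successful write, and -1 on timeout or a flagged MII error.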
114169b4b095SJeff Kirsher  */
114269b4b095SJeff Kirsher static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
114369b4b095SJeff Kirsher {
114469b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
114569b4b095SJeff Kirsher 	u32 reg;
114669b4b095SJeff Kirsher 	int retval;
114769b4b095SJeff Kirsher 
114869b4b095SJeff Kirsher 	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
114969b4b095SJeff Kirsher 
115069b4b095SJeff Kirsher 	reg = readl(base + NvRegMIIControl);
115169b4b095SJeff Kirsher 	if (reg & NVREG_MIICTL_INUSE) {
115269b4b095SJeff Kirsher 		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
115369b4b095SJeff Kirsher 		udelay(NV_MIIBUSY_DELAY);
115469b4b095SJeff Kirsher 	}
115569b4b095SJeff Kirsher 
115669b4b095SJeff Kirsher 	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
115769b4b095SJeff Kirsher 	if (value != MII_READ) {
115869b4b095SJeff Kirsher 		writel(value, base + NvRegMIIData);
115969b4b095SJeff Kirsher 		reg |= NVREG_MIICTL_WRITE;
116069b4b095SJeff Kirsher 	}
116169b4b095SJeff Kirsher 	writel(reg, base + NvRegMIIControl);
116269b4b095SJeff Kirsher 
116369b4b095SJeff Kirsher 	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
116469b4b095SJeff Kirsher 			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
116569b4b095SJeff Kirsher 		retval = -1;
116669b4b095SJeff Kirsher 	} else if (value != MII_READ) {
116769b4b095SJeff Kirsher 		/* it was a write operation - fewer failures are detectable */
116869b4b095SJeff Kirsher 		retval = 0;
116969b4b095SJeff Kirsher 	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
117069b4b095SJeff Kirsher 		retval = -1;
117169b4b095SJeff Kirsher 	} else {
117269b4b095SJeff Kirsher 		retval = readl(base + NvRegMIIData);
117369b4b095SJeff Kirsher 	}
117469b4b095SJeff Kirsher 
117569b4b095SJeff Kirsher 	return retval;
117669b4b095SJeff Kirsher }
117769b4b095SJeff Kirsher 
117869b4b095SJeff Kirsher static int phy_reset(struct net_device *dev, u32 bmcr_setup)
117969b4b095SJeff Kirsher {
118069b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
118169b4b095SJeff Kirsher 	u32 miicontrol;
118269b4b095SJeff Kirsher 	unsigned int tries = 0;
118369b4b095SJeff Kirsher 
118469b4b095SJeff Kirsher 	miicontrol = BMCR_RESET | bmcr_setup;
118569b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
118669b4b095SJeff Kirsher 		return -1;
118769b4b095SJeff Kirsher 
118869b4b095SJeff Kirsher 	/* wait for 500ms */
118969b4b095SJeff Kirsher 	msleep(500);
119069b4b095SJeff Kirsher 
119169b4b095SJeff Kirsher 	/* must wait till reset is deasserted */
119269b4b095SJeff Kirsher 	while (miicontrol & BMCR_RESET) {
119369b4b095SJeff Kirsher 		usleep_range(10000, 20000);
119469b4b095SJeff Kirsher 		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
119569b4b095SJeff Kirsher 		/* FIXME: 100 tries seem excessive */
119669b4b095SJeff Kirsher 		if (tries++ > 100)
119769b4b095SJeff Kirsher 			return -1;
119869b4b095SJeff Kirsher 	}
119969b4b095SJeff Kirsher 	return 0;
120069b4b095SJeff Kirsher }
120169b4b095SJeff Kirsher 
120269b4b095SJeff Kirsher static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
120369b4b095SJeff Kirsher {
120469b4b095SJeff Kirsher 	static const struct {
120569b4b095SJeff Kirsher 		int reg;
120669b4b095SJeff Kirsher 		int init;
120769b4b095SJeff Kirsher 	} ri[] = {
120869b4b095SJeff Kirsher 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
120969b4b095SJeff Kirsher 		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
121069b4b095SJeff Kirsher 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
121169b4b095SJeff Kirsher 		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
121269b4b095SJeff Kirsher 		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
121369b4b095SJeff Kirsher 		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
121469b4b095SJeff Kirsher 		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
121569b4b095SJeff Kirsher 	};
121669b4b095SJeff Kirsher 	int i;
121769b4b095SJeff Kirsher 
121869b4b095SJeff Kirsher 	for (i = 0; i < ARRAY_SIZE(ri); i++) {
121969b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
122069b4b095SJeff Kirsher 			return PHY_ERROR;
122169b4b095SJeff Kirsher 	}
122269b4b095SJeff Kirsher 
122369b4b095SJeff Kirsher 	return 0;
122469b4b095SJeff Kirsher }
122569b4b095SJeff Kirsher 
122669b4b095SJeff Kirsher static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
122769b4b095SJeff Kirsher {
122869b4b095SJeff Kirsher 	u32 reg;
122969b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
123069b4b095SJeff Kirsher 	u32 powerstate = readl(base + NvRegPowerState2);
123169b4b095SJeff Kirsher 
123269b4b095SJeff Kirsher 	/* need to perform hw phy reset */
123369b4b095SJeff Kirsher 	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
123469b4b095SJeff Kirsher 	writel(powerstate, base + NvRegPowerState2);
123569b4b095SJeff Kirsher 	msleep(25);
123669b4b095SJeff Kirsher 
123769b4b095SJeff Kirsher 	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
123869b4b095SJeff Kirsher 	writel(powerstate, base + NvRegPowerState2);
123969b4b095SJeff Kirsher 	msleep(25);
124069b4b095SJeff Kirsher 
124169b4b095SJeff Kirsher 	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
124269b4b095SJeff Kirsher 	reg |= PHY_REALTEK_INIT9;
124369b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
124469b4b095SJeff Kirsher 		return PHY_ERROR;
124569b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
124669b4b095SJeff Kirsher 		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
124769b4b095SJeff Kirsher 		return PHY_ERROR;
124869b4b095SJeff Kirsher 	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
124969b4b095SJeff Kirsher 	if (!(reg & PHY_REALTEK_INIT11)) {
125069b4b095SJeff Kirsher 		reg |= PHY_REALTEK_INIT11;
125169b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
125269b4b095SJeff Kirsher 			return PHY_ERROR;
125369b4b095SJeff Kirsher 	}
125469b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
125569b4b095SJeff Kirsher 		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
125669b4b095SJeff Kirsher 		return PHY_ERROR;
125769b4b095SJeff Kirsher 
125869b4b095SJeff Kirsher 	return 0;
125969b4b095SJeff Kirsher }
126069b4b095SJeff Kirsher 
126169b4b095SJeff Kirsher static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
126269b4b095SJeff Kirsher {
126369b4b095SJeff Kirsher 	u32 phy_reserved;
126469b4b095SJeff Kirsher 
126569b4b095SJeff Kirsher 	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
126669b4b095SJeff Kirsher 		phy_reserved = mii_rw(dev, np->phyaddr,
126769b4b095SJeff Kirsher 				      PHY_REALTEK_INIT_REG6, MII_READ);
126869b4b095SJeff Kirsher 		phy_reserved |= PHY_REALTEK_INIT7;
126969b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr,
127069b4b095SJeff Kirsher 			   PHY_REALTEK_INIT_REG6, phy_reserved))
127169b4b095SJeff Kirsher 			return PHY_ERROR;
127269b4b095SJeff Kirsher 	}
127369b4b095SJeff Kirsher 
127469b4b095SJeff Kirsher 	return 0;
127569b4b095SJeff Kirsher }
127669b4b095SJeff Kirsher 
127769b4b095SJeff Kirsher static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
127869b4b095SJeff Kirsher {
127969b4b095SJeff Kirsher 	u32 phy_reserved;
128069b4b095SJeff Kirsher 
128169b4b095SJeff Kirsher 	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
128269b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr,
128369b4b095SJeff Kirsher 			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
128469b4b095SJeff Kirsher 			return PHY_ERROR;
128569b4b095SJeff Kirsher 		phy_reserved = mii_rw(dev, np->phyaddr,
128669b4b095SJeff Kirsher 				      PHY_REALTEK_INIT_REG2, MII_READ);
128769b4b095SJeff Kirsher 		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
128869b4b095SJeff Kirsher 		phy_reserved |= PHY_REALTEK_INIT3;
128969b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr,
129069b4b095SJeff Kirsher 			   PHY_REALTEK_INIT_REG2, phy_reserved))
129169b4b095SJeff Kirsher 			return PHY_ERROR;
129269b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr,
129369b4b095SJeff Kirsher 			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
129469b4b095SJeff Kirsher 			return PHY_ERROR;
129569b4b095SJeff Kirsher 	}
129669b4b095SJeff Kirsher 
129769b4b095SJeff Kirsher 	return 0;
129869b4b095SJeff Kirsher }
129969b4b095SJeff Kirsher 
130069b4b095SJeff Kirsher static int init_cicada(struct net_device *dev, struct fe_priv *np,
130169b4b095SJeff Kirsher 		       u32 phyinterface)
130269b4b095SJeff Kirsher {
130369b4b095SJeff Kirsher 	u32 phy_reserved;
130469b4b095SJeff Kirsher 
130569b4b095SJeff Kirsher 	if (phyinterface & PHY_RGMII) {
130669b4b095SJeff Kirsher 		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
130769b4b095SJeff Kirsher 		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
130869b4b095SJeff Kirsher 		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
130969b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
131069b4b095SJeff Kirsher 			return PHY_ERROR;
131169b4b095SJeff Kirsher 		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
131269b4b095SJeff Kirsher 		phy_reserved |= PHY_CICADA_INIT5;
131369b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
131469b4b095SJeff Kirsher 			return PHY_ERROR;
131569b4b095SJeff Kirsher 	}
131669b4b095SJeff Kirsher 	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
131769b4b095SJeff Kirsher 	phy_reserved |= PHY_CICADA_INIT6;
131869b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
131969b4b095SJeff Kirsher 		return PHY_ERROR;
132069b4b095SJeff Kirsher 
132169b4b095SJeff Kirsher 	return 0;
132269b4b095SJeff Kirsher }
132369b4b095SJeff Kirsher 
132469b4b095SJeff Kirsher static int init_vitesse(struct net_device *dev, struct fe_priv *np)
132569b4b095SJeff Kirsher {
132669b4b095SJeff Kirsher 	u32 phy_reserved;
132769b4b095SJeff Kirsher 
132869b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
132969b4b095SJeff Kirsher 		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
133069b4b095SJeff Kirsher 		return PHY_ERROR;
133169b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
133269b4b095SJeff Kirsher 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
133369b4b095SJeff Kirsher 		return PHY_ERROR;
133469b4b095SJeff Kirsher 	phy_reserved = mii_rw(dev, np->phyaddr,
133569b4b095SJeff Kirsher 			      PHY_VITESSE_INIT_REG4, MII_READ);
133669b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
133769b4b095SJeff Kirsher 		return PHY_ERROR;
133869b4b095SJeff Kirsher 	phy_reserved = mii_rw(dev, np->phyaddr,
133969b4b095SJeff Kirsher 			      PHY_VITESSE_INIT_REG3, MII_READ);
134069b4b095SJeff Kirsher 	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
134169b4b095SJeff Kirsher 	phy_reserved |= PHY_VITESSE_INIT3;
134269b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
134369b4b095SJeff Kirsher 		return PHY_ERROR;
134469b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
134569b4b095SJeff Kirsher 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
134669b4b095SJeff Kirsher 		return PHY_ERROR;
134769b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
134869b4b095SJeff Kirsher 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
134969b4b095SJeff Kirsher 		return PHY_ERROR;
135069b4b095SJeff Kirsher 	phy_reserved = mii_rw(dev, np->phyaddr,
135169b4b095SJeff Kirsher 			      PHY_VITESSE_INIT_REG4, MII_READ);
135269b4b095SJeff Kirsher 	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
135369b4b095SJeff Kirsher 	phy_reserved |= PHY_VITESSE_INIT3;
135469b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
135569b4b095SJeff Kirsher 		return PHY_ERROR;
135669b4b095SJeff Kirsher 	phy_reserved = mii_rw(dev, np->phyaddr,
135769b4b095SJeff Kirsher 			      PHY_VITESSE_INIT_REG3, MII_READ);
135869b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
135969b4b095SJeff Kirsher 		return PHY_ERROR;
136069b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
136169b4b095SJeff Kirsher 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
136269b4b095SJeff Kirsher 		return PHY_ERROR;
136369b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
136469b4b095SJeff Kirsher 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
136569b4b095SJeff Kirsher 		return PHY_ERROR;
136669b4b095SJeff Kirsher 	phy_reserved = mii_rw(dev, np->phyaddr,
136769b4b095SJeff Kirsher 			      PHY_VITESSE_INIT_REG4, MII_READ);
136869b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
136969b4b095SJeff Kirsher 		return PHY_ERROR;
137069b4b095SJeff Kirsher 	phy_reserved = mii_rw(dev, np->phyaddr,
137169b4b095SJeff Kirsher 			      PHY_VITESSE_INIT_REG3, MII_READ);
137269b4b095SJeff Kirsher 	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
137369b4b095SJeff Kirsher 	phy_reserved |= PHY_VITESSE_INIT8;
137469b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
137569b4b095SJeff Kirsher 		return PHY_ERROR;
137669b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
137769b4b095SJeff Kirsher 		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
137869b4b095SJeff Kirsher 		return PHY_ERROR;
137969b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr,
138069b4b095SJeff Kirsher 		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
138169b4b095SJeff Kirsher 		return PHY_ERROR;
138269b4b095SJeff Kirsher 
138369b4b095SJeff Kirsher 	return 0;
138469b4b095SJeff Kirsher }
138569b4b095SJeff Kirsher 
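/*
 * phy_init: full PHY bring-up. The ordering below matters: vendor
 * errata first, then the advertisement registers, then a BMCR reset
 * (which on some PHYs wipes the vendor setup and the pause bits again,
 * hence the second round of vendor init and the MII_ADVERTISE rewrite
 * near the end), and finally an autoneg restart, optionally powering
 * the PHY down.
 */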
138669b4b095SJeff Kirsher static int phy_init(struct net_device *dev)
138769b4b095SJeff Kirsher {
138869b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
138969b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
139069b4b095SJeff Kirsher 	u32 phyinterface;
139169b4b095SJeff Kirsher 	u32 mii_status, mii_control, mii_control_1000, reg;
139269b4b095SJeff Kirsher 
139369b4b095SJeff Kirsher 	/* phy errata for E3016 phy */
139469b4b095SJeff Kirsher 	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
139569b4b095SJeff Kirsher 		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
139669b4b095SJeff Kirsher 		reg &= ~PHY_MARVELL_E3016_INITMASK;
139769b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
139869b4b095SJeff Kirsher 			netdev_info(dev, "%s: phy write to errata reg failed\n",
139969b4b095SJeff Kirsher 				    pci_name(np->pci_dev));
140069b4b095SJeff Kirsher 			return PHY_ERROR;
140169b4b095SJeff Kirsher 		}
140269b4b095SJeff Kirsher 	}
140369b4b095SJeff Kirsher 	if (np->phy_oui == PHY_OUI_REALTEK) {
140469b4b095SJeff Kirsher 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
140569b4b095SJeff Kirsher 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
140669b4b095SJeff Kirsher 			if (init_realtek_8211b(dev, np)) {
140769b4b095SJeff Kirsher 				netdev_info(dev, "%s: phy init failed\n",
140869b4b095SJeff Kirsher 					    pci_name(np->pci_dev));
140969b4b095SJeff Kirsher 				return PHY_ERROR;
141069b4b095SJeff Kirsher 			}
141169b4b095SJeff Kirsher 		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
141269b4b095SJeff Kirsher 			   np->phy_rev == PHY_REV_REALTEK_8211C) {
141369b4b095SJeff Kirsher 			if (init_realtek_8211c(dev, np)) {
141469b4b095SJeff Kirsher 				netdev_info(dev, "%s: phy init failed\n",
141569b4b095SJeff Kirsher 					    pci_name(np->pci_dev));
141669b4b095SJeff Kirsher 				return PHY_ERROR;
141769b4b095SJeff Kirsher 			}
141869b4b095SJeff Kirsher 		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
141969b4b095SJeff Kirsher 			if (init_realtek_8201(dev, np)) {
142069b4b095SJeff Kirsher 				netdev_info(dev, "%s: phy init failed\n",
142169b4b095SJeff Kirsher 					    pci_name(np->pci_dev));
142269b4b095SJeff Kirsher 				return PHY_ERROR;
142369b4b095SJeff Kirsher 			}
142469b4b095SJeff Kirsher 		}
142569b4b095SJeff Kirsher 	}
142669b4b095SJeff Kirsher 
142769b4b095SJeff Kirsher 	/* set advertise register */
142869b4b095SJeff Kirsher 	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
142969b4b095SJeff Kirsher 	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
143069b4b095SJeff Kirsher 		ADVERTISE_100HALF | ADVERTISE_100FULL |
143169b4b095SJeff Kirsher 		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
143269b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
143369b4b095SJeff Kirsher 		netdev_info(dev, "%s: phy write to advertise failed\n",
143469b4b095SJeff Kirsher 			    pci_name(np->pci_dev));
143569b4b095SJeff Kirsher 		return PHY_ERROR;
143669b4b095SJeff Kirsher 	}
143769b4b095SJeff Kirsher 
143869b4b095SJeff Kirsher 	/* get phy interface type */
143969b4b095SJeff Kirsher 	phyinterface = readl(base + NvRegPhyInterface);
144069b4b095SJeff Kirsher 
144169b4b095SJeff Kirsher 	/* see if gigabit phy */
144269b4b095SJeff Kirsher 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
144369b4b095SJeff Kirsher 	if (mii_status & PHY_GIGABIT) {
144469b4b095SJeff Kirsher 		np->gigabit = PHY_GIGABIT;
144569b4b095SJeff Kirsher 		mii_control_1000 = mii_rw(dev, np->phyaddr,
144669b4b095SJeff Kirsher 					  MII_CTRL1000, MII_READ);
144769b4b095SJeff Kirsher 		mii_control_1000 &= ~ADVERTISE_1000HALF;
144869b4b095SJeff Kirsher 		if (phyinterface & PHY_RGMII)
144969b4b095SJeff Kirsher 			mii_control_1000 |= ADVERTISE_1000FULL;
145069b4b095SJeff Kirsher 		else
145169b4b095SJeff Kirsher 			mii_control_1000 &= ~ADVERTISE_1000FULL;
145269b4b095SJeff Kirsher 
145369b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
145469b4b095SJeff Kirsher 			netdev_info(dev, "%s: phy init failed\n",
145569b4b095SJeff Kirsher 				    pci_name(np->pci_dev));
145669b4b095SJeff Kirsher 			return PHY_ERROR;
145769b4b095SJeff Kirsher 		}
145869b4b095SJeff Kirsher 	} else
145969b4b095SJeff Kirsher 		np->gigabit = 0;
146069b4b095SJeff Kirsher 
146169b4b095SJeff Kirsher 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
146269b4b095SJeff Kirsher 	mii_control |= BMCR_ANENABLE;
146369b4b095SJeff Kirsher 
146469b4b095SJeff Kirsher 	if (np->phy_oui == PHY_OUI_REALTEK &&
146569b4b095SJeff Kirsher 	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
146669b4b095SJeff Kirsher 	    np->phy_rev == PHY_REV_REALTEK_8211C) {
146769b4b095SJeff Kirsher 		/* start autoneg since we already performed hw reset above */
146869b4b095SJeff Kirsher 		mii_control |= BMCR_ANRESTART;
146969b4b095SJeff Kirsher 		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
147069b4b095SJeff Kirsher 			netdev_info(dev, "%s: phy init failed\n",
147169b4b095SJeff Kirsher 				    pci_name(np->pci_dev));
147269b4b095SJeff Kirsher 			return PHY_ERROR;
147369b4b095SJeff Kirsher 		}
147469b4b095SJeff Kirsher 	} else {
147569b4b095SJeff Kirsher 		/* reset the phy
147669b4b095SJeff Kirsher 		 * (certain phys need bmcr to be setup with reset)
147769b4b095SJeff Kirsher 		 */
147869b4b095SJeff Kirsher 		if (phy_reset(dev, mii_control)) {
147969b4b095SJeff Kirsher 			netdev_info(dev, "%s: phy reset failed\n",
148069b4b095SJeff Kirsher 				    pci_name(np->pci_dev));
148169b4b095SJeff Kirsher 			return PHY_ERROR;
148269b4b095SJeff Kirsher 		}
148369b4b095SJeff Kirsher 	}
148469b4b095SJeff Kirsher 
148569b4b095SJeff Kirsher 	/* phy vendor specific configuration */
1486d46781bcSDavid Wood 	if (np->phy_oui == PHY_OUI_CICADA) {
148769b4b095SJeff Kirsher 		if (init_cicada(dev, np, phyinterface)) {
148869b4b095SJeff Kirsher 			netdev_info(dev, "%s: phy init failed\n",
148969b4b095SJeff Kirsher 				    pci_name(np->pci_dev));
149069b4b095SJeff Kirsher 			return PHY_ERROR;
149169b4b095SJeff Kirsher 		}
149269b4b095SJeff Kirsher 	} else if (np->phy_oui == PHY_OUI_VITESSE) {
149369b4b095SJeff Kirsher 		if (init_vitesse(dev, np)) {
149469b4b095SJeff Kirsher 			netdev_info(dev, "%s: phy init failed\n",
149569b4b095SJeff Kirsher 				    pci_name(np->pci_dev));
149669b4b095SJeff Kirsher 			return PHY_ERROR;
149769b4b095SJeff Kirsher 		}
149869b4b095SJeff Kirsher 	} else if (np->phy_oui == PHY_OUI_REALTEK) {
149969b4b095SJeff Kirsher 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
150069b4b095SJeff Kirsher 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
150169b4b095SJeff Kirsher 			/* reset could have cleared these out, set them back */
150269b4b095SJeff Kirsher 			if (init_realtek_8211b(dev, np)) {
150369b4b095SJeff Kirsher 				netdev_info(dev, "%s: phy init failed\n",
150469b4b095SJeff Kirsher 					    pci_name(np->pci_dev));
150569b4b095SJeff Kirsher 				return PHY_ERROR;
150669b4b095SJeff Kirsher 			}
150769b4b095SJeff Kirsher 		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
150869b4b095SJeff Kirsher 			if (init_realtek_8201(dev, np) ||
150969b4b095SJeff Kirsher 			    init_realtek_8201_cross(dev, np)) {
151069b4b095SJeff Kirsher 				netdev_info(dev, "%s: phy init failed\n",
151169b4b095SJeff Kirsher 					    pci_name(np->pci_dev));
151269b4b095SJeff Kirsher 				return PHY_ERROR;
151369b4b095SJeff Kirsher 			}
151469b4b095SJeff Kirsher 		}
151569b4b095SJeff Kirsher 	}
151669b4b095SJeff Kirsher 
151769b4b095SJeff Kirsher 	/* some phys clear out pause advertisement on reset, set it back */
151869b4b095SJeff Kirsher 	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
151969b4b095SJeff Kirsher 
152069b4b095SJeff Kirsher 	/* restart auto negotiation, power down phy */
152169b4b095SJeff Kirsher 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
152269b4b095SJeff Kirsher 	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
152369b4b095SJeff Kirsher 	if (phy_power_down)
152469b4b095SJeff Kirsher 		mii_control |= BMCR_PDOWN;
152569b4b095SJeff Kirsher 	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
152669b4b095SJeff Kirsher 		return PHY_ERROR;
152769b4b095SJeff Kirsher 
152869b4b095SJeff Kirsher 	return 0;
152969b4b095SJeff Kirsher }
153069b4b095SJeff Kirsher 
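/*
 * rx/tx start/stop. np->mac_in_use is nonzero when the MAC is shared
 * with another agent (the management unit handled elsewhere in this
 * driver); in that case the START bits are left alone and only the
 * driver-side rx/tx path enable bits are toggled.
 */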
153169b4b095SJeff Kirsher static void nv_start_rx(struct net_device *dev)
153269b4b095SJeff Kirsher {
153369b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
153469b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
153569b4b095SJeff Kirsher 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
153669b4b095SJeff Kirsher 
153769b4b095SJeff Kirsher 	/* Already running? Stop it. */
153869b4b095SJeff Kirsher 	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
153969b4b095SJeff Kirsher 		rx_ctrl &= ~NVREG_RCVCTL_START;
154069b4b095SJeff Kirsher 		writel(rx_ctrl, base + NvRegReceiverControl);
154169b4b095SJeff Kirsher 		pci_push(base);
154269b4b095SJeff Kirsher 	}
154369b4b095SJeff Kirsher 	writel(np->linkspeed, base + NvRegLinkSpeed);
154469b4b095SJeff Kirsher 	pci_push(base);
154569b4b095SJeff Kirsher 	rx_ctrl |= NVREG_RCVCTL_START;
154669b4b095SJeff Kirsher 	if (np->mac_in_use)
154769b4b095SJeff Kirsher 		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
154869b4b095SJeff Kirsher 	writel(rx_ctrl, base + NvRegReceiverControl);
154969b4b095SJeff Kirsher 	pci_push(base);
155069b4b095SJeff Kirsher }
155169b4b095SJeff Kirsher 
155269b4b095SJeff Kirsher static void nv_stop_rx(struct net_device *dev)
155369b4b095SJeff Kirsher {
155469b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
155569b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
155669b4b095SJeff Kirsher 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
155769b4b095SJeff Kirsher 
155869b4b095SJeff Kirsher 	if (!np->mac_in_use)
155969b4b095SJeff Kirsher 		rx_ctrl &= ~NVREG_RCVCTL_START;
156069b4b095SJeff Kirsher 	else
156169b4b095SJeff Kirsher 		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
156269b4b095SJeff Kirsher 	writel(rx_ctrl, base + NvRegReceiverControl);
156369b4b095SJeff Kirsher 	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
156469b4b095SJeff Kirsher 		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
156569b4b095SJeff Kirsher 		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
156669b4b095SJeff Kirsher 			    __func__);
156769b4b095SJeff Kirsher 
156869b4b095SJeff Kirsher 	udelay(NV_RXSTOP_DELAY2);
156969b4b095SJeff Kirsher 	if (!np->mac_in_use)
157069b4b095SJeff Kirsher 		writel(0, base + NvRegLinkSpeed);
157169b4b095SJeff Kirsher }
157269b4b095SJeff Kirsher 
157369b4b095SJeff Kirsher static void nv_start_tx(struct net_device *dev)
157469b4b095SJeff Kirsher {
157569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
157669b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
157769b4b095SJeff Kirsher 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
157869b4b095SJeff Kirsher 
157969b4b095SJeff Kirsher 	tx_ctrl |= NVREG_XMITCTL_START;
158069b4b095SJeff Kirsher 	if (np->mac_in_use)
158169b4b095SJeff Kirsher 		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
158269b4b095SJeff Kirsher 	writel(tx_ctrl, base + NvRegTransmitterControl);
158369b4b095SJeff Kirsher 	pci_push(base);
158469b4b095SJeff Kirsher }
158569b4b095SJeff Kirsher 
158669b4b095SJeff Kirsher static void nv_stop_tx(struct net_device *dev)
158769b4b095SJeff Kirsher {
158869b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
158969b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
159069b4b095SJeff Kirsher 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
159169b4b095SJeff Kirsher 
159269b4b095SJeff Kirsher 	if (!np->mac_in_use)
159369b4b095SJeff Kirsher 		tx_ctrl &= ~NVREG_XMITCTL_START;
159469b4b095SJeff Kirsher 	else
159569b4b095SJeff Kirsher 		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
159669b4b095SJeff Kirsher 	writel(tx_ctrl, base + NvRegTransmitterControl);
159769b4b095SJeff Kirsher 	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
159869b4b095SJeff Kirsher 		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
159969b4b095SJeff Kirsher 		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
160069b4b095SJeff Kirsher 			    __func__);
160169b4b095SJeff Kirsher 
160269b4b095SJeff Kirsher 	udelay(NV_TXSTOP_DELAY2);
160369b4b095SJeff Kirsher 	if (!np->mac_in_use)
160469b4b095SJeff Kirsher 		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
160569b4b095SJeff Kirsher 		       base + NvRegTransmitPoll);
160669b4b095SJeff Kirsher }
160769b4b095SJeff Kirsher 
160869b4b095SJeff Kirsher static void nv_start_rxtx(struct net_device *dev)
160969b4b095SJeff Kirsher {
161069b4b095SJeff Kirsher 	nv_start_rx(dev);
161169b4b095SJeff Kirsher 	nv_start_tx(dev);
161269b4b095SJeff Kirsher }
161369b4b095SJeff Kirsher 
161469b4b095SJeff Kirsher static void nv_stop_rxtx(struct net_device *dev)
161569b4b095SJeff Kirsher {
161669b4b095SJeff Kirsher 	nv_stop_rx(dev);
161769b4b095SJeff Kirsher 	nv_stop_tx(dev);
161869b4b095SJeff Kirsher }
161969b4b095SJeff Kirsher 
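/*
 * Pulse the tx/rx logic reset: assert NVREG_TXRXCTL_RESET, wait
 * NV_TXRX_RESET_DELAY microseconds, then deassert it again.
 */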
162069b4b095SJeff Kirsher static void nv_txrx_reset(struct net_device *dev)
162169b4b095SJeff Kirsher {
162269b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
162369b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
162469b4b095SJeff Kirsher 
162569b4b095SJeff Kirsher 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
162669b4b095SJeff Kirsher 	pci_push(base);
162769b4b095SJeff Kirsher 	udelay(NV_TXRX_RESET_DELAY);
162869b4b095SJeff Kirsher 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
162969b4b095SJeff Kirsher 	pci_push(base);
163069b4b095SJeff Kirsher }
163169b4b095SJeff Kirsher 
163269b4b095SJeff Kirsher static void nv_mac_reset(struct net_device *dev)
163369b4b095SJeff Kirsher {
163469b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
163569b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
163669b4b095SJeff Kirsher 	u32 temp1, temp2, temp3;
163769b4b095SJeff Kirsher 
163869b4b095SJeff Kirsher 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
163969b4b095SJeff Kirsher 	pci_push(base);
164069b4b095SJeff Kirsher 
164169b4b095SJeff Kirsher 	/* save registers since they will be cleared on reset */
164269b4b095SJeff Kirsher 	temp1 = readl(base + NvRegMacAddrA);
164369b4b095SJeff Kirsher 	temp2 = readl(base + NvRegMacAddrB);
164469b4b095SJeff Kirsher 	temp3 = readl(base + NvRegTransmitPoll);
164569b4b095SJeff Kirsher 
164669b4b095SJeff Kirsher 	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
164769b4b095SJeff Kirsher 	pci_push(base);
164869b4b095SJeff Kirsher 	udelay(NV_MAC_RESET_DELAY);
164969b4b095SJeff Kirsher 	writel(0, base + NvRegMacReset);
165069b4b095SJeff Kirsher 	pci_push(base);
165169b4b095SJeff Kirsher 	udelay(NV_MAC_RESET_DELAY);
165269b4b095SJeff Kirsher 
165369b4b095SJeff Kirsher 	/* restore saved registers */
165469b4b095SJeff Kirsher 	writel(temp1, base + NvRegMacAddrA);
165569b4b095SJeff Kirsher 	writel(temp2, base + NvRegMacAddrB);
165669b4b095SJeff Kirsher 	writel(temp3, base + NvRegTransmitPoll);
165769b4b095SJeff Kirsher 
165869b4b095SJeff Kirsher 	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
165969b4b095SJeff Kirsher 	pci_push(base);
166069b4b095SJeff Kirsher }
166169b4b095SJeff Kirsher 
1662f5d827aeSdavid decotigny /* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
1663f5d827aeSdavid decotigny static void nv_update_stats(struct net_device *dev)
166469b4b095SJeff Kirsher {
166569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
166669b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
166769b4b095SJeff Kirsher 
1668dc5e8bfcSSebastian Andrzej Siewior 	lockdep_assert_held(&np->hwstats_lock);
1669f5d827aeSdavid decotigny 
1670f5d827aeSdavid decotigny 	/* query hardware */
167169b4b095SJeff Kirsher 	np->estats.tx_bytes += readl(base + NvRegTxCnt);
167269b4b095SJeff Kirsher 	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
167369b4b095SJeff Kirsher 	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
167469b4b095SJeff Kirsher 	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
167569b4b095SJeff Kirsher 	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
167669b4b095SJeff Kirsher 	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
167769b4b095SJeff Kirsher 	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
167869b4b095SJeff Kirsher 	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
167969b4b095SJeff Kirsher 	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
168069b4b095SJeff Kirsher 	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
168169b4b095SJeff Kirsher 	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
168269b4b095SJeff Kirsher 	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
168369b4b095SJeff Kirsher 	np->estats.rx_runt += readl(base + NvRegRxRunt);
168469b4b095SJeff Kirsher 	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
168569b4b095SJeff Kirsher 	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
168669b4b095SJeff Kirsher 	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
168769b4b095SJeff Kirsher 	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
168869b4b095SJeff Kirsher 	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
168969b4b095SJeff Kirsher 	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
169069b4b095SJeff Kirsher 	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
169169b4b095SJeff Kirsher 	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
169269b4b095SJeff Kirsher 	np->estats.rx_packets =
169369b4b095SJeff Kirsher 		np->estats.rx_unicast +
169469b4b095SJeff Kirsher 		np->estats.rx_multicast +
169569b4b095SJeff Kirsher 		np->estats.rx_broadcast;
169669b4b095SJeff Kirsher 	np->estats.rx_errors_total =
169769b4b095SJeff Kirsher 		np->estats.rx_crc_errors +
169869b4b095SJeff Kirsher 		np->estats.rx_over_errors +
169969b4b095SJeff Kirsher 		np->estats.rx_frame_error +
170069b4b095SJeff Kirsher 		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
170169b4b095SJeff Kirsher 		np->estats.rx_late_collision +
170269b4b095SJeff Kirsher 		np->estats.rx_runt +
170369b4b095SJeff Kirsher 		np->estats.rx_frame_too_long;
170469b4b095SJeff Kirsher 	np->estats.tx_errors_total =
170569b4b095SJeff Kirsher 		np->estats.tx_late_collision +
170669b4b095SJeff Kirsher 		np->estats.tx_fifo_errors +
170769b4b095SJeff Kirsher 		np->estats.tx_carrier_errors +
170869b4b095SJeff Kirsher 		np->estats.tx_excess_deferral +
170969b4b095SJeff Kirsher 		np->estats.tx_retry_error;
171069b4b095SJeff Kirsher 
171169b4b095SJeff Kirsher 	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
171269b4b095SJeff Kirsher 		np->estats.tx_deferral += readl(base + NvRegTxDef);
171369b4b095SJeff Kirsher 		np->estats.tx_packets += readl(base + NvRegTxFrame);
171469b4b095SJeff Kirsher 		np->estats.rx_bytes += readl(base + NvRegRxCnt);
171569b4b095SJeff Kirsher 		np->estats.tx_pause += readl(base + NvRegTxPause);
171669b4b095SJeff Kirsher 		np->estats.rx_pause += readl(base + NvRegRxPause);
171769b4b095SJeff Kirsher 		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
17180bdfea8bSMandeep Baines 		np->estats.rx_errors_total += np->estats.rx_drop_frame;
171969b4b095SJeff Kirsher 	}
172069b4b095SJeff Kirsher 
172169b4b095SJeff Kirsher 	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
172269b4b095SJeff Kirsher 		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
172369b4b095SJeff Kirsher 		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
172469b4b095SJeff Kirsher 		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
172569b4b095SJeff Kirsher 	}
172669b4b095SJeff Kirsher }
172769b4b095SJeff Kirsher 
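/*
 * Fold one cpu's software counters into *storage. The u64_stats
 * begin/retry loops make the 64bit reads tear-free on 32bit hosts;
 * rx and tx counters are protected by separate syncp domains, so they
 * are sampled in two independent retry loops.
 */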
1728f4b633b9SZhu Yanjun static void nv_get_stats(int cpu, struct fe_priv *np,
1729f4b633b9SZhu Yanjun 			 struct rtnl_link_stats64 *storage)
1730f4b633b9SZhu Yanjun {
1731f4b633b9SZhu Yanjun 	struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
1732f4b633b9SZhu Yanjun 	unsigned int syncp_start;
1733f4b633b9SZhu Yanjun 	u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
1734f4b633b9SZhu Yanjun 	u64 tx_packets, tx_bytes, tx_dropped;
1735f4b633b9SZhu Yanjun 
1736f4b633b9SZhu Yanjun 	do {
1737068c38adSThomas Gleixner 		syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
1738f4b633b9SZhu Yanjun 		rx_packets       = src->stat_rx_packets;
1739f4b633b9SZhu Yanjun 		rx_bytes         = src->stat_rx_bytes;
1740f4b633b9SZhu Yanjun 		rx_dropped       = src->stat_rx_dropped;
1741f4b633b9SZhu Yanjun 		rx_missed_errors = src->stat_rx_missed_errors;
1742068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start));
1743f4b633b9SZhu Yanjun 
1744f4b633b9SZhu Yanjun 	storage->rx_packets       += rx_packets;
1745f4b633b9SZhu Yanjun 	storage->rx_bytes         += rx_bytes;
1746f4b633b9SZhu Yanjun 	storage->rx_dropped       += rx_dropped;
1747f4b633b9SZhu Yanjun 	storage->rx_missed_errors += rx_missed_errors;
1748f4b633b9SZhu Yanjun 
1749f4b633b9SZhu Yanjun 	do {
1750068c38adSThomas Gleixner 		syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp);
1751f4b633b9SZhu Yanjun 		tx_packets  = src->stat_tx_packets;
1752f4b633b9SZhu Yanjun 		tx_bytes    = src->stat_tx_bytes;
1753f4b633b9SZhu Yanjun 		tx_dropped  = src->stat_tx_dropped;
1754068c38adSThomas Gleixner 	} while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start));
1755f4b633b9SZhu Yanjun 
1756f4b633b9SZhu Yanjun 	storage->tx_packets += tx_packets;
1757f4b633b9SZhu Yanjun 	storage->tx_bytes   += tx_bytes;
1758f4b633b9SZhu Yanjun 	storage->tx_dropped += tx_dropped;
1759f4b633b9SZhu Yanjun }
1760f4b633b9SZhu Yanjun 
176169b4b095SJeff Kirsher /*
1762f5d827aeSdavid decotigny  * nv_get_stats64: dev->ndo_get_stats64 function
176369b4b095SJeff Kirsher  * Get latest stats value from the nic.
176469b4b095SJeff Kirsher  * Called with read_lock(&dev_base_lock) held for read -
176569b4b095SJeff Kirsher  * only synchronized against unregister_netdevice.
176669b4b095SJeff Kirsher  */
1767bc1f4470Sstephen hemminger static void
1768f5d827aeSdavid decotigny nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1769f5d827aeSdavid decotigny 	__acquires(&netdev_priv(dev)->hwstats_lock)
1770f5d827aeSdavid decotigny 	__releases(&netdev_priv(dev)->hwstats_lock)
177169b4b095SJeff Kirsher {
177269b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
1773f4b633b9SZhu Yanjun 	int cpu;
177469b4b095SJeff Kirsher 
1775674aee3bSdavid decotigny 	/*
1776f5d827aeSdavid decotigny 	 * Note: because HW stats are not always available and for
1777f5d827aeSdavid decotigny 	 * consistency reasons, the following ifconfig stats are
1778f5d827aeSdavid decotigny 	 * managed by software: rx_bytes, tx_bytes, rx_packets and
1779f5d827aeSdavid decotigny 	 * tx_packets. The related hardware stats reported by ethtool
1780f5d827aeSdavid decotigny 	 * should be equivalent to these ifconfig stats, with 4
1781f5d827aeSdavid decotigny 	 * additional bytes per packet (Ethernet FCS CRC), except for
1782f5d827aeSdavid decotigny 	 * tx_packets when TSO kicks in.
1783674aee3bSdavid decotigny 	 */
1784674aee3bSdavid decotigny 
1785f5d827aeSdavid decotigny 	/* software stats */
1786f4b633b9SZhu Yanjun 	for_each_online_cpu(cpu)
1787f4b633b9SZhu Yanjun 		nv_get_stats(cpu, np, storage);
1788f5d827aeSdavid decotigny 
1789f5d827aeSdavid decotigny 	/* If the nic supports hw counters then retrieve latest values */
1790f5d827aeSdavid decotigny 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
1791f5d827aeSdavid decotigny 		spin_lock_bh(&np->hwstats_lock);
1792f5d827aeSdavid decotigny 
1793f5d827aeSdavid decotigny 		nv_update_stats(dev);
1794f5d827aeSdavid decotigny 
1795f5d827aeSdavid decotigny 		/* generic stats */
1796f5d827aeSdavid decotigny 		storage->rx_errors = np->estats.rx_errors_total;
1797f5d827aeSdavid decotigny 		storage->tx_errors = np->estats.tx_errors_total;
1798f5d827aeSdavid decotigny 
1799f5d827aeSdavid decotigny 		/* meaningful only when NIC supports stats v3 */
1800f5d827aeSdavid decotigny 		storage->multicast = np->estats.rx_multicast;
1801f5d827aeSdavid decotigny 
1802f5d827aeSdavid decotigny 		/* detailed rx_errors */
1803f5d827aeSdavid decotigny 		storage->rx_length_errors = np->estats.rx_length_error;
1804f5d827aeSdavid decotigny 		storage->rx_over_errors   = np->estats.rx_over_errors;
1805f5d827aeSdavid decotigny 		storage->rx_crc_errors    = np->estats.rx_crc_errors;
1806f5d827aeSdavid decotigny 		storage->rx_frame_errors  = np->estats.rx_frame_align_error;
1807f5d827aeSdavid decotigny 		storage->rx_fifo_errors   = np->estats.rx_drop_frame;
1808f5d827aeSdavid decotigny 
1809f5d827aeSdavid decotigny 		/* detailed tx_errors */
1810f5d827aeSdavid decotigny 		storage->tx_carrier_errors = np->estats.tx_carrier_errors;
1811f5d827aeSdavid decotigny 		storage->tx_fifo_errors    = np->estats.tx_fifo_errors;
1812f5d827aeSdavid decotigny 
1813f5d827aeSdavid decotigny 		spin_unlock_bh(&np->hwstats_lock);
181469b4b095SJeff Kirsher 	}
181569b4b095SJeff Kirsher }
181669b4b095SJeff Kirsher 
181769b4b095SJeff Kirsher /*
181869b4b095SJeff Kirsher  * nv_alloc_rx: fill rx ring entries.
181969b4b095SJeff Kirsher  * Returns 1 if skb allocation failed and the
182069b4b095SJeff Kirsher  * rx engine is left without available descriptors.
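 * The ring is deliberately filled to one slot short of full: less_rx
 * trails get_rx by one entry, so put_rx can never catch up with the
 * position the hardware is still consuming from.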
182169b4b095SJeff Kirsher  */
182269b4b095SJeff Kirsher static int nv_alloc_rx(struct net_device *dev)
182369b4b095SJeff Kirsher {
182469b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
182569b4b095SJeff Kirsher 	struct ring_desc *less_rx;
182669b4b095SJeff Kirsher 
182769b4b095SJeff Kirsher 	less_rx = np->get_rx.orig;
182864f26abbSZhu Yanjun 	if (less_rx-- == np->rx_ring.orig)
182969b4b095SJeff Kirsher 		less_rx = np->last_rx.orig;
183069b4b095SJeff Kirsher 
183169b4b095SJeff Kirsher 	while (np->put_rx.orig != less_rx) {
1832dae2e9f4SPradeep A. Dalvi 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1833ac0b715eSZhu Yanjun 		if (likely(skb)) {
183469b4b095SJeff Kirsher 			np->put_rx_ctx->skb = skb;
18357598b349SZhu Yanjun 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
183669b4b095SJeff Kirsher 							     skb->data,
183769b4b095SJeff Kirsher 							     skb_tailroom(skb),
18387598b349SZhu Yanjun 							     DMA_FROM_DEVICE);
183939e50d96SZhu Yanjun 			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
184039e50d96SZhu Yanjun 						       np->put_rx_ctx->dma))) {
1841612a7c4eSLarry Finger 				kfree_skb(skb);
1842612a7c4eSLarry Finger 				goto packet_dropped;
1843612a7c4eSLarry Finger 			}
184469b4b095SJeff Kirsher 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
184569b4b095SJeff Kirsher 			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
184669b4b095SJeff Kirsher 			wmb();
184769b4b095SJeff Kirsher 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
184869b4b095SJeff Kirsher 			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
184964f26abbSZhu Yanjun 				np->put_rx.orig = np->rx_ring.orig;
185069b4b095SJeff Kirsher 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1851a9124ec4SZhu Yanjun 				np->put_rx_ctx = np->rx_skb;
18520a1f222dSdavid decotigny 		} else {
1853612a7c4eSLarry Finger packet_dropped:
18540a1f222dSdavid decotigny 			u64_stats_update_begin(&np->swstats_rx_syncp);
1855f4b633b9SZhu Yanjun 			nv_txrx_stats_inc(stat_rx_dropped);
18560a1f222dSdavid decotigny 			u64_stats_update_end(&np->swstats_rx_syncp);
185769b4b095SJeff Kirsher 			return 1;
185869b4b095SJeff Kirsher 		}
18590a1f222dSdavid decotigny 	}
186069b4b095SJeff Kirsher 	return 0;
186169b4b095SJeff Kirsher }
186269b4b095SJeff Kirsher 
186369b4b095SJeff Kirsher static int nv_alloc_rx_optimized(struct net_device *dev)
186469b4b095SJeff Kirsher {
186569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
186669b4b095SJeff Kirsher 	struct ring_desc_ex *less_rx;
186769b4b095SJeff Kirsher 
186869b4b095SJeff Kirsher 	less_rx = np->get_rx.ex;
186964f26abbSZhu Yanjun 	if (less_rx-- == np->rx_ring.ex)
187069b4b095SJeff Kirsher 		less_rx = np->last_rx.ex;
187169b4b095SJeff Kirsher 
187269b4b095SJeff Kirsher 	while (np->put_rx.ex != less_rx) {
1873dae2e9f4SPradeep A. Dalvi 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1874ac0b715eSZhu Yanjun 		if (likely(skb)) {
187569b4b095SJeff Kirsher 			np->put_rx_ctx->skb = skb;
18767598b349SZhu Yanjun 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
187769b4b095SJeff Kirsher 							     skb->data,
187869b4b095SJeff Kirsher 							     skb_tailroom(skb),
18797598b349SZhu Yanjun 							     DMA_FROM_DEVICE);
188039e50d96SZhu Yanjun 			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
188139e50d96SZhu Yanjun 						       np->put_rx_ctx->dma))) {
1882612a7c4eSLarry Finger 				kfree_skb(skb);
1883612a7c4eSLarry Finger 				goto packet_dropped;
1884612a7c4eSLarry Finger 			}
188569b4b095SJeff Kirsher 			np->put_rx_ctx->dma_len = skb_tailroom(skb);
188669b4b095SJeff Kirsher 			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
188769b4b095SJeff Kirsher 			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
188869b4b095SJeff Kirsher 			wmb();
188969b4b095SJeff Kirsher 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
189069b4b095SJeff Kirsher 			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
189164f26abbSZhu Yanjun 				np->put_rx.ex = np->rx_ring.ex;
189269b4b095SJeff Kirsher 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1893a9124ec4SZhu Yanjun 				np->put_rx_ctx = np->rx_skb;
18940a1f222dSdavid decotigny 		} else {
1895612a7c4eSLarry Finger packet_dropped:
18960a1f222dSdavid decotigny 			u64_stats_update_begin(&np->swstats_rx_syncp);
1897f4b633b9SZhu Yanjun 			nv_txrx_stats_inc(stat_rx_dropped);
18980a1f222dSdavid decotigny 			u64_stats_update_end(&np->swstats_rx_syncp);
189969b4b095SJeff Kirsher 			return 1;
190069b4b095SJeff Kirsher 		}
19010a1f222dSdavid decotigny 	}
190269b4b095SJeff Kirsher 	return 0;
190369b4b095SJeff Kirsher }
190469b4b095SJeff Kirsher 
190569b4b095SJeff Kirsher /* If rx bufs are exhausted, this is called after 50ms to attempt a refresh */
1906d9935679SKees Cook static void nv_do_rx_refill(struct timer_list *t)
190769b4b095SJeff Kirsher {
1908d9935679SKees Cook 	struct fe_priv *np = from_timer(np, t, oom_kick);
190969b4b095SJeff Kirsher 
191069b4b095SJeff Kirsher 	/* Just reschedule NAPI rx processing */
191169b4b095SJeff Kirsher 	napi_schedule(&np->napi);
191269b4b095SJeff Kirsher }
191369b4b095SJeff Kirsher 
191469b4b095SJeff Kirsher static void nv_init_rx(struct net_device *dev)
191569b4b095SJeff Kirsher {
191669b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
191769b4b095SJeff Kirsher 	int i;
191869b4b095SJeff Kirsher 
191964f26abbSZhu Yanjun 	np->get_rx = np->rx_ring;
192064f26abbSZhu Yanjun 	np->put_rx = np->rx_ring;
192169b4b095SJeff Kirsher 
192269b4b095SJeff Kirsher 	if (!nv_optimized(np))
192369b4b095SJeff Kirsher 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
192469b4b095SJeff Kirsher 	else
192569b4b095SJeff Kirsher 		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1926a9124ec4SZhu Yanjun 	np->get_rx_ctx = np->rx_skb;
1927a9124ec4SZhu Yanjun 	np->put_rx_ctx = np->rx_skb;
192869b4b095SJeff Kirsher 	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
192969b4b095SJeff Kirsher 
193069b4b095SJeff Kirsher 	for (i = 0; i < np->rx_ring_size; i++) {
193169b4b095SJeff Kirsher 		if (!nv_optimized(np)) {
193269b4b095SJeff Kirsher 			np->rx_ring.orig[i].flaglen = 0;
193369b4b095SJeff Kirsher 			np->rx_ring.orig[i].buf = 0;
193469b4b095SJeff Kirsher 		} else {
193569b4b095SJeff Kirsher 			np->rx_ring.ex[i].flaglen = 0;
193669b4b095SJeff Kirsher 			np->rx_ring.ex[i].txvlan = 0;
193769b4b095SJeff Kirsher 			np->rx_ring.ex[i].bufhigh = 0;
193869b4b095SJeff Kirsher 			np->rx_ring.ex[i].buflow = 0;
193969b4b095SJeff Kirsher 		}
194069b4b095SJeff Kirsher 		np->rx_skb[i].skb = NULL;
194169b4b095SJeff Kirsher 		np->rx_skb[i].dma = 0;
194269b4b095SJeff Kirsher 	}
194369b4b095SJeff Kirsher }
194469b4b095SJeff Kirsher 
194569b4b095SJeff Kirsher static void nv_init_tx(struct net_device *dev)
194669b4b095SJeff Kirsher {
194769b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
194869b4b095SJeff Kirsher 	int i;
194969b4b095SJeff Kirsher 
1950c360f2b5SZhu Yanjun 	np->get_tx = np->tx_ring;
1951c360f2b5SZhu Yanjun 	np->put_tx = np->tx_ring;
195269b4b095SJeff Kirsher 
195369b4b095SJeff Kirsher 	if (!nv_optimized(np))
195469b4b095SJeff Kirsher 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
195569b4b095SJeff Kirsher 	else
195669b4b095SJeff Kirsher 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
195741b0cd36SZhu Yanjun 	np->get_tx_ctx = np->tx_skb;
195841b0cd36SZhu Yanjun 	np->put_tx_ctx = np->tx_skb;
195969b4b095SJeff Kirsher 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1960b8bfca94STom Herbert 	netdev_reset_queue(np->dev);
196169b4b095SJeff Kirsher 	np->tx_pkts_in_progress = 0;
196269b4b095SJeff Kirsher 	np->tx_change_owner = NULL;
196369b4b095SJeff Kirsher 	np->tx_end_flip = NULL;
196469b4b095SJeff Kirsher 	np->tx_stop = 0;
196569b4b095SJeff Kirsher 
196669b4b095SJeff Kirsher 	for (i = 0; i < np->tx_ring_size; i++) {
196769b4b095SJeff Kirsher 		if (!nv_optimized(np)) {
196869b4b095SJeff Kirsher 			np->tx_ring.orig[i].flaglen = 0;
196969b4b095SJeff Kirsher 			np->tx_ring.orig[i].buf = 0;
197069b4b095SJeff Kirsher 		} else {
197169b4b095SJeff Kirsher 			np->tx_ring.ex[i].flaglen = 0;
197269b4b095SJeff Kirsher 			np->tx_ring.ex[i].txvlan = 0;
197369b4b095SJeff Kirsher 			np->tx_ring.ex[i].bufhigh = 0;
197469b4b095SJeff Kirsher 			np->tx_ring.ex[i].buflow = 0;
197569b4b095SJeff Kirsher 		}
197669b4b095SJeff Kirsher 		np->tx_skb[i].skb = NULL;
197769b4b095SJeff Kirsher 		np->tx_skb[i].dma = 0;
197869b4b095SJeff Kirsher 		np->tx_skb[i].dma_len = 0;
197969b4b095SJeff Kirsher 		np->tx_skb[i].dma_single = 0;
198069b4b095SJeff Kirsher 		np->tx_skb[i].first_tx_desc = NULL;
198169b4b095SJeff Kirsher 		np->tx_skb[i].next_tx_ctx = NULL;
198269b4b095SJeff Kirsher 	}
198369b4b095SJeff Kirsher }
198469b4b095SJeff Kirsher 
198569b4b095SJeff Kirsher static int nv_init_ring(struct net_device *dev)
198669b4b095SJeff Kirsher {
198769b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
198869b4b095SJeff Kirsher 
198969b4b095SJeff Kirsher 	nv_init_tx(dev);
199069b4b095SJeff Kirsher 	nv_init_rx(dev);
199169b4b095SJeff Kirsher 
199269b4b095SJeff Kirsher 	if (!nv_optimized(np))
199369b4b095SJeff Kirsher 		return nv_alloc_rx(dev);
199469b4b095SJeff Kirsher 	else
199569b4b095SJeff Kirsher 		return nv_alloc_rx_optimized(dev);
199669b4b095SJeff Kirsher }
199769b4b095SJeff Kirsher 
199869b4b095SJeff Kirsher static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
199969b4b095SJeff Kirsher {
200069b4b095SJeff Kirsher 	if (tx_skb->dma) {
200169b4b095SJeff Kirsher 		if (tx_skb->dma_single)
20027598b349SZhu Yanjun 			dma_unmap_single(&np->pci_dev->dev, tx_skb->dma,
200369b4b095SJeff Kirsher 					 tx_skb->dma_len,
20047598b349SZhu Yanjun 					 DMA_TO_DEVICE);
200569b4b095SJeff Kirsher 		else
2006ca43a0c7SZhu Yanjun 			dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
200769b4b095SJeff Kirsher 				       tx_skb->dma_len,
2008ca43a0c7SZhu Yanjun 				       DMA_TO_DEVICE);
200969b4b095SJeff Kirsher 		tx_skb->dma = 0;
201069b4b095SJeff Kirsher 	}
201169b4b095SJeff Kirsher }
201269b4b095SJeff Kirsher 
201369b4b095SJeff Kirsher static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
201469b4b095SJeff Kirsher {
201569b4b095SJeff Kirsher 	nv_unmap_txskb(np, tx_skb);
201669b4b095SJeff Kirsher 	if (tx_skb->skb) {
201769b4b095SJeff Kirsher 		dev_kfree_skb_any(tx_skb->skb);
201869b4b095SJeff Kirsher 		tx_skb->skb = NULL;
201969b4b095SJeff Kirsher 		return 1;
202069b4b095SJeff Kirsher 	}
202169b4b095SJeff Kirsher 	return 0;
202269b4b095SJeff Kirsher }
202369b4b095SJeff Kirsher 
202469b4b095SJeff Kirsher static void nv_drain_tx(struct net_device *dev)
202569b4b095SJeff Kirsher {
202669b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
202769b4b095SJeff Kirsher 	unsigned int i;
202869b4b095SJeff Kirsher 
202969b4b095SJeff Kirsher 	for (i = 0; i < np->tx_ring_size; i++) {
203069b4b095SJeff Kirsher 		if (!nv_optimized(np)) {
203169b4b095SJeff Kirsher 			np->tx_ring.orig[i].flaglen = 0;
203269b4b095SJeff Kirsher 			np->tx_ring.orig[i].buf = 0;
203369b4b095SJeff Kirsher 		} else {
203469b4b095SJeff Kirsher 			np->tx_ring.ex[i].flaglen = 0;
203569b4b095SJeff Kirsher 			np->tx_ring.ex[i].txvlan = 0;
203669b4b095SJeff Kirsher 			np->tx_ring.ex[i].bufhigh = 0;
203769b4b095SJeff Kirsher 			np->tx_ring.ex[i].buflow = 0;
203869b4b095SJeff Kirsher 		}
2039f5d827aeSdavid decotigny 		if (nv_release_txskb(np, &np->tx_skb[i])) {
2040f5d827aeSdavid decotigny 			u64_stats_update_begin(&np->swstats_tx_syncp);
2041f4b633b9SZhu Yanjun 			nv_txrx_stats_inc(stat_tx_dropped);
2042f5d827aeSdavid decotigny 			u64_stats_update_end(&np->swstats_tx_syncp);
2043f5d827aeSdavid decotigny 		}
204469b4b095SJeff Kirsher 		np->tx_skb[i].dma = 0;
204569b4b095SJeff Kirsher 		np->tx_skb[i].dma_len = 0;
204669b4b095SJeff Kirsher 		np->tx_skb[i].dma_single = 0;
204769b4b095SJeff Kirsher 		np->tx_skb[i].first_tx_desc = NULL;
204869b4b095SJeff Kirsher 		np->tx_skb[i].next_tx_ctx = NULL;
204969b4b095SJeff Kirsher 	}
205069b4b095SJeff Kirsher 	np->tx_pkts_in_progress = 0;
205169b4b095SJeff Kirsher 	np->tx_change_owner = NULL;
205269b4b095SJeff Kirsher 	np->tx_end_flip = NULL;
205369b4b095SJeff Kirsher }
205469b4b095SJeff Kirsher 
205569b4b095SJeff Kirsher static void nv_drain_rx(struct net_device *dev)
205669b4b095SJeff Kirsher {
205769b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
205869b4b095SJeff Kirsher 	int i;
205969b4b095SJeff Kirsher 
206069b4b095SJeff Kirsher 	for (i = 0; i < np->rx_ring_size; i++) {
206169b4b095SJeff Kirsher 		if (!nv_optimized(np)) {
206269b4b095SJeff Kirsher 			np->rx_ring.orig[i].flaglen = 0;
206369b4b095SJeff Kirsher 			np->rx_ring.orig[i].buf = 0;
206469b4b095SJeff Kirsher 		} else {
206569b4b095SJeff Kirsher 			np->rx_ring.ex[i].flaglen = 0;
206669b4b095SJeff Kirsher 			np->rx_ring.ex[i].txvlan = 0;
206769b4b095SJeff Kirsher 			np->rx_ring.ex[i].bufhigh = 0;
206869b4b095SJeff Kirsher 			np->rx_ring.ex[i].buflow = 0;
206969b4b095SJeff Kirsher 		}
207069b4b095SJeff Kirsher 		wmb();
207169b4b095SJeff Kirsher 		if (np->rx_skb[i].skb) {
20727598b349SZhu Yanjun 			dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma,
207369b4b095SJeff Kirsher 					 (skb_end_pointer(np->rx_skb[i].skb) -
207469b4b095SJeff Kirsher 					 np->rx_skb[i].skb->data),
20757598b349SZhu Yanjun 					 DMA_FROM_DEVICE);
207669b4b095SJeff Kirsher 			dev_kfree_skb(np->rx_skb[i].skb);
207769b4b095SJeff Kirsher 			np->rx_skb[i].skb = NULL;
207869b4b095SJeff Kirsher 		}
207969b4b095SJeff Kirsher 	}
208069b4b095SJeff Kirsher }
208169b4b095SJeff Kirsher 
208269b4b095SJeff Kirsher static void nv_drain_rxtx(struct net_device *dev)
208369b4b095SJeff Kirsher {
208469b4b095SJeff Kirsher 	nv_drain_tx(dev);
208569b4b095SJeff Kirsher 	nv_drain_rx(dev);
208669b4b095SJeff Kirsher }
208769b4b095SJeff Kirsher 
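/* put_tx_ctx and get_tx_ctx walk the same tx_skb array, so their
 * difference (mod tx_ring_size) is the number of slots in flight.
 * Worked example with tx_ring_size = 256: put - get = 10 gives
 * 256 - ((256 + 10) % 256) = 246 empty slots; after the producer
 * wraps, put - get = -10 gives 256 - ((256 - 10) % 256) = 10.
 */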
208869b4b095SJeff Kirsher static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
208969b4b095SJeff Kirsher {
209069b4b095SJeff Kirsher 	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
209169b4b095SJeff Kirsher }
209269b4b095SJeff Kirsher 
209369b4b095SJeff Kirsher static void nv_legacybackoff_reseed(struct net_device *dev)
209469b4b095SJeff Kirsher {
209569b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
209669b4b095SJeff Kirsher 	u32 reg;
209769b4b095SJeff Kirsher 	u32 low;
209869b4b095SJeff Kirsher 	int tx_status = 0;
209969b4b095SJeff Kirsher 
210069b4b095SJeff Kirsher 	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
210169b4b095SJeff Kirsher 	get_random_bytes(&low, sizeof(low));
210269b4b095SJeff Kirsher 	reg |= low & NVREG_SLOTTIME_MASK;
210369b4b095SJeff Kirsher 
210469b4b095SJeff Kirsher 	/* Need to stop tx before the change takes effect.
210569b4b095SJeff Kirsher 	 * Caller already holds np->lock.
210669b4b095SJeff Kirsher 	 */
210769b4b095SJeff Kirsher 	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
210869b4b095SJeff Kirsher 	if (tx_status)
210969b4b095SJeff Kirsher 		nv_stop_tx(dev);
211069b4b095SJeff Kirsher 	nv_stop_rx(dev);
211169b4b095SJeff Kirsher 	writel(reg, base + NvRegSlotTime);
211269b4b095SJeff Kirsher 	if (tx_status)
211369b4b095SJeff Kirsher 		nv_start_tx(dev);
211469b4b095SJeff Kirsher 	nv_start_rx(dev);
211569b4b095SJeff Kirsher }
211669b4b095SJeff Kirsher 
211769b4b095SJeff Kirsher /* Gear Backoff Seeds */
211869b4b095SJeff Kirsher #define BACKOFF_SEEDSET_ROWS	8
211969b4b095SJeff Kirsher #define BACKOFF_SEEDSET_LFSRS	15
212069b4b095SJeff Kirsher 
212169b4b095SJeff Kirsher /* Known Good seed sets */
212269b4b095SJeff Kirsher static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
212369b4b095SJeff Kirsher 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
212469b4b095SJeff Kirsher 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
212569b4b095SJeff Kirsher 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
212669b4b095SJeff Kirsher 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
212769b4b095SJeff Kirsher 	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
212869b4b095SJeff Kirsher 	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
212969b4b095SJeff Kirsher 	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
213069b4b095SJeff Kirsher 	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
213169b4b095SJeff Kirsher 
213269b4b095SJeff Kirsher static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
213369b4b095SJeff Kirsher 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
213469b4b095SJeff Kirsher 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
213569b4b095SJeff Kirsher 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
213669b4b095SJeff Kirsher 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
213769b4b095SJeff Kirsher 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
213869b4b095SJeff Kirsher 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
213969b4b095SJeff Kirsher 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
214069b4b095SJeff Kirsher 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
214169b4b095SJeff Kirsher 
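/* Reseed the gear-mode backoff engine: build a combined seed from three
 * random 12-bit values for the free-running LFSR, then pick one row at
 * random from the known-good tables above and program each of the
 * BACKOFF_SEEDSET_LFSRS gear LFSRs through NvRegBackOffControl.
 */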
214269b4b095SJeff Kirsher static void nv_gear_backoff_reseed(struct net_device *dev)
214369b4b095SJeff Kirsher {
214469b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
214569b4b095SJeff Kirsher 	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
214669b4b095SJeff Kirsher 	u32 temp, seedset, combinedSeed;
214769b4b095SJeff Kirsher 	int i;
214869b4b095SJeff Kirsher 
214969b4b095SJeff Kirsher 	/* Set up the seed for the free-running LFSR */
215069b4b095SJeff Kirsher 	/* We read three random 12-bit values and swizzle their
215169b4b095SJeff Kirsher 	   bits around to increase randomness */
215269b4b095SJeff Kirsher 	get_random_bytes(&miniseed1, sizeof(miniseed1));
215369b4b095SJeff Kirsher 	miniseed1 &= 0x0fff;
215469b4b095SJeff Kirsher 	if (miniseed1 == 0)
215569b4b095SJeff Kirsher 		miniseed1 = 0xabc;
215669b4b095SJeff Kirsher 
215769b4b095SJeff Kirsher 	get_random_bytes(&miniseed2, sizeof(miniseed2));
215869b4b095SJeff Kirsher 	miniseed2 &= 0x0fff;
215969b4b095SJeff Kirsher 	if (miniseed2 == 0)
216069b4b095SJeff Kirsher 		miniseed2 = 0xabc;
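	/* Swap the top and bottom nibbles of the 12-bit value, keeping the
	 * middle nibble in place: 0xabc becomes 0xcba.
	 */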
216169b4b095SJeff Kirsher 	miniseed2_reversed =
216269b4b095SJeff Kirsher 		((miniseed2 & 0xF00) >> 8) |
216369b4b095SJeff Kirsher 		 (miniseed2 & 0x0F0) |
216469b4b095SJeff Kirsher 		 ((miniseed2 & 0x00F) << 8);
216569b4b095SJeff Kirsher 
216669b4b095SJeff Kirsher 	get_random_bytes(&miniseed3, sizeof(miniseed3));
216769b4b095SJeff Kirsher 	miniseed3 &= 0x0fff;
216869b4b095SJeff Kirsher 	if (miniseed3 == 0)
216969b4b095SJeff Kirsher 		miniseed3 = 0xabc;
217069b4b095SJeff Kirsher 	miniseed3_reversed =
217169b4b095SJeff Kirsher 		((miniseed3 & 0xF00) >> 8) |
217269b4b095SJeff Kirsher 		 (miniseed3 & 0x0F0) |
217369b4b095SJeff Kirsher 		 ((miniseed3 & 0x00F) << 8);
217469b4b095SJeff Kirsher 
217569b4b095SJeff Kirsher 	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
217669b4b095SJeff Kirsher 		       (miniseed2 ^ miniseed3_reversed);
217769b4b095SJeff Kirsher 
217869b4b095SJeff Kirsher 	/* Seeds cannot be zero */
217969b4b095SJeff Kirsher 	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
218069b4b095SJeff Kirsher 		combinedSeed |= 0x08;
218169b4b095SJeff Kirsher 	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
218269b4b095SJeff Kirsher 		combinedSeed |= 0x8000;
218369b4b095SJeff Kirsher 
218469b4b095SJeff Kirsher 	/* No need to disable tx here */
218569b4b095SJeff Kirsher 	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
218669b4b095SJeff Kirsher 	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
218769b4b095SJeff Kirsher 	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
218869b4b095SJeff Kirsher 	writel(temp, base + NvRegBackOffControl);
218969b4b095SJeff Kirsher 
219069b4b095SJeff Kirsher 	/* Setup seeds for all gear LFSRs. */
219169b4b095SJeff Kirsher 	get_random_bytes(&seedset, sizeof(seedset));
219269b4b095SJeff Kirsher 	seedset = seedset % BACKOFF_SEEDSET_ROWS;
219369b4b095SJeff Kirsher 	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
219469b4b095SJeff Kirsher 		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
219569b4b095SJeff Kirsher 		temp |= main_seedset[seedset][i-1] & 0x3ff;
219669b4b095SJeff Kirsher 		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
219769b4b095SJeff Kirsher 		writel(temp, base + NvRegBackOffControl);
219869b4b095SJeff Kirsher 	}
219969b4b095SJeff Kirsher }
220069b4b095SJeff Kirsher 
220169b4b095SJeff Kirsher /*
220269b4b095SJeff Kirsher  * nv_start_xmit: dev->hard_start_xmit function
220369b4b095SJeff Kirsher  * Called with netif_tx_lock held.
220469b4b095SJeff Kirsher  */
220569b4b095SJeff Kirsher static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
220669b4b095SJeff Kirsher {
220769b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
220869b4b095SJeff Kirsher 	u32 tx_flags = 0;
220969b4b095SJeff Kirsher 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
221069b4b095SJeff Kirsher 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
221169b4b095SJeff Kirsher 	unsigned int i;
221269b4b095SJeff Kirsher 	u32 offset = 0;
221369b4b095SJeff Kirsher 	u32 bcnt;
221469b4b095SJeff Kirsher 	u32 size = skb_headlen(skb);
221569b4b095SJeff Kirsher 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
221669b4b095SJeff Kirsher 	u32 empty_slots;
221769b4b095SJeff Kirsher 	struct ring_desc *put_tx;
221869b4b095SJeff Kirsher 	struct ring_desc *start_tx;
221969b4b095SJeff Kirsher 	struct ring_desc *prev_tx;
222069b4b095SJeff Kirsher 	struct nv_skb_map *prev_tx_ctx;
2221f7f22874SNeil Horman 	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
222269b4b095SJeff Kirsher 	unsigned long flags;
22235d8876e2SZhu Yanjun 	netdev_tx_t ret = NETDEV_TX_OK;
222469b4b095SJeff Kirsher 
222569b4b095SJeff Kirsher 	/* add fragments to entries count */
222669b4b095SJeff Kirsher 	for (i = 0; i < fragments; i++) {
2227e45a6187Sdavid decotigny 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
22289e903e08SEric Dumazet 
2229e45a6187Sdavid decotigny 		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2230e45a6187Sdavid decotigny 			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
223169b4b095SJeff Kirsher 	}
223269b4b095SJeff Kirsher 
223369b4b095SJeff Kirsher 	spin_lock_irqsave(&np->lock, flags);
223469b4b095SJeff Kirsher 	empty_slots = nv_get_empty_tx_slots(np);
223569b4b095SJeff Kirsher 	if (unlikely(empty_slots <= entries)) {
223669b4b095SJeff Kirsher 		netif_stop_queue(dev);
223769b4b095SJeff Kirsher 		np->tx_stop = 1;
223869b4b095SJeff Kirsher 		spin_unlock_irqrestore(&np->lock, flags);
22395d8876e2SZhu Yanjun 
22405d8876e2SZhu Yanjun 		/* Normal and/or xmit_more packets have filled up the tx
22415d8876e2SZhu Yanjun 		 * descriptors, so the NIC tx register must still be kicked.
22425d8876e2SZhu Yanjun 		 */
22435d8876e2SZhu Yanjun 		ret = NETDEV_TX_BUSY;
22445d8876e2SZhu Yanjun 		goto txkick;
224569b4b095SJeff Kirsher 	}
224669b4b095SJeff Kirsher 	spin_unlock_irqrestore(&np->lock, flags);
224769b4b095SJeff Kirsher 
224869b4b095SJeff Kirsher 	start_tx = put_tx = np->put_tx.orig;
224969b4b095SJeff Kirsher 
225069b4b095SJeff Kirsher 	/* setup the header buffer */
225169b4b095SJeff Kirsher 	do {
225269b4b095SJeff Kirsher 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
22537598b349SZhu Yanjun 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
22547598b349SZhu Yanjun 						     skb->data + offset, bcnt,
22557598b349SZhu Yanjun 						     DMA_TO_DEVICE);
225639e50d96SZhu Yanjun 		if (unlikely(dma_mapping_error(&np->pci_dev->dev,
225739e50d96SZhu Yanjun 					       np->put_tx_ctx->dma))) {
2258612a7c4eSLarry Finger 			/* on DMA mapping error - drop the packet */
22591616566cSEric W. Biederman 			dev_kfree_skb_any(skb);
2260612a7c4eSLarry Finger 			u64_stats_update_begin(&np->swstats_tx_syncp);
2261f4b633b9SZhu Yanjun 			nv_txrx_stats_inc(stat_tx_dropped);
2262612a7c4eSLarry Finger 			u64_stats_update_end(&np->swstats_tx_syncp);
22635d8876e2SZhu Yanjun 
22645d8876e2SZhu Yanjun 			ret = NETDEV_TX_OK;
22655d8876e2SZhu Yanjun 
22665d8876e2SZhu Yanjun 			goto dma_error;
2267612a7c4eSLarry Finger 		}
226869b4b095SJeff Kirsher 		np->put_tx_ctx->dma_len = bcnt;
226969b4b095SJeff Kirsher 		np->put_tx_ctx->dma_single = 1;
227069b4b095SJeff Kirsher 		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
227169b4b095SJeff Kirsher 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
227269b4b095SJeff Kirsher 
227369b4b095SJeff Kirsher 		tx_flags = np->tx_flags;
227469b4b095SJeff Kirsher 		offset += bcnt;
227569b4b095SJeff Kirsher 		size -= bcnt;
227669b4b095SJeff Kirsher 		if (unlikely(put_tx++ == np->last_tx.orig))
2277c360f2b5SZhu Yanjun 			put_tx = np->tx_ring.orig;
227869b4b095SJeff Kirsher 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
227941b0cd36SZhu Yanjun 			np->put_tx_ctx = np->tx_skb;
228069b4b095SJeff Kirsher 	} while (size);
228169b4b095SJeff Kirsher 
228269b4b095SJeff Kirsher 	/* setup the fragments */
228369b4b095SJeff Kirsher 	for (i = 0; i < fragments; i++) {
22849e903e08SEric Dumazet 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2285e45a6187Sdavid decotigny 		u32 frag_size = skb_frag_size(frag);
228669b4b095SJeff Kirsher 		offset = 0;
228769b4b095SJeff Kirsher 
228869b4b095SJeff Kirsher 		do {
2289f7f22874SNeil Horman 			if (!start_tx_ctx)
2290f7f22874SNeil Horman 				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2291f7f22874SNeil Horman 
2292e45a6187Sdavid decotigny 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2293671173c3SIan Campbell 			np->put_tx_ctx->dma = skb_frag_dma_map(
2294671173c3SIan Campbell 							&np->pci_dev->dev,
2295671173c3SIan Campbell 							frag, offset,
2296671173c3SIan Campbell 							bcnt,
22975d6bcdfeSIan Campbell 							DMA_TO_DEVICE);
229839e50d96SZhu Yanjun 			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
229939e50d96SZhu Yanjun 						       np->put_tx_ctx->dma))) {
2300f7f22874SNeil Horman 
2301f7f22874SNeil Horman 				/* Unwind the mapped fragments */
2302f7f22874SNeil Horman 				do {
2303f7f22874SNeil Horman 					nv_unmap_txskb(np, start_tx_ctx);
2304f7f22874SNeil Horman 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
230541b0cd36SZhu Yanjun 						tmp_tx_ctx = np->tx_skb;
2306f7f22874SNeil Horman 				} while (tmp_tx_ctx != np->put_tx_ctx);
23071616566cSEric W. Biederman 				dev_kfree_skb_any(skb);
2308f7f22874SNeil Horman 				np->put_tx_ctx = start_tx_ctx;
2309f7f22874SNeil Horman 				u64_stats_update_begin(&np->swstats_tx_syncp);
2310f4b633b9SZhu Yanjun 				nv_txrx_stats_inc(stat_tx_dropped);
2311f7f22874SNeil Horman 				u64_stats_update_end(&np->swstats_tx_syncp);
23125d8876e2SZhu Yanjun 
23135d8876e2SZhu Yanjun 				ret = NETDEV_TX_OK;
23145d8876e2SZhu Yanjun 
23155d8876e2SZhu Yanjun 				goto dma_error;
2316f7f22874SNeil Horman 			}
2317f7f22874SNeil Horman 
231869b4b095SJeff Kirsher 			np->put_tx_ctx->dma_len = bcnt;
231969b4b095SJeff Kirsher 			np->put_tx_ctx->dma_single = 0;
232069b4b095SJeff Kirsher 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
232169b4b095SJeff Kirsher 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
232269b4b095SJeff Kirsher 
232369b4b095SJeff Kirsher 			offset += bcnt;
2324e45a6187Sdavid decotigny 			frag_size -= bcnt;
232569b4b095SJeff Kirsher 			if (unlikely(put_tx++ == np->last_tx.orig))
2326c360f2b5SZhu Yanjun 				put_tx = np->tx_ring.orig;
232769b4b095SJeff Kirsher 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
232841b0cd36SZhu Yanjun 				np->put_tx_ctx = np->tx_skb;
2329e45a6187Sdavid decotigny 		} while (frag_size);
233069b4b095SJeff Kirsher 	}
233169b4b095SJeff Kirsher 
2332c360f2b5SZhu Yanjun 	if (unlikely(put_tx == np->tx_ring.orig))
23330d728b84SZhu Yanjun 		prev_tx = np->last_tx.orig;
23340d728b84SZhu Yanjun 	else
23350d728b84SZhu Yanjun 		prev_tx = put_tx - 1;
23360d728b84SZhu Yanjun 
233741b0cd36SZhu Yanjun 	if (unlikely(np->put_tx_ctx == np->tx_skb))
23380d728b84SZhu Yanjun 		prev_tx_ctx = np->last_tx_ctx;
23390d728b84SZhu Yanjun 	else
23400d728b84SZhu Yanjun 		prev_tx_ctx = np->put_tx_ctx - 1;
23410d728b84SZhu Yanjun 
234269b4b095SJeff Kirsher 	/* set last fragment flag  */
234369b4b095SJeff Kirsher 	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
234469b4b095SJeff Kirsher 
234569b4b095SJeff Kirsher 	/* save skb in this slot's context area */
234669b4b095SJeff Kirsher 	prev_tx_ctx->skb = skb;
234769b4b095SJeff Kirsher 
234869b4b095SJeff Kirsher 	if (skb_is_gso(skb))
234969b4b095SJeff Kirsher 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
235069b4b095SJeff Kirsher 	else
235169b4b095SJeff Kirsher 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
235269b4b095SJeff Kirsher 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
235369b4b095SJeff Kirsher 
235469b4b095SJeff Kirsher 	spin_lock_irqsave(&np->lock, flags);
235569b4b095SJeff Kirsher 
235669b4b095SJeff Kirsher 	/* set tx flags */
235769b4b095SJeff Kirsher 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2358b8bfca94STom Herbert 
2359b8bfca94STom Herbert 	netdev_sent_queue(np->dev, skb->len);
2360b8bfca94STom Herbert 
236149cbb1c1SWillem de Bruijn 	skb_tx_timestamp(skb);
236249cbb1c1SWillem de Bruijn 
236369b4b095SJeff Kirsher 	np->put_tx.orig = put_tx;
236469b4b095SJeff Kirsher 
236569b4b095SJeff Kirsher 	spin_unlock_irqrestore(&np->lock, flags);
236669b4b095SJeff Kirsher 
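	/* The dma_error label sits inside the conditional so a mapping
	 * failure always kicks the NIC: descriptors queued by earlier
	 * xmit_more packets must be flushed even though this skb was
	 * dropped.
	 */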
23675d8876e2SZhu Yanjun txkick:
23685d8876e2SZhu Yanjun 	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
23695d8876e2SZhu Yanjun 		u32 txrxctl_kick;
23705d8876e2SZhu Yanjun dma_error:
23715d8876e2SZhu Yanjun 		txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
23725d8876e2SZhu Yanjun 		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
23735d8876e2SZhu Yanjun 	}
23745d8876e2SZhu Yanjun 
23755d8876e2SZhu Yanjun 	return ret;
237669b4b095SJeff Kirsher }
237769b4b095SJeff Kirsher 
237869b4b095SJeff Kirsher static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
237969b4b095SJeff Kirsher 					   struct net_device *dev)
238069b4b095SJeff Kirsher {
238169b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
238269b4b095SJeff Kirsher 	u32 tx_flags = 0;
238369b4b095SJeff Kirsher 	u32 tx_flags_extra;
238469b4b095SJeff Kirsher 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
238569b4b095SJeff Kirsher 	unsigned int i;
238669b4b095SJeff Kirsher 	u32 offset = 0;
238769b4b095SJeff Kirsher 	u32 bcnt;
238869b4b095SJeff Kirsher 	u32 size = skb_headlen(skb);
238969b4b095SJeff Kirsher 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
239069b4b095SJeff Kirsher 	u32 empty_slots;
239169b4b095SJeff Kirsher 	struct ring_desc_ex *put_tx;
239269b4b095SJeff Kirsher 	struct ring_desc_ex *start_tx;
239369b4b095SJeff Kirsher 	struct ring_desc_ex *prev_tx;
239469b4b095SJeff Kirsher 	struct nv_skb_map *prev_tx_ctx;
2395f7f22874SNeil Horman 	struct nv_skb_map *start_tx_ctx = NULL;
2396f7f22874SNeil Horman 	struct nv_skb_map *tmp_tx_ctx = NULL;
239769b4b095SJeff Kirsher 	unsigned long flags;
23985d8876e2SZhu Yanjun 	netdev_tx_t ret = NETDEV_TX_OK;
239969b4b095SJeff Kirsher 
240069b4b095SJeff Kirsher 	/* add fragments to entries count */
240169b4b095SJeff Kirsher 	for (i = 0; i < fragments; i++) {
2402e45a6187Sdavid decotigny 		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
24039e903e08SEric Dumazet 
2404e45a6187Sdavid decotigny 		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2405e45a6187Sdavid decotigny 			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
240669b4b095SJeff Kirsher 	}
240769b4b095SJeff Kirsher 
240869b4b095SJeff Kirsher 	spin_lock_irqsave(&np->lock, flags);
240969b4b095SJeff Kirsher 	empty_slots = nv_get_empty_tx_slots(np);
241069b4b095SJeff Kirsher 	if (unlikely(empty_slots <= entries)) {
241169b4b095SJeff Kirsher 		netif_stop_queue(dev);
241269b4b095SJeff Kirsher 		np->tx_stop = 1;
241369b4b095SJeff Kirsher 		spin_unlock_irqrestore(&np->lock, flags);
24145d8876e2SZhu Yanjun 
24155d8876e2SZhu Yanjun 		/* Normal and/or xmit_more packets have filled up the tx
24165d8876e2SZhu Yanjun 		 * descriptors, so the NIC tx register must still be kicked.
24175d8876e2SZhu Yanjun 		 */
24185d8876e2SZhu Yanjun 		ret = NETDEV_TX_BUSY;
24195d8876e2SZhu Yanjun 
24205d8876e2SZhu Yanjun 		goto txkick;
242169b4b095SJeff Kirsher 	}
242269b4b095SJeff Kirsher 	spin_unlock_irqrestore(&np->lock, flags);
242369b4b095SJeff Kirsher 
242469b4b095SJeff Kirsher 	start_tx = put_tx = np->put_tx.ex;
242569b4b095SJeff Kirsher 	start_tx_ctx = np->put_tx_ctx;
242669b4b095SJeff Kirsher 
242769b4b095SJeff Kirsher 	/* setup the header buffer */
242869b4b095SJeff Kirsher 	do {
242969b4b095SJeff Kirsher 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
24307598b349SZhu Yanjun 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
24317598b349SZhu Yanjun 						     skb->data + offset, bcnt,
24327598b349SZhu Yanjun 						     DMA_TO_DEVICE);
243339e50d96SZhu Yanjun 		if (unlikely(dma_mapping_error(&np->pci_dev->dev,
243439e50d96SZhu Yanjun 					       np->put_tx_ctx->dma))) {
2435612a7c4eSLarry Finger 			/* on DMA mapping error - drop the packet */
24361616566cSEric W. Biederman 			dev_kfree_skb_any(skb);
2437612a7c4eSLarry Finger 			u64_stats_update_begin(&np->swstats_tx_syncp);
2438f4b633b9SZhu Yanjun 			nv_txrx_stats_inc(stat_tx_dropped);
2439612a7c4eSLarry Finger 			u64_stats_update_end(&np->swstats_tx_syncp);
24405d8876e2SZhu Yanjun 
24415d8876e2SZhu Yanjun 			ret = NETDEV_TX_OK;
24425d8876e2SZhu Yanjun 
24435d8876e2SZhu Yanjun 			goto dma_error;
2444612a7c4eSLarry Finger 		}
244569b4b095SJeff Kirsher 		np->put_tx_ctx->dma_len = bcnt;
244669b4b095SJeff Kirsher 		np->put_tx_ctx->dma_single = 1;
244769b4b095SJeff Kirsher 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
244869b4b095SJeff Kirsher 		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
244969b4b095SJeff Kirsher 		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
245069b4b095SJeff Kirsher 
245169b4b095SJeff Kirsher 		tx_flags = NV_TX2_VALID;
245269b4b095SJeff Kirsher 		offset += bcnt;
245369b4b095SJeff Kirsher 		size -= bcnt;
245469b4b095SJeff Kirsher 		if (unlikely(put_tx++ == np->last_tx.ex))
2455c360f2b5SZhu Yanjun 			put_tx = np->tx_ring.ex;
245669b4b095SJeff Kirsher 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
245741b0cd36SZhu Yanjun 			np->put_tx_ctx = np->tx_skb;
245869b4b095SJeff Kirsher 	} while (size);
245969b4b095SJeff Kirsher 
246069b4b095SJeff Kirsher 	/* setup the fragments */
246169b4b095SJeff Kirsher 	for (i = 0; i < fragments; i++) {
246269b4b095SJeff Kirsher 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2463e45a6187Sdavid decotigny 		u32 frag_size = skb_frag_size(frag);
246469b4b095SJeff Kirsher 		offset = 0;
246569b4b095SJeff Kirsher 
246669b4b095SJeff Kirsher 		do {
2467e45a6187Sdavid decotigny 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2468f7f22874SNeil Horman 			if (!start_tx_ctx)
2469f7f22874SNeil Horman 				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2470671173c3SIan Campbell 			np->put_tx_ctx->dma = skb_frag_dma_map(
2471671173c3SIan Campbell 							&np->pci_dev->dev,
2472671173c3SIan Campbell 							frag, offset,
2473671173c3SIan Campbell 							bcnt,
24745d6bcdfeSIan Campbell 							DMA_TO_DEVICE);
2475f7f22874SNeil Horman 
247639e50d96SZhu Yanjun 			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
247739e50d96SZhu Yanjun 						       np->put_tx_ctx->dma))) {
2478f7f22874SNeil Horman 
2479f7f22874SNeil Horman 				/* Unwind the mapped fragments */
2480f7f22874SNeil Horman 				do {
2481f7f22874SNeil Horman 					nv_unmap_txskb(np, start_tx_ctx);
2482f7f22874SNeil Horman 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
248341b0cd36SZhu Yanjun 						tmp_tx_ctx = np->tx_skb;
2484f7f22874SNeil Horman 				} while (tmp_tx_ctx != np->put_tx_ctx);
24851616566cSEric W. Biederman 				dev_kfree_skb_any(skb);
2486f7f22874SNeil Horman 				np->put_tx_ctx = start_tx_ctx;
2487f7f22874SNeil Horman 				u64_stats_update_begin(&np->swstats_tx_syncp);
2488f4b633b9SZhu Yanjun 				nv_txrx_stats_inc(stat_tx_dropped);
2489f7f22874SNeil Horman 				u64_stats_update_end(&np->swstats_tx_syncp);
24905d8876e2SZhu Yanjun 
24915d8876e2SZhu Yanjun 				ret = NETDEV_TX_OK;
24925d8876e2SZhu Yanjun 
24935d8876e2SZhu Yanjun 				goto dma_error;
2494f7f22874SNeil Horman 			}
249569b4b095SJeff Kirsher 			np->put_tx_ctx->dma_len = bcnt;
249669b4b095SJeff Kirsher 			np->put_tx_ctx->dma_single = 0;
249769b4b095SJeff Kirsher 			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
249869b4b095SJeff Kirsher 			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
249969b4b095SJeff Kirsher 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
250069b4b095SJeff Kirsher 
250169b4b095SJeff Kirsher 			offset += bcnt;
2502e45a6187Sdavid decotigny 			frag_size -= bcnt;
250369b4b095SJeff Kirsher 			if (unlikely(put_tx++ == np->last_tx.ex))
2504c360f2b5SZhu Yanjun 				put_tx = np->tx_ring.ex;
250569b4b095SJeff Kirsher 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
250641b0cd36SZhu Yanjun 				np->put_tx_ctx = np->tx_skb;
2507e45a6187Sdavid decotigny 		} while (frag_size);
250869b4b095SJeff Kirsher 	}
250969b4b095SJeff Kirsher 
2510c360f2b5SZhu Yanjun 	if (unlikely(put_tx == np->tx_ring.ex))
25110d728b84SZhu Yanjun 		prev_tx = np->last_tx.ex;
25120d728b84SZhu Yanjun 	else
25130d728b84SZhu Yanjun 		prev_tx = put_tx - 1;
25140d728b84SZhu Yanjun 
251541b0cd36SZhu Yanjun 	if (unlikely(np->put_tx_ctx == np->tx_skb))
25160d728b84SZhu Yanjun 		prev_tx_ctx = np->last_tx_ctx;
25170d728b84SZhu Yanjun 	else
25180d728b84SZhu Yanjun 		prev_tx_ctx = np->put_tx_ctx - 1;
25190d728b84SZhu Yanjun 
252069b4b095SJeff Kirsher 	/* set last fragment flag  */
252169b4b095SJeff Kirsher 	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
252269b4b095SJeff Kirsher 
252369b4b095SJeff Kirsher 	/* save skb in this slot's context area */
252469b4b095SJeff Kirsher 	prev_tx_ctx->skb = skb;
252569b4b095SJeff Kirsher 
252669b4b095SJeff Kirsher 	if (skb_is_gso(skb))
252769b4b095SJeff Kirsher 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
252869b4b095SJeff Kirsher 	else
252969b4b095SJeff Kirsher 		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
253069b4b095SJeff Kirsher 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
253169b4b095SJeff Kirsher 
253269b4b095SJeff Kirsher 	/* vlan tag */
2533df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
253469b4b095SJeff Kirsher 		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2535df8a39deSJiri Pirko 					skb_vlan_tag_get(skb));
253669b4b095SJeff Kirsher 	else
253769b4b095SJeff Kirsher 		start_tx->txvlan = 0;
253869b4b095SJeff Kirsher 
253969b4b095SJeff Kirsher 	spin_lock_irqsave(&np->lock, flags);
254069b4b095SJeff Kirsher 
254169b4b095SJeff Kirsher 	if (np->tx_limit) {
254269b4b095SJeff Kirsher 		/* Limit the number of outstanding tx packets. Set up all
254369b4b095SJeff Kirsher 		 * fragments, but do not set the VALID bit on the first descriptor.
254469b4b095SJeff Kirsher 		 * Save pointers to that descriptor and to the next skb_map element.
254569b4b095SJeff Kirsher 		 */
254669b4b095SJeff Kirsher 
254769b4b095SJeff Kirsher 		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
254869b4b095SJeff Kirsher 			if (!np->tx_change_owner)
254969b4b095SJeff Kirsher 				np->tx_change_owner = start_tx_ctx;
255069b4b095SJeff Kirsher 
255169b4b095SJeff Kirsher 			/* remove VALID bit */
255269b4b095SJeff Kirsher 			tx_flags &= ~NV_TX2_VALID;
255369b4b095SJeff Kirsher 			start_tx_ctx->first_tx_desc = start_tx;
255469b4b095SJeff Kirsher 			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
255569b4b095SJeff Kirsher 			np->tx_end_flip = np->put_tx_ctx;
255669b4b095SJeff Kirsher 		} else {
255769b4b095SJeff Kirsher 			np->tx_pkts_in_progress++;
255869b4b095SJeff Kirsher 		}
255969b4b095SJeff Kirsher 	}
256069b4b095SJeff Kirsher 
256169b4b095SJeff Kirsher 	/* set tx flags */
256269b4b095SJeff Kirsher 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2563b8bfca94STom Herbert 
2564b8bfca94STom Herbert 	netdev_sent_queue(np->dev, skb->len);
2565b8bfca94STom Herbert 
256649cbb1c1SWillem de Bruijn 	skb_tx_timestamp(skb);
256749cbb1c1SWillem de Bruijn 
256869b4b095SJeff Kirsher 	np->put_tx.ex = put_tx;
256969b4b095SJeff Kirsher 
257069b4b095SJeff Kirsher 	spin_unlock_irqrestore(&np->lock, flags);
257169b4b095SJeff Kirsher 
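	/* As in nv_start_xmit(): dma_error lands inside the conditional so
	 * the NIC is always kicked after a mapping failure.
	 */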
25725d8876e2SZhu Yanjun txkick:
25735d8876e2SZhu Yanjun 	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
25745d8876e2SZhu Yanjun 		u32 txrxctl_kick;
25755d8876e2SZhu Yanjun dma_error:
25765d8876e2SZhu Yanjun 		txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
25775d8876e2SZhu Yanjun 		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
25785d8876e2SZhu Yanjun 	}
25795d8876e2SZhu Yanjun 
25805d8876e2SZhu Yanjun 	return ret;
258169b4b095SJeff Kirsher }
258269b4b095SJeff Kirsher 
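/* With tx_limit active, packets beyond NV_TX_LIMIT_COUNT are queued with
 * the VALID bit cleared on their first descriptor; each completion hands
 * one of them to the hardware by setting VALID and kicking the transmitter.
 */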
258369b4b095SJeff Kirsher static inline void nv_tx_flip_ownership(struct net_device *dev)
258469b4b095SJeff Kirsher {
258569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
258669b4b095SJeff Kirsher 
258769b4b095SJeff Kirsher 	np->tx_pkts_in_progress--;
258869b4b095SJeff Kirsher 	if (np->tx_change_owner) {
258969b4b095SJeff Kirsher 		np->tx_change_owner->first_tx_desc->flaglen |=
259069b4b095SJeff Kirsher 			cpu_to_le32(NV_TX2_VALID);
259169b4b095SJeff Kirsher 		np->tx_pkts_in_progress++;
259269b4b095SJeff Kirsher 
259369b4b095SJeff Kirsher 		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
259469b4b095SJeff Kirsher 		if (np->tx_change_owner == np->tx_end_flip)
259569b4b095SJeff Kirsher 			np->tx_change_owner = NULL;
259669b4b095SJeff Kirsher 
259769b4b095SJeff Kirsher 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
259869b4b095SJeff Kirsher 	}
259969b4b095SJeff Kirsher }
260069b4b095SJeff Kirsher 
260169b4b095SJeff Kirsher /*
260269b4b095SJeff Kirsher  * nv_tx_done: check for completed packets, release the skbs.
260369b4b095SJeff Kirsher  *
260469b4b095SJeff Kirsher  * Caller must own np->lock.
260569b4b095SJeff Kirsher  */
260669b4b095SJeff Kirsher static int nv_tx_done(struct net_device *dev, int limit)
260769b4b095SJeff Kirsher {
260869b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
260969b4b095SJeff Kirsher 	u32 flags;
261069b4b095SJeff Kirsher 	int tx_work = 0;
261169b4b095SJeff Kirsher 	struct ring_desc *orig_get_tx = np->get_tx.orig;
2612b8bfca94STom Herbert 	unsigned int bytes_compl = 0;
261369b4b095SJeff Kirsher 
261469b4b095SJeff Kirsher 	while ((np->get_tx.orig != np->put_tx.orig) &&
261569b4b095SJeff Kirsher 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
261669b4b095SJeff Kirsher 	       (tx_work < limit)) {
261769b4b095SJeff Kirsher 
261869b4b095SJeff Kirsher 		nv_unmap_txskb(np, np->get_tx_ctx);
261969b4b095SJeff Kirsher 
262069b4b095SJeff Kirsher 		if (np->desc_ver == DESC_VER_1) {
262169b4b095SJeff Kirsher 			if (flags & NV_TX_LASTPACKET) {
2622b78a6aa3SZhu Yanjun 				if (unlikely(flags & NV_TX_ERROR)) {
2623f5d827aeSdavid decotigny 					if ((flags & NV_TX_RETRYERROR)
2624f5d827aeSdavid decotigny 					    && !(flags & NV_TX_RETRYCOUNT_MASK))
262569b4b095SJeff Kirsher 						nv_legacybackoff_reseed(dev);
2626674aee3bSdavid decotigny 				} else {
2627f4b633b9SZhu Yanjun 					unsigned int len;
2628f4b633b9SZhu Yanjun 
2629f5d827aeSdavid decotigny 					u64_stats_update_begin(&np->swstats_tx_syncp);
2630f4b633b9SZhu Yanjun 					nv_txrx_stats_inc(stat_tx_packets);
2631f4b633b9SZhu Yanjun 					len = np->get_tx_ctx->skb->len;
2632f4b633b9SZhu Yanjun 					nv_txrx_stats_add(stat_tx_bytes, len);
2633f5d827aeSdavid decotigny 					u64_stats_update_end(&np->swstats_tx_syncp);
263469b4b095SJeff Kirsher 				}
2635b8bfca94STom Herbert 				bytes_compl += np->get_tx_ctx->skb->len;
263669b4b095SJeff Kirsher 				dev_kfree_skb_any(np->get_tx_ctx->skb);
263769b4b095SJeff Kirsher 				np->get_tx_ctx->skb = NULL;
263869b4b095SJeff Kirsher 				tx_work++;
263969b4b095SJeff Kirsher 			}
264069b4b095SJeff Kirsher 		} else {
264169b4b095SJeff Kirsher 			if (flags & NV_TX2_LASTPACKET) {
2642b78a6aa3SZhu Yanjun 				if (unlikely(flags & NV_TX2_ERROR)) {
2643f5d827aeSdavid decotigny 					if ((flags & NV_TX2_RETRYERROR)
2644f5d827aeSdavid decotigny 					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
264569b4b095SJeff Kirsher 						nv_legacybackoff_reseed(dev);
2646674aee3bSdavid decotigny 				} else {
2647f4b633b9SZhu Yanjun 					unsigned int len;
2648f4b633b9SZhu Yanjun 
2649f5d827aeSdavid decotigny 					u64_stats_update_begin(&np->swstats_tx_syncp);
2650f4b633b9SZhu Yanjun 					nv_txrx_stats_inc(stat_tx_packets);
2651f4b633b9SZhu Yanjun 					len = np->get_tx_ctx->skb->len;
2652f4b633b9SZhu Yanjun 					nv_txrx_stats_add(stat_tx_bytes, len);
2653f5d827aeSdavid decotigny 					u64_stats_update_end(&np->swstats_tx_syncp);
265469b4b095SJeff Kirsher 				}
2655b8bfca94STom Herbert 				bytes_compl += np->get_tx_ctx->skb->len;
265669b4b095SJeff Kirsher 				dev_kfree_skb_any(np->get_tx_ctx->skb);
265769b4b095SJeff Kirsher 				np->get_tx_ctx->skb = NULL;
265869b4b095SJeff Kirsher 				tx_work++;
265969b4b095SJeff Kirsher 			}
266069b4b095SJeff Kirsher 		}
266169b4b095SJeff Kirsher 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2662c360f2b5SZhu Yanjun 			np->get_tx.orig = np->tx_ring.orig;
266369b4b095SJeff Kirsher 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
266441b0cd36SZhu Yanjun 			np->get_tx_ctx = np->tx_skb;
266569b4b095SJeff Kirsher 	}
2666b8bfca94STom Herbert 
2667b8bfca94STom Herbert 	netdev_completed_queue(np->dev, tx_work, bytes_compl);
2668b8bfca94STom Herbert 
266969b4b095SJeff Kirsher 	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
267069b4b095SJeff Kirsher 		np->tx_stop = 0;
267169b4b095SJeff Kirsher 		netif_wake_queue(dev);
267269b4b095SJeff Kirsher 	}
267369b4b095SJeff Kirsher 	return tx_work;
267469b4b095SJeff Kirsher }
267569b4b095SJeff Kirsher 
267669b4b095SJeff Kirsher static int nv_tx_done_optimized(struct net_device *dev, int limit)
267769b4b095SJeff Kirsher {
267869b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
267969b4b095SJeff Kirsher 	u32 flags;
268069b4b095SJeff Kirsher 	int tx_work = 0;
268169b4b095SJeff Kirsher 	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2682b8bfca94STom Herbert 	unsigned long bytes_cleaned = 0;
268369b4b095SJeff Kirsher 
268469b4b095SJeff Kirsher 	while ((np->get_tx.ex != np->put_tx.ex) &&
268569b4b095SJeff Kirsher 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
268669b4b095SJeff Kirsher 	       (tx_work < limit)) {
268769b4b095SJeff Kirsher 
268869b4b095SJeff Kirsher 		nv_unmap_txskb(np, np->get_tx_ctx);
268969b4b095SJeff Kirsher 
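		/* Only a descriptor carrying NV_TX2_LASTPACKET completes a
		 * packet: an skb may span several descriptors, and its skb
		 * pointer is stored in the last slot's context.
		 */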
269069b4b095SJeff Kirsher 		if (flags & NV_TX2_LASTPACKET) {
2691b78a6aa3SZhu Yanjun 			if (unlikely(flags & NV_TX2_ERROR)) {
2692f5d827aeSdavid decotigny 				if ((flags & NV_TX2_RETRYERROR)
2693f5d827aeSdavid decotigny 				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
269469b4b095SJeff Kirsher 					if (np->driver_data & DEV_HAS_GEAR_MODE)
269569b4b095SJeff Kirsher 						nv_gear_backoff_reseed(dev);
269669b4b095SJeff Kirsher 					else
269769b4b095SJeff Kirsher 						nv_legacybackoff_reseed(dev);
269869b4b095SJeff Kirsher 				}
2699674aee3bSdavid decotigny 			} else {
2700f4b633b9SZhu Yanjun 				unsigned int len;
2701f4b633b9SZhu Yanjun 
2702f5d827aeSdavid decotigny 				u64_stats_update_begin(&np->swstats_tx_syncp);
2703f4b633b9SZhu Yanjun 				nv_txrx_stats_inc(stat_tx_packets);
2704f4b633b9SZhu Yanjun 				len = np->get_tx_ctx->skb->len;
2705f4b633b9SZhu Yanjun 				nv_txrx_stats_add(stat_tx_bytes, len);
2706f5d827aeSdavid decotigny 				u64_stats_update_end(&np->swstats_tx_syncp);
270769b4b095SJeff Kirsher 			}
270869b4b095SJeff Kirsher 
2709b8bfca94STom Herbert 			bytes_cleaned += np->get_tx_ctx->skb->len;
271069b4b095SJeff Kirsher 			dev_kfree_skb_any(np->get_tx_ctx->skb);
271169b4b095SJeff Kirsher 			np->get_tx_ctx->skb = NULL;
271269b4b095SJeff Kirsher 			tx_work++;
271369b4b095SJeff Kirsher 
271469b4b095SJeff Kirsher 			if (np->tx_limit)
271569b4b095SJeff Kirsher 				nv_tx_flip_ownership(dev);
271669b4b095SJeff Kirsher 		}
2717b8bfca94STom Herbert 
271869b4b095SJeff Kirsher 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2719c360f2b5SZhu Yanjun 			np->get_tx.ex = np->tx_ring.ex;
272069b4b095SJeff Kirsher 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
272141b0cd36SZhu Yanjun 			np->get_tx_ctx = np->tx_skb;
272269b4b095SJeff Kirsher 	}
27237505afe2SIgor Maravic 
27247505afe2SIgor Maravic 	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
27257505afe2SIgor Maravic 
272669b4b095SJeff Kirsher 	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
272769b4b095SJeff Kirsher 		np->tx_stop = 0;
272869b4b095SJeff Kirsher 		netif_wake_queue(dev);
272969b4b095SJeff Kirsher 	}
273069b4b095SJeff Kirsher 	return tx_work;
273169b4b095SJeff Kirsher }
273269b4b095SJeff Kirsher 
273369b4b095SJeff Kirsher /*
273469b4b095SJeff Kirsher  * nv_tx_timeout: dev->tx_timeout function
273569b4b095SJeff Kirsher  * Called with netif_tx_lock held.
273669b4b095SJeff Kirsher  */
27370290bd29SMichael S. Tsirkin static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
273869b4b095SJeff Kirsher {
273969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
274069b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
274169b4b095SJeff Kirsher 	u32 status;
274269b4b095SJeff Kirsher 	union ring_type put_tx;
274369b4b095SJeff Kirsher 	int saved_tx_limit;
274469b4b095SJeff Kirsher 
274569b4b095SJeff Kirsher 	if (np->msi_flags & NV_MSI_X_ENABLED)
274669b4b095SJeff Kirsher 		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
274769b4b095SJeff Kirsher 	else
274869b4b095SJeff Kirsher 		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
274969b4b095SJeff Kirsher 
27501ec4f2d3SSameer Nanda 	netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
27511ec4f2d3SSameer Nanda 
27521ec4f2d3SSameer Nanda 	if (unlikely(debug_tx_timeout)) {
27531ec4f2d3SSameer Nanda 		int i;
275469b4b095SJeff Kirsher 
275569b4b095SJeff Kirsher 		netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
275669b4b095SJeff Kirsher 		netdev_info(dev, "Dumping tx registers\n");
275769b4b095SJeff Kirsher 		for (i = 0; i <= np->register_size; i += 32) {
275869b4b095SJeff Kirsher 			netdev_info(dev,
27591ec4f2d3SSameer Nanda 				    "%3x: %08x %08x %08x %08x "
27601ec4f2d3SSameer Nanda 				    "%08x %08x %08x %08x\n",
276169b4b095SJeff Kirsher 				    i,
276269b4b095SJeff Kirsher 				    readl(base + i + 0), readl(base + i + 4),
276369b4b095SJeff Kirsher 				    readl(base + i + 8), readl(base + i + 12),
276469b4b095SJeff Kirsher 				    readl(base + i + 16), readl(base + i + 20),
276569b4b095SJeff Kirsher 				    readl(base + i + 24), readl(base + i + 28));
276669b4b095SJeff Kirsher 		}
276769b4b095SJeff Kirsher 		netdev_info(dev, "Dumping tx ring\n");
276869b4b095SJeff Kirsher 		for (i = 0; i < np->tx_ring_size; i += 4) {
276969b4b095SJeff Kirsher 			if (!nv_optimized(np)) {
277069b4b095SJeff Kirsher 				netdev_info(dev,
27711ec4f2d3SSameer Nanda 					    "%03x: %08x %08x // %08x %08x "
27721ec4f2d3SSameer Nanda 					    "// %08x %08x // %08x %08x\n",
277369b4b095SJeff Kirsher 					    i,
277469b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.orig[i].buf),
277569b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.orig[i].flaglen),
277669b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.orig[i+1].buf),
277769b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
277869b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.orig[i+2].buf),
277969b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
278069b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.orig[i+3].buf),
278169b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
278269b4b095SJeff Kirsher 			} else {
278369b4b095SJeff Kirsher 				netdev_info(dev,
27841ec4f2d3SSameer Nanda 					    "%03x: %08x %08x %08x "
27851ec4f2d3SSameer Nanda 					    "// %08x %08x %08x "
27861ec4f2d3SSameer Nanda 					    "// %08x %08x %08x "
27871ec4f2d3SSameer Nanda 					    "// %08x %08x %08x\n",
278869b4b095SJeff Kirsher 					    i,
278969b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
279069b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i].buflow),
279169b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i].flaglen),
279269b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
279369b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
279469b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
279569b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
279669b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
279769b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
279869b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
279969b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
280069b4b095SJeff Kirsher 					    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
280169b4b095SJeff Kirsher 			}
280269b4b095SJeff Kirsher 		}
28031ec4f2d3SSameer Nanda 	}
280469b4b095SJeff Kirsher 
280569b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
280669b4b095SJeff Kirsher 
280769b4b095SJeff Kirsher 	/* 1) stop tx engine */
280869b4b095SJeff Kirsher 	nv_stop_tx(dev);
280969b4b095SJeff Kirsher 
281069b4b095SJeff Kirsher 	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
281169b4b095SJeff Kirsher 	saved_tx_limit = np->tx_limit;
281269b4b095SJeff Kirsher 	np->tx_limit = 0; /* prevent giving HW any limited pkts */
281369b4b095SJeff Kirsher 	np->tx_stop = 0;  /* prevent waking tx queue */
281469b4b095SJeff Kirsher 	if (!nv_optimized(np))
281569b4b095SJeff Kirsher 		nv_tx_done(dev, np->tx_ring_size);
281669b4b095SJeff Kirsher 	else
281769b4b095SJeff Kirsher 		nv_tx_done_optimized(dev, np->tx_ring_size);
281869b4b095SJeff Kirsher 
281969b4b095SJeff Kirsher 	/* save current HW position */
282069b4b095SJeff Kirsher 	if (np->tx_change_owner)
282169b4b095SJeff Kirsher 		put_tx.ex = np->tx_change_owner->first_tx_desc;
282269b4b095SJeff Kirsher 	else
282369b4b095SJeff Kirsher 		put_tx = np->put_tx;
282469b4b095SJeff Kirsher 
282569b4b095SJeff Kirsher 	/* 3) clear all tx state */
282669b4b095SJeff Kirsher 	nv_drain_tx(dev);
282769b4b095SJeff Kirsher 	nv_init_tx(dev);
282869b4b095SJeff Kirsher 
282969b4b095SJeff Kirsher 	/* 4) restore state to current HW position */
283069b4b095SJeff Kirsher 	np->get_tx = np->put_tx = put_tx;
283169b4b095SJeff Kirsher 	np->tx_limit = saved_tx_limit;
283269b4b095SJeff Kirsher 
283369b4b095SJeff Kirsher 	/* 5) restart tx engine */
283469b4b095SJeff Kirsher 	nv_start_tx(dev);
283569b4b095SJeff Kirsher 	netif_wake_queue(dev);
283669b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
283769b4b095SJeff Kirsher }
283869b4b095SJeff Kirsher 
283969b4b095SJeff Kirsher /*
284069b4b095SJeff Kirsher  * Called when the nic notices a mismatch between the actual data len on the
284169b4b095SJeff Kirsher  * wire and the len indicated in the 802 header
284269b4b095SJeff Kirsher  */
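/* Example: for a 64-byte frame whose 802.3 length field reads 40, the
 * frame is trimmed to 40 + ETH_HLEN = 54 bytes; had the field claimed 60
 * (74 bytes total, more than arrived), the frame would be discarded.
 */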
284369b4b095SJeff Kirsher static int nv_getlen(struct net_device *dev, void *packet, int datalen)
284469b4b095SJeff Kirsher {
284569b4b095SJeff Kirsher 	int hdrlen;	/* length of the 802 header */
284669b4b095SJeff Kirsher 	int protolen;	/* length as stored in the proto field */
284769b4b095SJeff Kirsher 
284869b4b095SJeff Kirsher 	/* 1) calculate len according to header */
284969b4b095SJeff Kirsher 	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
285069b4b095SJeff Kirsher 		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
285169b4b095SJeff Kirsher 		hdrlen = VLAN_HLEN;
285269b4b095SJeff Kirsher 	} else {
285369b4b095SJeff Kirsher 		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
285469b4b095SJeff Kirsher 		hdrlen = ETH_HLEN;
285569b4b095SJeff Kirsher 	}
285669b4b095SJeff Kirsher 	if (protolen > ETH_DATA_LEN)
285769b4b095SJeff Kirsher 		return datalen; /* Value in proto field not a len, no checks possible */
285869b4b095SJeff Kirsher 
285969b4b095SJeff Kirsher 	protolen += hdrlen;
286069b4b095SJeff Kirsher 	/* consistency checks: */
286169b4b095SJeff Kirsher 	if (datalen > ETH_ZLEN) {
286269b4b095SJeff Kirsher 		if (datalen >= protolen) {
286369b4b095SJeff Kirsher 			/* more data on wire than in 802 header, trim off
286469b4b095SJeff Kirsher 			 * the additional data.
286569b4b095SJeff Kirsher 			 */
286669b4b095SJeff Kirsher 			return protolen;
286769b4b095SJeff Kirsher 		} else {
286869b4b095SJeff Kirsher 			/* less data on wire than mentioned in header.
286969b4b095SJeff Kirsher 			 * Discard the packet.
287069b4b095SJeff Kirsher 			 */
287169b4b095SJeff Kirsher 			return -1;
287269b4b095SJeff Kirsher 		}
287369b4b095SJeff Kirsher 	} else {
287469b4b095SJeff Kirsher 		/* short packet. Accept only if 802 values are also short */
287569b4b095SJeff Kirsher 		if (protolen > ETH_ZLEN) {
287669b4b095SJeff Kirsher 			return -1;
287769b4b095SJeff Kirsher 		}
287869b4b095SJeff Kirsher 		return datalen;
287969b4b095SJeff Kirsher 	}
288069b4b095SJeff Kirsher }
288169b4b095SJeff Kirsher 
2882f4b633b9SZhu Yanjun static void rx_missing_handler(u32 flags, struct fe_priv *np)
2883f4b633b9SZhu Yanjun {
2884f4b633b9SZhu Yanjun 	if (flags & NV_RX_MISSEDFRAME) {
2885f4b633b9SZhu Yanjun 		u64_stats_update_begin(&np->swstats_rx_syncp);
2886f4b633b9SZhu Yanjun 		nv_txrx_stats_inc(stat_rx_missed_errors);
2887f4b633b9SZhu Yanjun 		u64_stats_update_end(&np->swstats_rx_syncp);
2888f4b633b9SZhu Yanjun 	}
2889f4b633b9SZhu Yanjun }
2890f4b633b9SZhu Yanjun 
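/* Consume completed rx descriptors for the legacy ring formats; the
 * extended-descriptor ring is handled by nv_rx_process_optimized() below.
 */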
289169b4b095SJeff Kirsher static int nv_rx_process(struct net_device *dev, int limit)
289269b4b095SJeff Kirsher {
289369b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
289469b4b095SJeff Kirsher 	u32 flags;
289569b4b095SJeff Kirsher 	int rx_work = 0;
289669b4b095SJeff Kirsher 	struct sk_buff *skb;
289769b4b095SJeff Kirsher 	int len;
289869b4b095SJeff Kirsher 
289969b4b095SJeff Kirsher 	while ((np->get_rx.orig != np->put_rx.orig) &&
290069b4b095SJeff Kirsher 	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
290169b4b095SJeff Kirsher 		(rx_work < limit)) {
290269b4b095SJeff Kirsher 
290369b4b095SJeff Kirsher 		/*
290469b4b095SJeff Kirsher 		 * the packet is for us - immediately tear down the pci mapping.
290569b4b095SJeff Kirsher 		 * TODO: check if a prefetch of the first cacheline improves
290669b4b095SJeff Kirsher 		 * the performance.
290769b4b095SJeff Kirsher 		 */
29087598b349SZhu Yanjun 		dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
290969b4b095SJeff Kirsher 				 np->get_rx_ctx->dma_len,
29107598b349SZhu Yanjun 				 DMA_FROM_DEVICE);
291169b4b095SJeff Kirsher 		skb = np->get_rx_ctx->skb;
291269b4b095SJeff Kirsher 		np->get_rx_ctx->skb = NULL;
291369b4b095SJeff Kirsher 
291469b4b095SJeff Kirsher 		/* look at what we actually got: */
291569b4b095SJeff Kirsher 		if (np->desc_ver == DESC_VER_1) {
291669b4b095SJeff Kirsher 			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
291769b4b095SJeff Kirsher 				len = flags & LEN_MASK_V1;
291869b4b095SJeff Kirsher 				if (unlikely(flags & NV_RX_ERROR)) {
291969b4b095SJeff Kirsher 					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
292069b4b095SJeff Kirsher 						len = nv_getlen(dev, skb->data, len);
292169b4b095SJeff Kirsher 						if (len < 0) {
292269b4b095SJeff Kirsher 							dev_kfree_skb(skb);
292369b4b095SJeff Kirsher 							goto next_pkt;
292469b4b095SJeff Kirsher 						}
292569b4b095SJeff Kirsher 					}
292669b4b095SJeff Kirsher 					/* framing errors are soft errors */
292769b4b095SJeff Kirsher 					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2928cef33c81SAntonio Ospite 						if (flags & NV_RX_SUBTRACT1)
292969b4b095SJeff Kirsher 							len--;
293069b4b095SJeff Kirsher 					}
293169b4b095SJeff Kirsher 					/* the rest are hard errors */
293269b4b095SJeff Kirsher 					else {
2933f4b633b9SZhu Yanjun 						rx_missing_handler(flags, np);
293469b4b095SJeff Kirsher 						dev_kfree_skb(skb);
293569b4b095SJeff Kirsher 						goto next_pkt;
293669b4b095SJeff Kirsher 					}
293769b4b095SJeff Kirsher 				}
293869b4b095SJeff Kirsher 			} else {
293969b4b095SJeff Kirsher 				dev_kfree_skb(skb);
294069b4b095SJeff Kirsher 				goto next_pkt;
294169b4b095SJeff Kirsher 			}
294269b4b095SJeff Kirsher 		} else {
294369b4b095SJeff Kirsher 			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
294469b4b095SJeff Kirsher 				len = flags & LEN_MASK_V2;
294569b4b095SJeff Kirsher 				if (unlikely(flags & NV_RX2_ERROR)) {
294669b4b095SJeff Kirsher 					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
294769b4b095SJeff Kirsher 						len = nv_getlen(dev, skb->data, len);
294869b4b095SJeff Kirsher 						if (len < 0) {
294969b4b095SJeff Kirsher 							dev_kfree_skb(skb);
295069b4b095SJeff Kirsher 							goto next_pkt;
295169b4b095SJeff Kirsher 						}
295269b4b095SJeff Kirsher 					}
295369b4b095SJeff Kirsher 					/* framing errors are soft errors */
295469b4b095SJeff Kirsher 					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2955cef33c81SAntonio Ospite 						if (flags & NV_RX2_SUBTRACT1)
295669b4b095SJeff Kirsher 							len--;
295769b4b095SJeff Kirsher 					}
295869b4b095SJeff Kirsher 					/* the rest are hard errors */
295969b4b095SJeff Kirsher 					else {
296069b4b095SJeff Kirsher 						dev_kfree_skb(skb);
296169b4b095SJeff Kirsher 						goto next_pkt;
296269b4b095SJeff Kirsher 					}
296369b4b095SJeff Kirsher 				}
296469b4b095SJeff Kirsher 				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
296569b4b095SJeff Kirsher 				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
296669b4b095SJeff Kirsher 					skb->ip_summed = CHECKSUM_UNNECESSARY;
296769b4b095SJeff Kirsher 			} else {
296869b4b095SJeff Kirsher 				dev_kfree_skb(skb);
296969b4b095SJeff Kirsher 				goto next_pkt;
297069b4b095SJeff Kirsher 			}
297169b4b095SJeff Kirsher 		}
297269b4b095SJeff Kirsher 		/* got a valid packet - forward it to the network core */
297369b4b095SJeff Kirsher 		skb_put(skb, len);
297469b4b095SJeff Kirsher 		skb->protocol = eth_type_trans(skb, dev);
297569b4b095SJeff Kirsher 		napi_gro_receive(&np->napi, skb);
2976f5d827aeSdavid decotigny 		u64_stats_update_begin(&np->swstats_rx_syncp);
2977f4b633b9SZhu Yanjun 		nv_txrx_stats_inc(stat_rx_packets);
2978f4b633b9SZhu Yanjun 		nv_txrx_stats_add(stat_rx_bytes, len);
2979f5d827aeSdavid decotigny 		u64_stats_update_end(&np->swstats_rx_syncp);
298069b4b095SJeff Kirsher next_pkt:
298169b4b095SJeff Kirsher 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
298264f26abbSZhu Yanjun 			np->get_rx.orig = np->rx_ring.orig;
298369b4b095SJeff Kirsher 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2984a9124ec4SZhu Yanjun 			np->get_rx_ctx = np->rx_skb;
298569b4b095SJeff Kirsher 
298669b4b095SJeff Kirsher 		rx_work++;
298769b4b095SJeff Kirsher 	}
298869b4b095SJeff Kirsher 
298969b4b095SJeff Kirsher 	return rx_work;
299069b4b095SJeff Kirsher }
299169b4b095SJeff Kirsher 
299269b4b095SJeff Kirsher static int nv_rx_process_optimized(struct net_device *dev, int limit)
299369b4b095SJeff Kirsher {
299469b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
299569b4b095SJeff Kirsher 	u32 flags;
299669b4b095SJeff Kirsher 	u32 vlanflags = 0;
299769b4b095SJeff Kirsher 	int rx_work = 0;
299869b4b095SJeff Kirsher 	struct sk_buff *skb;
299969b4b095SJeff Kirsher 	int len;
300069b4b095SJeff Kirsher 
300169b4b095SJeff Kirsher 	while ((np->get_rx.ex != np->put_rx.ex) &&
300269b4b095SJeff Kirsher 	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
300369b4b095SJeff Kirsher 	      (rx_work < limit)) {
300469b4b095SJeff Kirsher 
300569b4b095SJeff Kirsher 		/*
300669b4b095SJeff Kirsher 		 * the packet is for us - immediately tear down the pci mapping.
300769b4b095SJeff Kirsher 		 * TODO: check if a prefetch of the first cacheline improves
300869b4b095SJeff Kirsher 		 * the performance.
300969b4b095SJeff Kirsher 		 */
30107598b349SZhu Yanjun 		dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
301169b4b095SJeff Kirsher 				 np->get_rx_ctx->dma_len,
30127598b349SZhu Yanjun 				 DMA_FROM_DEVICE);
301369b4b095SJeff Kirsher 		skb = np->get_rx_ctx->skb;
301469b4b095SJeff Kirsher 		np->get_rx_ctx->skb = NULL;
301569b4b095SJeff Kirsher 
301669b4b095SJeff Kirsher 		/* look at what we actually got: */
301769b4b095SJeff Kirsher 		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
301869b4b095SJeff Kirsher 			len = flags & LEN_MASK_V2;
301969b4b095SJeff Kirsher 			if (unlikely(flags & NV_RX2_ERROR)) {
302069b4b095SJeff Kirsher 				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
302169b4b095SJeff Kirsher 					len = nv_getlen(dev, skb->data, len);
302269b4b095SJeff Kirsher 					if (len < 0) {
302369b4b095SJeff Kirsher 						dev_kfree_skb(skb);
302469b4b095SJeff Kirsher 						goto next_pkt;
302569b4b095SJeff Kirsher 					}
302669b4b095SJeff Kirsher 				}
302769b4b095SJeff Kirsher 				/* framing errors are soft errors */
302869b4b095SJeff Kirsher 				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
3029cef33c81SAntonio Ospite 					if (flags & NV_RX2_SUBTRACT1)
303069b4b095SJeff Kirsher 						len--;
303169b4b095SJeff Kirsher 				}
303269b4b095SJeff Kirsher 				/* the rest are hard errors */
303369b4b095SJeff Kirsher 				else {
303469b4b095SJeff Kirsher 					dev_kfree_skb(skb);
303569b4b095SJeff Kirsher 					goto next_pkt;
303669b4b095SJeff Kirsher 				}
303769b4b095SJeff Kirsher 			}
303869b4b095SJeff Kirsher 
303969b4b095SJeff Kirsher 			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
304069b4b095SJeff Kirsher 			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
304169b4b095SJeff Kirsher 				skb->ip_summed = CHECKSUM_UNNECESSARY;
304269b4b095SJeff Kirsher 
304369b4b095SJeff Kirsher 			/* got a valid packet - forward it to the network core */
304469b4b095SJeff Kirsher 			skb_put(skb, len);
304569b4b095SJeff Kirsher 			skb->protocol = eth_type_trans(skb, dev);
304669b4b095SJeff Kirsher 			prefetch(skb->data);
304769b4b095SJeff Kirsher 
304869b4b095SJeff Kirsher 			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
304969b4b095SJeff Kirsher 
305069b4b095SJeff Kirsher 			/*
3051f646968fSPatrick McHardy 			 * There's a need to check for NETIF_F_HW_VLAN_CTAG_RX
3052f646968fSPatrick McHardy 			 * here: even if vlan rx accel is disabled,
305369b4b095SJeff Kirsher 			 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
305469b4b095SJeff Kirsher 			 */
3055f646968fSPatrick McHardy 			if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
305669b4b095SJeff Kirsher 			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
305769b4b095SJeff Kirsher 				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
305869b4b095SJeff Kirsher 
305986a9bad3SPatrick McHardy 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
306069b4b095SJeff Kirsher 			}
306169b4b095SJeff Kirsher 			napi_gro_receive(&np->napi, skb);
3062f5d827aeSdavid decotigny 			u64_stats_update_begin(&np->swstats_rx_syncp);
3063f4b633b9SZhu Yanjun 			nv_txrx_stats_inc(stat_rx_packets);
3064f4b633b9SZhu Yanjun 			nv_txrx_stats_add(stat_rx_bytes, len);
3065f5d827aeSdavid decotigny 			u64_stats_update_end(&np->swstats_rx_syncp);
306669b4b095SJeff Kirsher 		} else {
306769b4b095SJeff Kirsher 			dev_kfree_skb(skb);
306869b4b095SJeff Kirsher 		}
306969b4b095SJeff Kirsher next_pkt:
307069b4b095SJeff Kirsher 		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
307164f26abbSZhu Yanjun 			np->get_rx.ex = np->rx_ring.ex;
307269b4b095SJeff Kirsher 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
3073a9124ec4SZhu Yanjun 			np->get_rx_ctx = np->rx_skb;
307469b4b095SJeff Kirsher 
307569b4b095SJeff Kirsher 		rx_work++;
307669b4b095SJeff Kirsher 	}
307769b4b095SJeff Kirsher 
307869b4b095SJeff Kirsher 	return rx_work;
307969b4b095SJeff Kirsher }
308069b4b095SJeff Kirsher 
308169b4b095SJeff Kirsher static void set_bufsize(struct net_device *dev)
308269b4b095SJeff Kirsher {
308369b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
308469b4b095SJeff Kirsher 
308569b4b095SJeff Kirsher 	if (dev->mtu <= ETH_DATA_LEN)
308669b4b095SJeff Kirsher 		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
308769b4b095SJeff Kirsher 	else
308869b4b095SJeff Kirsher 		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
308969b4b095SJeff Kirsher }
309069b4b095SJeff Kirsher 
309169b4b095SJeff Kirsher /*
309269b4b095SJeff Kirsher  * nv_change_mtu: dev->change_mtu function
309369b4b095SJeff Kirsher  * Called with dev_base_lock held for read.
309469b4b095SJeff Kirsher  */
309569b4b095SJeff Kirsher static int nv_change_mtu(struct net_device *dev, int new_mtu)
309669b4b095SJeff Kirsher {
309769b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
309869b4b095SJeff Kirsher 	int old_mtu;
309969b4b095SJeff Kirsher 
310069b4b095SJeff Kirsher 	old_mtu = dev->mtu;
310169b4b095SJeff Kirsher 	dev->mtu = new_mtu;
310269b4b095SJeff Kirsher 
310369b4b095SJeff Kirsher 	/* return early if the buffer sizes will not change */
310469b4b095SJeff Kirsher 	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
310569b4b095SJeff Kirsher 		return 0;
310669b4b095SJeff Kirsher 
310769b4b095SJeff Kirsher 	/* synchronized against open : rtnl_lock() held by caller */
310869b4b095SJeff Kirsher 	if (netif_running(dev)) {
310969b4b095SJeff Kirsher 		u8 __iomem *base = get_hwbase(dev);
311069b4b095SJeff Kirsher 		/*
311169b4b095SJeff Kirsher 		 * It seems that the nic preloads valid ring entries into an
311269b4b095SJeff Kirsher 		 * internal buffer. The procedure for flushing everything is
311369b4b095SJeff Kirsher 		 * guessed; there is probably a simpler approach.
311469b4b095SJeff Kirsher 		 * Changing the MTU is a rare event, so it shouldn't matter.
311569b4b095SJeff Kirsher 		 */
311669b4b095SJeff Kirsher 		nv_disable_irq(dev);
311769b4b095SJeff Kirsher 		nv_napi_disable(dev);
311869b4b095SJeff Kirsher 		netif_tx_lock_bh(dev);
311969b4b095SJeff Kirsher 		netif_addr_lock(dev);
312069b4b095SJeff Kirsher 		spin_lock(&np->lock);
312169b4b095SJeff Kirsher 		/* stop engines */
312269b4b095SJeff Kirsher 		nv_stop_rxtx(dev);
312369b4b095SJeff Kirsher 		nv_txrx_reset(dev);
312469b4b095SJeff Kirsher 		/* drain rx and tx queues */
312569b4b095SJeff Kirsher 		nv_drain_rxtx(dev);
312669b4b095SJeff Kirsher 		/* reinit driver view of the rx queue */
312769b4b095SJeff Kirsher 		set_bufsize(dev);
312869b4b095SJeff Kirsher 		if (nv_init_ring(dev)) {
312969b4b095SJeff Kirsher 			if (!np->in_shutdown)
313069b4b095SJeff Kirsher 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
313169b4b095SJeff Kirsher 		}
313269b4b095SJeff Kirsher 		/* reinit nic view of the rx queue */
313369b4b095SJeff Kirsher 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
313469b4b095SJeff Kirsher 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
313569b4b095SJeff Kirsher 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
313669b4b095SJeff Kirsher 			base + NvRegRingSizes);
313769b4b095SJeff Kirsher 		pci_push(base);
313869b4b095SJeff Kirsher 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
313969b4b095SJeff Kirsher 		pci_push(base);
314069b4b095SJeff Kirsher 
314169b4b095SJeff Kirsher 		/* restart rx and tx engines */
314269b4b095SJeff Kirsher 		nv_start_rxtx(dev);
314369b4b095SJeff Kirsher 		spin_unlock(&np->lock);
314469b4b095SJeff Kirsher 		netif_addr_unlock(dev);
314569b4b095SJeff Kirsher 		netif_tx_unlock_bh(dev);
314669b4b095SJeff Kirsher 		nv_napi_enable(dev);
314769b4b095SJeff Kirsher 		nv_enable_irq(dev);
314869b4b095SJeff Kirsher 	}
314969b4b095SJeff Kirsher 	return 0;
315069b4b095SJeff Kirsher }
315169b4b095SJeff Kirsher 
315269b4b095SJeff Kirsher static void nv_copy_mac_to_hw(struct net_device *dev)
315369b4b095SJeff Kirsher {
315469b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
315569b4b095SJeff Kirsher 	u32 mac[2];
315669b4b095SJeff Kirsher 
315769b4b095SJeff Kirsher 	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
315869b4b095SJeff Kirsher 			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
315969b4b095SJeff Kirsher 	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
316069b4b095SJeff Kirsher 
316169b4b095SJeff Kirsher 	writel(mac[0], base + NvRegMacAddrA);
316269b4b095SJeff Kirsher 	writel(mac[1], base + NvRegMacAddrB);
316369b4b095SJeff Kirsher }
316469b4b095SJeff Kirsher 
316569b4b095SJeff Kirsher /*
316669b4b095SJeff Kirsher  * nv_set_mac_address: dev->set_mac_address function
316769b4b095SJeff Kirsher  * Called with rtnl_lock() held.
316869b4b095SJeff Kirsher  */
316969b4b095SJeff Kirsher static int nv_set_mac_address(struct net_device *dev, void *addr)
317069b4b095SJeff Kirsher {
317169b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
317269b4b095SJeff Kirsher 	struct sockaddr *macaddr = (struct sockaddr *)addr;
317369b4b095SJeff Kirsher 
317469b4b095SJeff Kirsher 	if (!is_valid_ether_addr(macaddr->sa_data))
317569b4b095SJeff Kirsher 		return -EADDRNOTAVAIL;
317669b4b095SJeff Kirsher 
317769b4b095SJeff Kirsher 	/* synchronized against open : rtnl_lock() held by caller */
3178a96d317fSJakub Kicinski 	eth_hw_addr_set(dev, macaddr->sa_data);
317969b4b095SJeff Kirsher 
318069b4b095SJeff Kirsher 	if (netif_running(dev)) {
318169b4b095SJeff Kirsher 		netif_tx_lock_bh(dev);
318269b4b095SJeff Kirsher 		netif_addr_lock(dev);
318369b4b095SJeff Kirsher 		spin_lock_irq(&np->lock);
318469b4b095SJeff Kirsher 
318569b4b095SJeff Kirsher 		/* stop rx engine */
318669b4b095SJeff Kirsher 		nv_stop_rx(dev);
318769b4b095SJeff Kirsher 
318869b4b095SJeff Kirsher 		/* set mac address */
318969b4b095SJeff Kirsher 		nv_copy_mac_to_hw(dev);
319069b4b095SJeff Kirsher 
319169b4b095SJeff Kirsher 		/* restart rx engine */
319269b4b095SJeff Kirsher 		nv_start_rx(dev);
319369b4b095SJeff Kirsher 		spin_unlock_irq(&np->lock);
319469b4b095SJeff Kirsher 		netif_addr_unlock(dev);
319569b4b095SJeff Kirsher 		netif_tx_unlock_bh(dev);
319669b4b095SJeff Kirsher 	} else {
319769b4b095SJeff Kirsher 		nv_copy_mac_to_hw(dev);
319869b4b095SJeff Kirsher 	}
319969b4b095SJeff Kirsher 	return 0;
320069b4b095SJeff Kirsher }
320169b4b095SJeff Kirsher 
320269b4b095SJeff Kirsher /*
320369b4b095SJeff Kirsher  * nv_set_multicast: dev->set_multicast function
320469b4b095SJeff Kirsher  * Called with netif_tx_lock held.
320569b4b095SJeff Kirsher  */
320669b4b095SJeff Kirsher static void nv_set_multicast(struct net_device *dev)
320769b4b095SJeff Kirsher {
320869b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
320969b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
321069b4b095SJeff Kirsher 	u32 addr[2];
321169b4b095SJeff Kirsher 	u32 mask[2];
321269b4b095SJeff Kirsher 	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
321369b4b095SJeff Kirsher 
321469b4b095SJeff Kirsher 	memset(addr, 0, sizeof(addr));
321569b4b095SJeff Kirsher 	memset(mask, 0, sizeof(mask));
321669b4b095SJeff Kirsher 
321769b4b095SJeff Kirsher 	if (dev->flags & IFF_PROMISC) {
321869b4b095SJeff Kirsher 		pff |= NVREG_PFF_PROMISC;
321969b4b095SJeff Kirsher 	} else {
322069b4b095SJeff Kirsher 		pff |= NVREG_PFF_MYADDR;
322169b4b095SJeff Kirsher 
322269b4b095SJeff Kirsher 		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
322369b4b095SJeff Kirsher 			u32 alwaysOff[2];
322469b4b095SJeff Kirsher 			u32 alwaysOn[2];
322569b4b095SJeff Kirsher 
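			/*
			 * The hardware exposes a single address+mask multicast
			 * filter. Accumulate the bits set in every multicast
			 * address (alwaysOn) and the bits clear in every
			 * address (alwaysOff): addr then holds the common
			 * value and mask the bit positions identical across
			 * the whole list, which presumably are the bits the
			 * nic compares.
			 */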
322669b4b095SJeff Kirsher 			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
322769b4b095SJeff Kirsher 			if (dev->flags & IFF_ALLMULTI) {
322869b4b095SJeff Kirsher 				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
322969b4b095SJeff Kirsher 			} else {
323069b4b095SJeff Kirsher 				struct netdev_hw_addr *ha;
323169b4b095SJeff Kirsher 
323269b4b095SJeff Kirsher 				netdev_for_each_mc_addr(ha, dev) {
3233e45a6187Sdavid decotigny 					unsigned char *hw_addr = ha->addr;
323469b4b095SJeff Kirsher 					u32 a, b;
323569b4b095SJeff Kirsher 
3236e45a6187Sdavid decotigny 					a = le32_to_cpu(*(__le32 *) hw_addr);
3237e45a6187Sdavid decotigny 					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
323869b4b095SJeff Kirsher 					alwaysOn[0] &= a;
323969b4b095SJeff Kirsher 					alwaysOff[0] &= ~a;
324069b4b095SJeff Kirsher 					alwaysOn[1] &= b;
324169b4b095SJeff Kirsher 					alwaysOff[1] &= ~b;
324269b4b095SJeff Kirsher 				}
324369b4b095SJeff Kirsher 			}
324469b4b095SJeff Kirsher 			addr[0] = alwaysOn[0];
324569b4b095SJeff Kirsher 			addr[1] = alwaysOn[1];
324669b4b095SJeff Kirsher 			mask[0] = alwaysOn[0] | alwaysOff[0];
324769b4b095SJeff Kirsher 			mask[1] = alwaysOn[1] | alwaysOff[1];
324869b4b095SJeff Kirsher 		} else {
324969b4b095SJeff Kirsher 			mask[0] = NVREG_MCASTMASKA_NONE;
325069b4b095SJeff Kirsher 			mask[1] = NVREG_MCASTMASKB_NONE;
325169b4b095SJeff Kirsher 		}
325269b4b095SJeff Kirsher 	}
325369b4b095SJeff Kirsher 	addr[0] |= NVREG_MCASTADDRA_FORCE;
325469b4b095SJeff Kirsher 	pff |= NVREG_PFF_ALWAYS;
325569b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
325669b4b095SJeff Kirsher 	nv_stop_rx(dev);
325769b4b095SJeff Kirsher 	writel(addr[0], base + NvRegMulticastAddrA);
325869b4b095SJeff Kirsher 	writel(addr[1], base + NvRegMulticastAddrB);
325969b4b095SJeff Kirsher 	writel(mask[0], base + NvRegMulticastMaskA);
326069b4b095SJeff Kirsher 	writel(mask[1], base + NvRegMulticastMaskB);
326169b4b095SJeff Kirsher 	writel(pff, base + NvRegPacketFilterFlags);
326269b4b095SJeff Kirsher 	nv_start_rx(dev);
326369b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
326469b4b095SJeff Kirsher }
326569b4b095SJeff Kirsher 
326669b4b095SJeff Kirsher static void nv_update_pause(struct net_device *dev, u32 pause_flags)
326769b4b095SJeff Kirsher {
326869b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
326969b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
327069b4b095SJeff Kirsher 
327169b4b095SJeff Kirsher 	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
327269b4b095SJeff Kirsher 
327369b4b095SJeff Kirsher 	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
327469b4b095SJeff Kirsher 		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
327569b4b095SJeff Kirsher 		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
327669b4b095SJeff Kirsher 			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
327769b4b095SJeff Kirsher 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
327869b4b095SJeff Kirsher 		} else {
327969b4b095SJeff Kirsher 			writel(pff, base + NvRegPacketFilterFlags);
328069b4b095SJeff Kirsher 		}
328169b4b095SJeff Kirsher 	}
328269b4b095SJeff Kirsher 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
328369b4b095SJeff Kirsher 		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
328469b4b095SJeff Kirsher 		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
328569b4b095SJeff Kirsher 			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
328669b4b095SJeff Kirsher 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
328769b4b095SJeff Kirsher 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
328869b4b095SJeff Kirsher 			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
328969b4b095SJeff Kirsher 				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
329069b4b095SJeff Kirsher 				/* limit the number of tx pause frames to a default of 8 */
329169b4b095SJeff Kirsher 				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
329269b4b095SJeff Kirsher 			}
329369b4b095SJeff Kirsher 			writel(pause_enable,  base + NvRegTxPauseFrame);
329469b4b095SJeff Kirsher 			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
329569b4b095SJeff Kirsher 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
329669b4b095SJeff Kirsher 		} else {
329769b4b095SJeff Kirsher 			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
329869b4b095SJeff Kirsher 			writel(regmisc, base + NvRegMisc1);
329969b4b095SJeff Kirsher 		}
330069b4b095SJeff Kirsher 	}
330169b4b095SJeff Kirsher }
330269b4b095SJeff Kirsher 
3303e19df76aSSanjay Hortikar static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3304e19df76aSSanjay Hortikar {
3305e19df76aSSanjay Hortikar 	struct fe_priv *np = netdev_priv(dev);
3306e19df76aSSanjay Hortikar 	u8 __iomem *base = get_hwbase(dev);
3307e19df76aSSanjay Hortikar 	u32 phyreg, txreg;
3308e19df76aSSanjay Hortikar 	int mii_status;
3309e19df76aSSanjay Hortikar 
3310e19df76aSSanjay Hortikar 	np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
3311e19df76aSSanjay Hortikar 	np->duplex = duplex;
3312e19df76aSSanjay Hortikar 
3313e19df76aSSanjay Hortikar 	/* see if gigabit phy */
3314e19df76aSSanjay Hortikar 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3315e19df76aSSanjay Hortikar 	if (mii_status & PHY_GIGABIT) {
3316e19df76aSSanjay Hortikar 		np->gigabit = PHY_GIGABIT;
3317e19df76aSSanjay Hortikar 		phyreg = readl(base + NvRegSlotTime);
3318e19df76aSSanjay Hortikar 		phyreg &= ~(0x3FF00);
3319e19df76aSSanjay Hortikar 		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
3320e19df76aSSanjay Hortikar 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3321e19df76aSSanjay Hortikar 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
3322e19df76aSSanjay Hortikar 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3323e19df76aSSanjay Hortikar 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3324e19df76aSSanjay Hortikar 			phyreg |= NVREG_SLOTTIME_1000_FULL;
3325e19df76aSSanjay Hortikar 		writel(phyreg, base + NvRegSlotTime);
3326e19df76aSSanjay Hortikar 	}
3327e19df76aSSanjay Hortikar 
3328e19df76aSSanjay Hortikar 	phyreg = readl(base + NvRegPhyInterface);
3329e19df76aSSanjay Hortikar 	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3330e19df76aSSanjay Hortikar 	if (np->duplex == 0)
3331e19df76aSSanjay Hortikar 		phyreg |= PHY_HALF;
3332e19df76aSSanjay Hortikar 	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3333e19df76aSSanjay Hortikar 		phyreg |= PHY_100;
3334e19df76aSSanjay Hortikar 	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3335e19df76aSSanjay Hortikar 							NVREG_LINKSPEED_1000)
3336e19df76aSSanjay Hortikar 		phyreg |= PHY_1000;
3337e19df76aSSanjay Hortikar 	writel(phyreg, base + NvRegPhyInterface);
3338e19df76aSSanjay Hortikar 
3339e19df76aSSanjay Hortikar 	if (phyreg & PHY_RGMII) {
3340e19df76aSSanjay Hortikar 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3341e19df76aSSanjay Hortikar 							NVREG_LINKSPEED_1000)
3342e19df76aSSanjay Hortikar 			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3343e19df76aSSanjay Hortikar 		else
3344e19df76aSSanjay Hortikar 			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3345e19df76aSSanjay Hortikar 	} else {
3346e19df76aSSanjay Hortikar 		txreg = NVREG_TX_DEFERRAL_DEFAULT;
3347e19df76aSSanjay Hortikar 	}
3348e19df76aSSanjay Hortikar 	writel(txreg, base + NvRegTxDeferral);
3349e19df76aSSanjay Hortikar 
3350e19df76aSSanjay Hortikar 	if (np->desc_ver == DESC_VER_1) {
3351e19df76aSSanjay Hortikar 		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3352e19df76aSSanjay Hortikar 	} else {
3353e19df76aSSanjay Hortikar 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3354e19df76aSSanjay Hortikar 					 NVREG_LINKSPEED_1000)
3355e19df76aSSanjay Hortikar 			txreg = NVREG_TX_WM_DESC2_3_1000;
3356e19df76aSSanjay Hortikar 		else
3357e19df76aSSanjay Hortikar 			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3358e19df76aSSanjay Hortikar 	}
3359e19df76aSSanjay Hortikar 	writel(txreg, base + NvRegTxWatermark);
3360e19df76aSSanjay Hortikar 
3361e19df76aSSanjay Hortikar 	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3362e19df76aSSanjay Hortikar 			base + NvRegMisc1);
3363e19df76aSSanjay Hortikar 	pci_push(base);
3364e19df76aSSanjay Hortikar 	writel(np->linkspeed, base + NvRegLinkSpeed);
3365e19df76aSSanjay Hortikar 	pci_push(base);
3366e19df76aSSanjay Hortikar }
3367e19df76aSSanjay Hortikar 
336869b4b095SJeff Kirsher /**
336949ce9c2cSBen Hutchings  * nv_update_linkspeed - Set up the MAC according to the link partner
337069b4b095SJeff Kirsher  * @dev: Network device to be configured
337169b4b095SJeff Kirsher  *
337269b4b095SJeff Kirsher  * The function queries the PHY and checks whether there is a link partner.
337369b4b095SJeff Kirsher  * If so, it sets up the MAC accordingly. Otherwise, the MAC is
337469b4b095SJeff Kirsher  * set to 10 MBit HD.
337569b4b095SJeff Kirsher  *
337669b4b095SJeff Kirsher  * The function returns 0 if there is no link partner and 1 if there is
337769b4b095SJeff Kirsher  * a good link partner.
337869b4b095SJeff Kirsher  */
337969b4b095SJeff Kirsher static int nv_update_linkspeed(struct net_device *dev)
338069b4b095SJeff Kirsher {
338169b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
338269b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
338369b4b095SJeff Kirsher 	int adv = 0;
338469b4b095SJeff Kirsher 	int lpa = 0;
338569b4b095SJeff Kirsher 	int adv_lpa, adv_pause, lpa_pause;
338669b4b095SJeff Kirsher 	int newls = np->linkspeed;
338769b4b095SJeff Kirsher 	int newdup = np->duplex;
338869b4b095SJeff Kirsher 	int mii_status;
3389e19df76aSSanjay Hortikar 	u32 bmcr;
339069b4b095SJeff Kirsher 	int retval = 0;
339169b4b095SJeff Kirsher 	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
339269b4b095SJeff Kirsher 	u32 txrxFlags = 0;
339369b4b095SJeff Kirsher 	u32 phy_exp;
339469b4b095SJeff Kirsher 
3395e19df76aSSanjay Hortikar 	/* If device loopback is enabled, set carrier on and enable max link
3396e19df76aSSanjay Hortikar 	 * speed.
3397e19df76aSSanjay Hortikar 	 */
3398e19df76aSSanjay Hortikar 	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3399e19df76aSSanjay Hortikar 	if (bmcr & BMCR_LOOPBACK) {
3400e19df76aSSanjay Hortikar 		if (netif_running(dev)) {
3401e19df76aSSanjay Hortikar 			nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
3402e19df76aSSanjay Hortikar 			if (!netif_carrier_ok(dev))
3403e19df76aSSanjay Hortikar 				netif_carrier_on(dev);
3404e19df76aSSanjay Hortikar 		}
3405e19df76aSSanjay Hortikar 		return 1;
3406e19df76aSSanjay Hortikar 	}
3407e19df76aSSanjay Hortikar 
340869b4b095SJeff Kirsher 	/* BMSR_LSTATUS is latched, read it twice:
340969b4b095SJeff Kirsher 	 * we want the current value.
341069b4b095SJeff Kirsher 	 */
341169b4b095SJeff Kirsher 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
341269b4b095SJeff Kirsher 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
341369b4b095SJeff Kirsher 
341469b4b095SJeff Kirsher 	if (!(mii_status & BMSR_LSTATUS)) {
341569b4b095SJeff Kirsher 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
341669b4b095SJeff Kirsher 		newdup = 0;
341769b4b095SJeff Kirsher 		retval = 0;
341869b4b095SJeff Kirsher 		goto set_speed;
341969b4b095SJeff Kirsher 	}
342069b4b095SJeff Kirsher 
342169b4b095SJeff Kirsher 	if (np->autoneg == 0) {
342269b4b095SJeff Kirsher 		if (np->fixed_mode & LPA_100FULL) {
342369b4b095SJeff Kirsher 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
342469b4b095SJeff Kirsher 			newdup = 1;
342569b4b095SJeff Kirsher 		} else if (np->fixed_mode & LPA_100HALF) {
342669b4b095SJeff Kirsher 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
342769b4b095SJeff Kirsher 			newdup = 0;
342869b4b095SJeff Kirsher 		} else if (np->fixed_mode & LPA_10FULL) {
342969b4b095SJeff Kirsher 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
343069b4b095SJeff Kirsher 			newdup = 1;
343169b4b095SJeff Kirsher 		} else {
343269b4b095SJeff Kirsher 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
343369b4b095SJeff Kirsher 			newdup = 0;
343469b4b095SJeff Kirsher 		}
343569b4b095SJeff Kirsher 		retval = 1;
343669b4b095SJeff Kirsher 		goto set_speed;
343769b4b095SJeff Kirsher 	}
343869b4b095SJeff Kirsher 	/* check that autonegotiation is complete */
343969b4b095SJeff Kirsher 	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
344069b4b095SJeff Kirsher 		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
344169b4b095SJeff Kirsher 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
344269b4b095SJeff Kirsher 		newdup = 0;
344369b4b095SJeff Kirsher 		retval = 0;
344469b4b095SJeff Kirsher 		goto set_speed;
344569b4b095SJeff Kirsher 	}
344669b4b095SJeff Kirsher 
344769b4b095SJeff Kirsher 	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
344869b4b095SJeff Kirsher 	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
344969b4b095SJeff Kirsher 
345069b4b095SJeff Kirsher 	retval = 1;
345169b4b095SJeff Kirsher 	if (np->gigabit == PHY_GIGABIT) {
345269b4b095SJeff Kirsher 		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
345369b4b095SJeff Kirsher 		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
345469b4b095SJeff Kirsher 
345569b4b095SJeff Kirsher 		if ((control_1000 & ADVERTISE_1000FULL) &&
345669b4b095SJeff Kirsher 			(status_1000 & LPA_1000FULL)) {
345769b4b095SJeff Kirsher 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
345869b4b095SJeff Kirsher 			newdup = 1;
345969b4b095SJeff Kirsher 			goto set_speed;
346069b4b095SJeff Kirsher 		}
346169b4b095SJeff Kirsher 	}
346269b4b095SJeff Kirsher 
346369b4b095SJeff Kirsher 	/* FIXME: handle parallel detection properly */
346469b4b095SJeff Kirsher 	adv_lpa = lpa & adv;
346569b4b095SJeff Kirsher 	if (adv_lpa & LPA_100FULL) {
346669b4b095SJeff Kirsher 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
346769b4b095SJeff Kirsher 		newdup = 1;
346869b4b095SJeff Kirsher 	} else if (adv_lpa & LPA_100HALF) {
346969b4b095SJeff Kirsher 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
347069b4b095SJeff Kirsher 		newdup = 0;
347169b4b095SJeff Kirsher 	} else if (adv_lpa & LPA_10FULL) {
347269b4b095SJeff Kirsher 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
347369b4b095SJeff Kirsher 		newdup = 1;
347469b4b095SJeff Kirsher 	} else if (adv_lpa & LPA_10HALF) {
347569b4b095SJeff Kirsher 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
347669b4b095SJeff Kirsher 		newdup = 0;
347769b4b095SJeff Kirsher 	} else {
347869b4b095SJeff Kirsher 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
347969b4b095SJeff Kirsher 		newdup = 0;
348069b4b095SJeff Kirsher 	}
348169b4b095SJeff Kirsher 
348269b4b095SJeff Kirsher set_speed:
348369b4b095SJeff Kirsher 	if (np->duplex == newdup && np->linkspeed == newls)
348469b4b095SJeff Kirsher 		return retval;
348569b4b095SJeff Kirsher 
348669b4b095SJeff Kirsher 	np->duplex = newdup;
348769b4b095SJeff Kirsher 	np->linkspeed = newls;
348869b4b095SJeff Kirsher 
348969b4b095SJeff Kirsher 	/* The transmitter and receiver must be restarted for a safe update */
349069b4b095SJeff Kirsher 	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
349169b4b095SJeff Kirsher 		txrxFlags |= NV_RESTART_TX;
349269b4b095SJeff Kirsher 		nv_stop_tx(dev);
349369b4b095SJeff Kirsher 	}
349469b4b095SJeff Kirsher 	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
349569b4b095SJeff Kirsher 		txrxFlags |= NV_RESTART_RX;
349669b4b095SJeff Kirsher 		nv_stop_rx(dev);
349769b4b095SJeff Kirsher 	}
349869b4b095SJeff Kirsher 
349969b4b095SJeff Kirsher 	if (np->gigabit == PHY_GIGABIT) {
350069b4b095SJeff Kirsher 		phyreg = readl(base + NvRegSlotTime);
350169b4b095SJeff Kirsher 		phyreg &= ~(0x3FF00);
350269b4b095SJeff Kirsher 		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
350369b4b095SJeff Kirsher 		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
350469b4b095SJeff Kirsher 			phyreg |= NVREG_SLOTTIME_10_100_FULL;
350569b4b095SJeff Kirsher 		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
350669b4b095SJeff Kirsher 			phyreg |= NVREG_SLOTTIME_1000_FULL;
350769b4b095SJeff Kirsher 		writel(phyreg, base + NvRegSlotTime);
350869b4b095SJeff Kirsher 	}
350969b4b095SJeff Kirsher 
351069b4b095SJeff Kirsher 	phyreg = readl(base + NvRegPhyInterface);
351169b4b095SJeff Kirsher 	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
351269b4b095SJeff Kirsher 	if (np->duplex == 0)
351369b4b095SJeff Kirsher 		phyreg |= PHY_HALF;
351469b4b095SJeff Kirsher 	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
351569b4b095SJeff Kirsher 		phyreg |= PHY_100;
351669b4b095SJeff Kirsher 	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
351769b4b095SJeff Kirsher 		phyreg |= PHY_1000;
351869b4b095SJeff Kirsher 	writel(phyreg, base + NvRegPhyInterface);
351969b4b095SJeff Kirsher 
352069b4b095SJeff Kirsher 	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
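	/* Partners that apparently cannot autonegotiate (!phy_exp, i.e. the
	 * link presumably came up via parallel detection) get a stretched tx
	 * deferral at half duplex on nics with the collision fix; otherwise
	 * the speed-appropriate default deferral is used.
	 */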
352169b4b095SJeff Kirsher 	if (phyreg & PHY_RGMII) {
352269b4b095SJeff Kirsher 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
352369b4b095SJeff Kirsher 			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
352469b4b095SJeff Kirsher 		} else {
352569b4b095SJeff Kirsher 			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
352669b4b095SJeff Kirsher 				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
352769b4b095SJeff Kirsher 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
352869b4b095SJeff Kirsher 				else
352969b4b095SJeff Kirsher 					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
353069b4b095SJeff Kirsher 			} else {
353169b4b095SJeff Kirsher 				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
353269b4b095SJeff Kirsher 			}
353369b4b095SJeff Kirsher 		}
353469b4b095SJeff Kirsher 	} else {
353569b4b095SJeff Kirsher 		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
353669b4b095SJeff Kirsher 			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
353769b4b095SJeff Kirsher 		else
353869b4b095SJeff Kirsher 			txreg = NVREG_TX_DEFERRAL_DEFAULT;
353969b4b095SJeff Kirsher 	}
354069b4b095SJeff Kirsher 	writel(txreg, base + NvRegTxDeferral);
354169b4b095SJeff Kirsher 
354269b4b095SJeff Kirsher 	if (np->desc_ver == DESC_VER_1) {
354369b4b095SJeff Kirsher 		txreg = NVREG_TX_WM_DESC1_DEFAULT;
354469b4b095SJeff Kirsher 	} else {
354569b4b095SJeff Kirsher 		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
354669b4b095SJeff Kirsher 			txreg = NVREG_TX_WM_DESC2_3_1000;
354769b4b095SJeff Kirsher 		else
354869b4b095SJeff Kirsher 			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
354969b4b095SJeff Kirsher 	}
355069b4b095SJeff Kirsher 	writel(txreg, base + NvRegTxWatermark);
355169b4b095SJeff Kirsher 
355269b4b095SJeff Kirsher 	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
355369b4b095SJeff Kirsher 		base + NvRegMisc1);
355469b4b095SJeff Kirsher 	pci_push(base);
355569b4b095SJeff Kirsher 	writel(np->linkspeed, base + NvRegLinkSpeed);
355669b4b095SJeff Kirsher 	pci_push(base);
355769b4b095SJeff Kirsher 
355869b4b095SJeff Kirsher 	pause_flags = 0;
355969b4b095SJeff Kirsher 	/* setup pause frame */
35601ff39eb6Sdavid decotigny 	if (netif_running(dev) && (np->duplex != 0)) {
356169b4b095SJeff Kirsher 		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
356269b4b095SJeff Kirsher 			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
356369b4b095SJeff Kirsher 			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
356469b4b095SJeff Kirsher 
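			/*
			 * Resolve the pause configuration from the local and
			 * link-partner advertisements, along the lines of the
			 * IEEE 802.3 Annex 28B rules: symmetric pause enables
			 * rx (and tx if requested), while an asymmetric-only
			 * advertisement enables tx only when the partner
			 * advertises both CAP and ASYM.
			 */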
356569b4b095SJeff Kirsher 			switch (adv_pause) {
356669b4b095SJeff Kirsher 			case ADVERTISE_PAUSE_CAP:
356769b4b095SJeff Kirsher 				if (lpa_pause & LPA_PAUSE_CAP) {
356869b4b095SJeff Kirsher 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
356969b4b095SJeff Kirsher 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
357069b4b095SJeff Kirsher 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
357169b4b095SJeff Kirsher 				}
357269b4b095SJeff Kirsher 				break;
357369b4b095SJeff Kirsher 			case ADVERTISE_PAUSE_ASYM:
357469b4b095SJeff Kirsher 				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
357569b4b095SJeff Kirsher 					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
357669b4b095SJeff Kirsher 				break;
357769b4b095SJeff Kirsher 			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
357869b4b095SJeff Kirsher 				if (lpa_pause & LPA_PAUSE_CAP) {
357969b4b095SJeff Kirsher 					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
358069b4b095SJeff Kirsher 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
358169b4b095SJeff Kirsher 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
358269b4b095SJeff Kirsher 				}
358369b4b095SJeff Kirsher 				if (lpa_pause == LPA_PAUSE_ASYM)
358469b4b095SJeff Kirsher 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
358569b4b095SJeff Kirsher 				break;
358669b4b095SJeff Kirsher 			}
358769b4b095SJeff Kirsher 		} else {
358869b4b095SJeff Kirsher 			pause_flags = np->pause_flags;
358969b4b095SJeff Kirsher 		}
359069b4b095SJeff Kirsher 	}
359169b4b095SJeff Kirsher 	nv_update_pause(dev, pause_flags);
359269b4b095SJeff Kirsher 
359369b4b095SJeff Kirsher 	if (txrxFlags & NV_RESTART_TX)
359469b4b095SJeff Kirsher 		nv_start_tx(dev);
359569b4b095SJeff Kirsher 	if (txrxFlags & NV_RESTART_RX)
359669b4b095SJeff Kirsher 		nv_start_rx(dev);
359769b4b095SJeff Kirsher 
359869b4b095SJeff Kirsher 	return retval;
359969b4b095SJeff Kirsher }
360069b4b095SJeff Kirsher 
360169b4b095SJeff Kirsher static void nv_linkchange(struct net_device *dev)
360269b4b095SJeff Kirsher {
360369b4b095SJeff Kirsher 	if (nv_update_linkspeed(dev)) {
360469b4b095SJeff Kirsher 		if (!netif_carrier_ok(dev)) {
360569b4b095SJeff Kirsher 			netif_carrier_on(dev);
360669b4b095SJeff Kirsher 			netdev_info(dev, "link up\n");
360769b4b095SJeff Kirsher 			nv_txrx_gate(dev, false);
360869b4b095SJeff Kirsher 			nv_start_rx(dev);
360969b4b095SJeff Kirsher 		}
361069b4b095SJeff Kirsher 	} else {
361169b4b095SJeff Kirsher 		if (netif_carrier_ok(dev)) {
361269b4b095SJeff Kirsher 			netif_carrier_off(dev);
361369b4b095SJeff Kirsher 			netdev_info(dev, "link down\n");
361469b4b095SJeff Kirsher 			nv_txrx_gate(dev, true);
361569b4b095SJeff Kirsher 			nv_stop_rx(dev);
361669b4b095SJeff Kirsher 		}
361769b4b095SJeff Kirsher 	}
361869b4b095SJeff Kirsher }
361969b4b095SJeff Kirsher 
362069b4b095SJeff Kirsher static void nv_link_irq(struct net_device *dev)
362169b4b095SJeff Kirsher {
362269b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
362369b4b095SJeff Kirsher 	u32 miistat;
362469b4b095SJeff Kirsher 
362569b4b095SJeff Kirsher 	miistat = readl(base + NvRegMIIStatus);
362669b4b095SJeff Kirsher 	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
362769b4b095SJeff Kirsher 
362869b4b095SJeff Kirsher 	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
362969b4b095SJeff Kirsher 		nv_linkchange(dev);
363069b4b095SJeff Kirsher }
363169b4b095SJeff Kirsher 
363269b4b095SJeff Kirsher static void nv_msi_workaround(struct fe_priv *np)
363369b4b095SJeff Kirsher {
363469b4b095SJeff Kirsher 
363569b4b095SJeff Kirsher 	/* Need to toggle the MSI IRQ mask within the ethernet device;
363669b4b095SJeff Kirsher 	 * otherwise, future interrupts will not be detected.
363769b4b095SJeff Kirsher 	 */
363869b4b095SJeff Kirsher 	if (np->msi_flags & NV_MSI_ENABLED) {
363969b4b095SJeff Kirsher 		u8 __iomem *base = np->base;
364069b4b095SJeff Kirsher 
364169b4b095SJeff Kirsher 		writel(0, base + NvRegMSIIrqMask);
364269b4b095SJeff Kirsher 		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
364369b4b095SJeff Kirsher 	}
364469b4b095SJeff Kirsher }
364569b4b095SJeff Kirsher 
364669b4b095SJeff Kirsher static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
364769b4b095SJeff Kirsher {
364869b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
364969b4b095SJeff Kirsher 
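	/*
	 * In dynamic mode, switch to the poll-based (CPU) irqmask as soon as
	 * one pass handles a burst of work, and back to per-packet
	 * (throughput) interrupts only after NV_DYNAMIC_MAX_QUIET_COUNT quiet
	 * passes. A return value of 1 signals that np->irqmask changed so the
	 * caller can write the new mask to the hardware.
	 */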
365069b4b095SJeff Kirsher 	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
365169b4b095SJeff Kirsher 		if (total_work > NV_DYNAMIC_THRESHOLD) {
365269b4b095SJeff Kirsher 			/* transition to poll based interrupts */
365369b4b095SJeff Kirsher 			np->quiet_count = 0;
365469b4b095SJeff Kirsher 			if (np->irqmask != NVREG_IRQMASK_CPU) {
365569b4b095SJeff Kirsher 				np->irqmask = NVREG_IRQMASK_CPU;
365669b4b095SJeff Kirsher 				return 1;
365769b4b095SJeff Kirsher 			}
365869b4b095SJeff Kirsher 		} else {
365969b4b095SJeff Kirsher 			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
366069b4b095SJeff Kirsher 				np->quiet_count++;
366169b4b095SJeff Kirsher 			} else {
366269b4b095SJeff Kirsher 				/* reached a period of low activity; switch
366369b4b095SJeff Kirsher 				   to per-packet tx/rx interrupts */
366469b4b095SJeff Kirsher 				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
366569b4b095SJeff Kirsher 					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
366669b4b095SJeff Kirsher 					return 1;
366769b4b095SJeff Kirsher 				}
366869b4b095SJeff Kirsher 			}
366969b4b095SJeff Kirsher 		}
367069b4b095SJeff Kirsher 	}
367169b4b095SJeff Kirsher 	return 0;
367269b4b095SJeff Kirsher }
367369b4b095SJeff Kirsher 
367469b4b095SJeff Kirsher static irqreturn_t nv_nic_irq(int foo, void *data)
367569b4b095SJeff Kirsher {
367669b4b095SJeff Kirsher 	struct net_device *dev = (struct net_device *) data;
367769b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
367869b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
367969b4b095SJeff Kirsher 
368069b4b095SJeff Kirsher 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
368169b4b095SJeff Kirsher 		np->events = readl(base + NvRegIrqStatus);
368269b4b095SJeff Kirsher 		writel(np->events, base + NvRegIrqStatus);
368369b4b095SJeff Kirsher 	} else {
368469b4b095SJeff Kirsher 		np->events = readl(base + NvRegMSIXIrqStatus);
368569b4b095SJeff Kirsher 		writel(np->events, base + NvRegMSIXIrqStatus);
368669b4b095SJeff Kirsher 	}
368769b4b095SJeff Kirsher 	if (!(np->events & np->irqmask))
368869b4b095SJeff Kirsher 		return IRQ_NONE;
368969b4b095SJeff Kirsher 
369069b4b095SJeff Kirsher 	nv_msi_workaround(np);
369169b4b095SJeff Kirsher 
369269b4b095SJeff Kirsher 	if (napi_schedule_prep(&np->napi)) {
369369b4b095SJeff Kirsher 		/*
369469b4b095SJeff Kirsher 		 * Disable further IRQs (MSI-X is not enabled with NAPI)
369569b4b095SJeff Kirsher 		 */
369669b4b095SJeff Kirsher 		writel(0, base + NvRegIrqMask);
369769b4b095SJeff Kirsher 		__napi_schedule(&np->napi);
369869b4b095SJeff Kirsher 	}
369969b4b095SJeff Kirsher 
370069b4b095SJeff Kirsher 	return IRQ_HANDLED;
370169b4b095SJeff Kirsher }
370269b4b095SJeff Kirsher 
37031aa8b471SBen Hutchings /* All _optimized functions are used to help increase performance
370469b4b095SJeff Kirsher  * (reduce CPU load and increase throughput). They use descriptor version 3
370569b4b095SJeff Kirsher  * and compiler directives, and reduce memory accesses.
370669b4b095SJeff Kirsher  */
370769b4b095SJeff Kirsher static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
370869b4b095SJeff Kirsher {
370969b4b095SJeff Kirsher 	struct net_device *dev = (struct net_device *) data;
371069b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
371169b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
371269b4b095SJeff Kirsher 
371369b4b095SJeff Kirsher 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
371469b4b095SJeff Kirsher 		np->events = readl(base + NvRegIrqStatus);
371569b4b095SJeff Kirsher 		writel(np->events, base + NvRegIrqStatus);
371669b4b095SJeff Kirsher 	} else {
371769b4b095SJeff Kirsher 		np->events = readl(base + NvRegMSIXIrqStatus);
371869b4b095SJeff Kirsher 		writel(np->events, base + NvRegMSIXIrqStatus);
371969b4b095SJeff Kirsher 	}
372069b4b095SJeff Kirsher 	if (!(np->events & np->irqmask))
372169b4b095SJeff Kirsher 		return IRQ_NONE;
372269b4b095SJeff Kirsher 
372369b4b095SJeff Kirsher 	nv_msi_workaround(np);
372469b4b095SJeff Kirsher 
372569b4b095SJeff Kirsher 	if (napi_schedule_prep(&np->napi)) {
372669b4b095SJeff Kirsher 		/*
372769b4b095SJeff Kirsher 		 * Disable further IRQs (MSI-X is not enabled with NAPI)
372869b4b095SJeff Kirsher 		 */
372969b4b095SJeff Kirsher 		writel(0, base + NvRegIrqMask);
373069b4b095SJeff Kirsher 		__napi_schedule(&np->napi);
373169b4b095SJeff Kirsher 	}
373269b4b095SJeff Kirsher 
373369b4b095SJeff Kirsher 	return IRQ_HANDLED;
373469b4b095SJeff Kirsher }
373569b4b095SJeff Kirsher 
373669b4b095SJeff Kirsher static irqreturn_t nv_nic_irq_tx(int foo, void *data)
373769b4b095SJeff Kirsher {
373869b4b095SJeff Kirsher 	struct net_device *dev = (struct net_device *) data;
373969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
374069b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
374169b4b095SJeff Kirsher 	u32 events;
374269b4b095SJeff Kirsher 	int i;
374369b4b095SJeff Kirsher 	unsigned long flags;
374469b4b095SJeff Kirsher 
374569b4b095SJeff Kirsher 	for (i = 0;; i++) {
374669b4b095SJeff Kirsher 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
37472a4e7a08SMike Ditto 		writel(events, base + NvRegMSIXIrqStatus);
37482a4e7a08SMike Ditto 		netdev_dbg(dev, "tx irq events: %08x\n", events);
374969b4b095SJeff Kirsher 		if (!(events & np->irqmask))
375069b4b095SJeff Kirsher 			break;
375169b4b095SJeff Kirsher 
375269b4b095SJeff Kirsher 		spin_lock_irqsave(&np->lock, flags);
375369b4b095SJeff Kirsher 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
375469b4b095SJeff Kirsher 		spin_unlock_irqrestore(&np->lock, flags);
375569b4b095SJeff Kirsher 
375669b4b095SJeff Kirsher 		if (unlikely(i > max_interrupt_work)) {
375769b4b095SJeff Kirsher 			spin_lock_irqsave(&np->lock, flags);
375869b4b095SJeff Kirsher 			/* disable interrupts on the nic */
375969b4b095SJeff Kirsher 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
376069b4b095SJeff Kirsher 			pci_push(base);
376169b4b095SJeff Kirsher 
376269b4b095SJeff Kirsher 			if (!np->in_shutdown) {
376369b4b095SJeff Kirsher 				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
376469b4b095SJeff Kirsher 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
376569b4b095SJeff Kirsher 			}
376669b4b095SJeff Kirsher 			spin_unlock_irqrestore(&np->lock, flags);
376769b4b095SJeff Kirsher 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
376869b4b095SJeff Kirsher 				   __func__, i);
376969b4b095SJeff Kirsher 			break;
377069b4b095SJeff Kirsher 		}
377169b4b095SJeff Kirsher 
377269b4b095SJeff Kirsher 	}
377369b4b095SJeff Kirsher 
377469b4b095SJeff Kirsher 	return IRQ_RETVAL(i);
377569b4b095SJeff Kirsher }
377669b4b095SJeff Kirsher 
377769b4b095SJeff Kirsher static int nv_napi_poll(struct napi_struct *napi, int budget)
377869b4b095SJeff Kirsher {
377969b4b095SJeff Kirsher 	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
378069b4b095SJeff Kirsher 	struct net_device *dev = np->dev;
378169b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
378269b4b095SJeff Kirsher 	unsigned long flags;
378369b4b095SJeff Kirsher 	int retcode;
378469b4b095SJeff Kirsher 	int rx_count, tx_work = 0, rx_work = 0;
378569b4b095SJeff Kirsher 
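	/*
	 * Reap completed tx descriptors under np->lock, then process rx
	 * packets against the remaining budget and refill the rx ring;
	 * repeat while the refill succeeds and rx budget remains.
	 */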
378669b4b095SJeff Kirsher 	do {
378769b4b095SJeff Kirsher 		if (!nv_optimized(np)) {
378869b4b095SJeff Kirsher 			spin_lock_irqsave(&np->lock, flags);
378969b4b095SJeff Kirsher 			tx_work += nv_tx_done(dev, np->tx_ring_size);
379069b4b095SJeff Kirsher 			spin_unlock_irqrestore(&np->lock, flags);
379169b4b095SJeff Kirsher 
379269b4b095SJeff Kirsher 			rx_count = nv_rx_process(dev, budget - rx_work);
379369b4b095SJeff Kirsher 			retcode = nv_alloc_rx(dev);
379469b4b095SJeff Kirsher 		} else {
379569b4b095SJeff Kirsher 			spin_lock_irqsave(&np->lock, flags);
379669b4b095SJeff Kirsher 			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
379769b4b095SJeff Kirsher 			spin_unlock_irqrestore(&np->lock, flags);
379869b4b095SJeff Kirsher 
379969b4b095SJeff Kirsher 			rx_count = nv_rx_process_optimized(dev,
380069b4b095SJeff Kirsher 			    budget - rx_work);
380169b4b095SJeff Kirsher 			retcode = nv_alloc_rx_optimized(dev);
380269b4b095SJeff Kirsher 		}
380369b4b095SJeff Kirsher 	} while (retcode == 0 &&
380469b4b095SJeff Kirsher 		 rx_count > 0 && (rx_work += rx_count) < budget);
380569b4b095SJeff Kirsher 
380669b4b095SJeff Kirsher 	if (retcode) {
380769b4b095SJeff Kirsher 		spin_lock_irqsave(&np->lock, flags);
380869b4b095SJeff Kirsher 		if (!np->in_shutdown)
380969b4b095SJeff Kirsher 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
381069b4b095SJeff Kirsher 		spin_unlock_irqrestore(&np->lock, flags);
381169b4b095SJeff Kirsher 	}
381269b4b095SJeff Kirsher 
381369b4b095SJeff Kirsher 	nv_change_interrupt_mode(dev, tx_work + rx_work);
381469b4b095SJeff Kirsher 
381569b4b095SJeff Kirsher 	if (unlikely(np->events & NVREG_IRQ_LINK)) {
381669b4b095SJeff Kirsher 		spin_lock_irqsave(&np->lock, flags);
381769b4b095SJeff Kirsher 		nv_link_irq(dev);
381869b4b095SJeff Kirsher 		spin_unlock_irqrestore(&np->lock, flags);
381969b4b095SJeff Kirsher 	}
382069b4b095SJeff Kirsher 	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
382169b4b095SJeff Kirsher 		spin_lock_irqsave(&np->lock, flags);
382269b4b095SJeff Kirsher 		nv_linkchange(dev);
382369b4b095SJeff Kirsher 		spin_unlock_irqrestore(&np->lock, flags);
382469b4b095SJeff Kirsher 		np->link_timeout = jiffies + LINK_TIMEOUT;
382569b4b095SJeff Kirsher 	}
382669b4b095SJeff Kirsher 	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
382769b4b095SJeff Kirsher 		spin_lock_irqsave(&np->lock, flags);
382869b4b095SJeff Kirsher 		if (!np->in_shutdown) {
382969b4b095SJeff Kirsher 			np->nic_poll_irq = np->irqmask;
383069b4b095SJeff Kirsher 			np->recover_error = 1;
383169b4b095SJeff Kirsher 			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
383269b4b095SJeff Kirsher 		}
383369b4b095SJeff Kirsher 		spin_unlock_irqrestore(&np->lock, flags);
383469b4b095SJeff Kirsher 		napi_complete(napi);
383569b4b095SJeff Kirsher 		return rx_work;
383669b4b095SJeff Kirsher 	}
383769b4b095SJeff Kirsher 
383869b4b095SJeff Kirsher 	if (rx_work < budget) {
383969b4b095SJeff Kirsher 		/* re-enable interrupts
384069b4b095SJeff Kirsher 		   (MSI-X is not enabled with NAPI) */
38416ad20165SEric Dumazet 		napi_complete_done(napi, rx_work);
384269b4b095SJeff Kirsher 
384369b4b095SJeff Kirsher 		writel(np->irqmask, base + NvRegIrqMask);
384469b4b095SJeff Kirsher 	}
384569b4b095SJeff Kirsher 	return rx_work;
384669b4b095SJeff Kirsher }
384769b4b095SJeff Kirsher 
384869b4b095SJeff Kirsher static irqreturn_t nv_nic_irq_rx(int foo, void *data)
384969b4b095SJeff Kirsher {
385069b4b095SJeff Kirsher 	struct net_device *dev = (struct net_device *) data;
385169b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
385269b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
385369b4b095SJeff Kirsher 	u32 events;
385469b4b095SJeff Kirsher 	int i;
385569b4b095SJeff Kirsher 	unsigned long flags;
385669b4b095SJeff Kirsher 
385769b4b095SJeff Kirsher 	for (i = 0;; i++) {
385869b4b095SJeff Kirsher 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
38592a4e7a08SMike Ditto 		writel(events, base + NvRegMSIXIrqStatus);
38602a4e7a08SMike Ditto 		netdev_dbg(dev, "rx irq events: %08x\n", events);
386169b4b095SJeff Kirsher 		if (!(events & np->irqmask))
386269b4b095SJeff Kirsher 			break;
386369b4b095SJeff Kirsher 
386469b4b095SJeff Kirsher 		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
386569b4b095SJeff Kirsher 			if (unlikely(nv_alloc_rx_optimized(dev))) {
386669b4b095SJeff Kirsher 				spin_lock_irqsave(&np->lock, flags);
386769b4b095SJeff Kirsher 				if (!np->in_shutdown)
386869b4b095SJeff Kirsher 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
386969b4b095SJeff Kirsher 				spin_unlock_irqrestore(&np->lock, flags);
387069b4b095SJeff Kirsher 			}
387169b4b095SJeff Kirsher 		}
387269b4b095SJeff Kirsher 
387369b4b095SJeff Kirsher 		if (unlikely(i > max_interrupt_work)) {
387469b4b095SJeff Kirsher 			spin_lock_irqsave(&np->lock, flags);
387569b4b095SJeff Kirsher 			/* disable interrupts on the nic */
387669b4b095SJeff Kirsher 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
387769b4b095SJeff Kirsher 			pci_push(base);
387869b4b095SJeff Kirsher 
387969b4b095SJeff Kirsher 			if (!np->in_shutdown) {
388069b4b095SJeff Kirsher 				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
388169b4b095SJeff Kirsher 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
388269b4b095SJeff Kirsher 			}
388369b4b095SJeff Kirsher 			spin_unlock_irqrestore(&np->lock, flags);
388469b4b095SJeff Kirsher 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
388569b4b095SJeff Kirsher 				   __func__, i);
388669b4b095SJeff Kirsher 			break;
388769b4b095SJeff Kirsher 		}
388869b4b095SJeff Kirsher 	}
388969b4b095SJeff Kirsher 
389069b4b095SJeff Kirsher 	return IRQ_RETVAL(i);
389169b4b095SJeff Kirsher }
389269b4b095SJeff Kirsher 
389369b4b095SJeff Kirsher static irqreturn_t nv_nic_irq_other(int foo, void *data)
389469b4b095SJeff Kirsher {
389569b4b095SJeff Kirsher 	struct net_device *dev = (struct net_device *) data;
389669b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
389769b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
389869b4b095SJeff Kirsher 	u32 events;
389969b4b095SJeff Kirsher 	int i;
390069b4b095SJeff Kirsher 	unsigned long flags;
390169b4b095SJeff Kirsher 
390269b4b095SJeff Kirsher 	for (i = 0;; i++) {
390369b4b095SJeff Kirsher 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
39042a4e7a08SMike Ditto 		writel(events, base + NvRegMSIXIrqStatus);
39052a4e7a08SMike Ditto 		netdev_dbg(dev, "irq events: %08x\n", events);
390669b4b095SJeff Kirsher 		if (!(events & np->irqmask))
390769b4b095SJeff Kirsher 			break;
390869b4b095SJeff Kirsher 
390969b4b095SJeff Kirsher 		/* check tx in case we reached the max loop limit in the tx isr */
391069b4b095SJeff Kirsher 		spin_lock_irqsave(&np->lock, flags);
391169b4b095SJeff Kirsher 		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
391269b4b095SJeff Kirsher 		spin_unlock_irqrestore(&np->lock, flags);
391369b4b095SJeff Kirsher 
391469b4b095SJeff Kirsher 		if (events & NVREG_IRQ_LINK) {
391569b4b095SJeff Kirsher 			spin_lock_irqsave(&np->lock, flags);
391669b4b095SJeff Kirsher 			nv_link_irq(dev);
391769b4b095SJeff Kirsher 			spin_unlock_irqrestore(&np->lock, flags);
391869b4b095SJeff Kirsher 		}
391969b4b095SJeff Kirsher 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
392069b4b095SJeff Kirsher 			spin_lock_irqsave(&np->lock, flags);
392169b4b095SJeff Kirsher 			nv_linkchange(dev);
392269b4b095SJeff Kirsher 			spin_unlock_irqrestore(&np->lock, flags);
392369b4b095SJeff Kirsher 			np->link_timeout = jiffies + LINK_TIMEOUT;
392469b4b095SJeff Kirsher 		}
392569b4b095SJeff Kirsher 		if (events & NVREG_IRQ_RECOVER_ERROR) {
3926186e8687SDenis Efremov 			spin_lock_irqsave(&np->lock, flags);
392769b4b095SJeff Kirsher 			/* disable interrupts on the nic */
392869b4b095SJeff Kirsher 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
392969b4b095SJeff Kirsher 			pci_push(base);
393069b4b095SJeff Kirsher 
393169b4b095SJeff Kirsher 			if (!np->in_shutdown) {
393269b4b095SJeff Kirsher 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
393369b4b095SJeff Kirsher 				np->recover_error = 1;
393469b4b095SJeff Kirsher 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
393569b4b095SJeff Kirsher 			}
3936186e8687SDenis Efremov 			spin_unlock_irqrestore(&np->lock, flags);
393769b4b095SJeff Kirsher 			break;
393869b4b095SJeff Kirsher 		}
393969b4b095SJeff Kirsher 		if (unlikely(i > max_interrupt_work)) {
394069b4b095SJeff Kirsher 			spin_lock_irqsave(&np->lock, flags);
394169b4b095SJeff Kirsher 			/* disable interrupts on the nic */
394269b4b095SJeff Kirsher 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
394369b4b095SJeff Kirsher 			pci_push(base);
394469b4b095SJeff Kirsher 
394569b4b095SJeff Kirsher 			if (!np->in_shutdown) {
394669b4b095SJeff Kirsher 				np->nic_poll_irq |= NVREG_IRQ_OTHER;
394769b4b095SJeff Kirsher 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
394869b4b095SJeff Kirsher 			}
394969b4b095SJeff Kirsher 			spin_unlock_irqrestore(&np->lock, flags);
395069b4b095SJeff Kirsher 			netdev_dbg(dev, "%s: too many iterations (%d)\n",
395169b4b095SJeff Kirsher 				   __func__, i);
395269b4b095SJeff Kirsher 			break;
395369b4b095SJeff Kirsher 		}
395469b4b095SJeff Kirsher 
395569b4b095SJeff Kirsher 	}
395669b4b095SJeff Kirsher 
395769b4b095SJeff Kirsher 	return IRQ_RETVAL(i);
395869b4b095SJeff Kirsher }
395969b4b095SJeff Kirsher 
396069b4b095SJeff Kirsher static irqreturn_t nv_nic_irq_test(int foo, void *data)
396169b4b095SJeff Kirsher {
396269b4b095SJeff Kirsher 	struct net_device *dev = (struct net_device *) data;
396369b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
396469b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
396569b4b095SJeff Kirsher 	u32 events;
396669b4b095SJeff Kirsher 
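	/*
	 * Interrupt self-test handler: only the timer interrupt is
	 * acknowledged here, and np->intr_test records that the interrupt
	 * actually fired, presumably for the ethtool self-test to check.
	 */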
396769b4b095SJeff Kirsher 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
396869b4b095SJeff Kirsher 		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
39692a4e7a08SMike Ditto 		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
397069b4b095SJeff Kirsher 	} else {
397169b4b095SJeff Kirsher 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
39722a4e7a08SMike Ditto 		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
397369b4b095SJeff Kirsher 	}
397469b4b095SJeff Kirsher 	pci_push(base);
397569b4b095SJeff Kirsher 	if (!(events & NVREG_IRQ_TIMER))
397669b4b095SJeff Kirsher 		return IRQ_RETVAL(0);
397769b4b095SJeff Kirsher 
397869b4b095SJeff Kirsher 	nv_msi_workaround(np);
397969b4b095SJeff Kirsher 
398069b4b095SJeff Kirsher 	spin_lock(&np->lock);
398169b4b095SJeff Kirsher 	np->intr_test = 1;
398269b4b095SJeff Kirsher 	spin_unlock(&np->lock);
398369b4b095SJeff Kirsher 
398469b4b095SJeff Kirsher 	return IRQ_RETVAL(1);
398569b4b095SJeff Kirsher }
398669b4b095SJeff Kirsher 
398769b4b095SJeff Kirsher static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
398869b4b095SJeff Kirsher {
398969b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
399069b4b095SJeff Kirsher 	int i;
399169b4b095SJeff Kirsher 	u32 msixmap = 0;
399269b4b095SJeff Kirsher 
399369b4b095SJeff Kirsher 	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
399469b4b095SJeff Kirsher 	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
399569b4b095SJeff Kirsher 	 * the remaining 8 interrupts.
399669b4b095SJeff Kirsher 	 */
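	/*
	 * Illustrative example: with vector == 2 and bit 3 set in irqmask,
	 * the loop below computes msixmap |= 2 << (3 << 2), i.e. vector 2
	 * lands in nibble 3 (bits 12-15) of NvRegMSIXMap0.
	 */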
399769b4b095SJeff Kirsher 	for (i = 0; i < 8; i++) {
399869b4b095SJeff Kirsher 		if ((irqmask >> i) & 0x1)
399969b4b095SJeff Kirsher 			msixmap |= vector << (i << 2);
400069b4b095SJeff Kirsher 	}
400169b4b095SJeff Kirsher 	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
400269b4b095SJeff Kirsher 
400369b4b095SJeff Kirsher 	msixmap = 0;
400469b4b095SJeff Kirsher 	for (i = 0; i < 8; i++) {
400569b4b095SJeff Kirsher 		if ((irqmask >> (i + 8)) & 0x1)
400669b4b095SJeff Kirsher 			msixmap |= vector << (i << 2);
400769b4b095SJeff Kirsher 	}
400869b4b095SJeff Kirsher 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
400969b4b095SJeff Kirsher }
401069b4b095SJeff Kirsher 
401169b4b095SJeff Kirsher static int nv_request_irq(struct net_device *dev, int intr_test)
401269b4b095SJeff Kirsher {
401369b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
401469b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
4015d9bd00a1SAlexander Gordeev 	int ret;
401669b4b095SJeff Kirsher 	int i;
401769b4b095SJeff Kirsher 	irqreturn_t (*handler)(int foo, void *data);
401869b4b095SJeff Kirsher 
401969b4b095SJeff Kirsher 	if (intr_test) {
402069b4b095SJeff Kirsher 		handler = nv_nic_irq_test;
402169b4b095SJeff Kirsher 	} else {
402269b4b095SJeff Kirsher 		if (nv_optimized(np))
402369b4b095SJeff Kirsher 			handler = nv_nic_irq_optimized;
402469b4b095SJeff Kirsher 		else
402569b4b095SJeff Kirsher 			handler = nv_nic_irq;
402669b4b095SJeff Kirsher 	}
402769b4b095SJeff Kirsher 
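	/*
	 * Interrupt setup is a fallback ladder: try MSI-X first (with
	 * separate rx/tx/other vectors in throughput mode, or a single
	 * shared vector otherwise), then plain MSI, and finally the legacy
	 * INTx line.
	 */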
402869b4b095SJeff Kirsher 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
402969b4b095SJeff Kirsher 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
403069b4b095SJeff Kirsher 			np->msi_x_entry[i].entry = i;
403104698ef3SAlexander Gordeev 		ret = pci_enable_msix_range(np->pci_dev,
403204698ef3SAlexander Gordeev 					    np->msi_x_entry,
403304698ef3SAlexander Gordeev 					    np->msi_flags & NV_MSI_X_VECTORS_MASK,
403404698ef3SAlexander Gordeev 					    np->msi_flags & NV_MSI_X_VECTORS_MASK);
403504698ef3SAlexander Gordeev 		if (ret > 0) {
403669b4b095SJeff Kirsher 			np->msi_flags |= NV_MSI_X_ENABLED;
403769b4b095SJeff Kirsher 			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
403869b4b095SJeff Kirsher 				/* Request irq for rx handling */
403969b4b095SJeff Kirsher 				sprintf(np->name_rx, "%s-rx", dev->name);
404061c9471eSAlexander Gordeev 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
404161c9471eSAlexander Gordeev 						  nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
404261c9471eSAlexander Gordeev 				if (ret) {
404369b4b095SJeff Kirsher 					netdev_info(dev,
404469b4b095SJeff Kirsher 						    "request_irq failed for rx %d\n",
404569b4b095SJeff Kirsher 						    ret);
404669b4b095SJeff Kirsher 					pci_disable_msix(np->pci_dev);
404769b4b095SJeff Kirsher 					np->msi_flags &= ~NV_MSI_X_ENABLED;
404869b4b095SJeff Kirsher 					goto out_err;
404969b4b095SJeff Kirsher 				}
405069b4b095SJeff Kirsher 				/* Request irq for tx handling */
405169b4b095SJeff Kirsher 				sprintf(np->name_tx, "%s-tx", dev->name);
405261c9471eSAlexander Gordeev 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
405361c9471eSAlexander Gordeev 						  nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
405461c9471eSAlexander Gordeev 				if (ret) {
405569b4b095SJeff Kirsher 					netdev_info(dev,
405669b4b095SJeff Kirsher 						    "request_irq failed for tx %d\n",
405769b4b095SJeff Kirsher 						    ret);
405869b4b095SJeff Kirsher 					pci_disable_msix(np->pci_dev);
405969b4b095SJeff Kirsher 					np->msi_flags &= ~NV_MSI_X_ENABLED;
406069b4b095SJeff Kirsher 					goto out_free_rx;
406169b4b095SJeff Kirsher 				}
406269b4b095SJeff Kirsher 				/* Request irq for link and timer handling */
406369b4b095SJeff Kirsher 				sprintf(np->name_other, "%s-other", dev->name);
406461c9471eSAlexander Gordeev 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
406561c9471eSAlexander Gordeev 						  nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
406661c9471eSAlexander Gordeev 				if (ret) {
406769b4b095SJeff Kirsher 					netdev_info(dev,
406869b4b095SJeff Kirsher 						    "request_irq failed for link %d\n",
406969b4b095SJeff Kirsher 						    ret);
407069b4b095SJeff Kirsher 					pci_disable_msix(np->pci_dev);
407169b4b095SJeff Kirsher 					np->msi_flags &= ~NV_MSI_X_ENABLED;
407269b4b095SJeff Kirsher 					goto out_free_tx;
407369b4b095SJeff Kirsher 				}
407469b4b095SJeff Kirsher 				/* map interrupts to their respective vector */
407569b4b095SJeff Kirsher 				writel(0, base + NvRegMSIXMap0);
407669b4b095SJeff Kirsher 				writel(0, base + NvRegMSIXMap1);
407769b4b095SJeff Kirsher 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
407869b4b095SJeff Kirsher 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
407969b4b095SJeff Kirsher 				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
408069b4b095SJeff Kirsher 			} else {
408169b4b095SJeff Kirsher 				/* Request irq for all interrupts */
408261c9471eSAlexander Gordeev 				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
408361c9471eSAlexander Gordeev 						  handler, IRQF_SHARED, dev->name, dev);
408461c9471eSAlexander Gordeev 				if (ret) {
408569b4b095SJeff Kirsher 					netdev_info(dev,
408669b4b095SJeff Kirsher 						    "request_irq failed %d\n",
408769b4b095SJeff Kirsher 						    ret);
408869b4b095SJeff Kirsher 					pci_disable_msix(np->pci_dev);
408969b4b095SJeff Kirsher 					np->msi_flags &= ~NV_MSI_X_ENABLED;
409069b4b095SJeff Kirsher 					goto out_err;
409169b4b095SJeff Kirsher 				}
409269b4b095SJeff Kirsher 
409369b4b095SJeff Kirsher 				/* map interrupts to vector 0 */
409469b4b095SJeff Kirsher 				writel(0, base + NvRegMSIXMap0);
409569b4b095SJeff Kirsher 				writel(0, base + NvRegMSIXMap1);
409669b4b095SJeff Kirsher 			}
409789328783SMike Ditto 			netdev_info(dev, "MSI-X enabled\n");
4098d9bd00a1SAlexander Gordeev 			return 0;
409969b4b095SJeff Kirsher 		}
410069b4b095SJeff Kirsher 	}
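	/* MSI-X not available or not enabled; try single message MSI */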
4101d9bd00a1SAlexander Gordeev 	if (np->msi_flags & NV_MSI_CAPABLE) {
410269b4b095SJeff Kirsher 		ret = pci_enable_msi(np->pci_dev);
410369b4b095SJeff Kirsher 		if (ret == 0) {
410469b4b095SJeff Kirsher 			np->msi_flags |= NV_MSI_ENABLED;
410561c9471eSAlexander Gordeev 			ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
410661c9471eSAlexander Gordeev 			if (ret) {
410769b4b095SJeff Kirsher 				netdev_info(dev, "request_irq failed %d\n",
410869b4b095SJeff Kirsher 					    ret);
410969b4b095SJeff Kirsher 				pci_disable_msi(np->pci_dev);
411069b4b095SJeff Kirsher 				np->msi_flags &= ~NV_MSI_ENABLED;
411169b4b095SJeff Kirsher 				goto out_err;
411269b4b095SJeff Kirsher 			}
411369b4b095SJeff Kirsher 
411469b4b095SJeff Kirsher 			/* map interrupts to vector 0 */
411569b4b095SJeff Kirsher 			writel(0, base + NvRegMSIMap0);
411669b4b095SJeff Kirsher 			writel(0, base + NvRegMSIMap1);
411769b4b095SJeff Kirsher 			/* enable msi vector 0 */
411869b4b095SJeff Kirsher 			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
411989328783SMike Ditto 			netdev_info(dev, "MSI enabled\n");
4120d9bd00a1SAlexander Gordeev 			return 0;
412169b4b095SJeff Kirsher 		}
412269b4b095SJeff Kirsher 	}
4123d9bd00a1SAlexander Gordeev 
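	/* neither MSI-X nor MSI could be enabled; use the legacy INTx line */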
412469b4b095SJeff Kirsher 	if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
412569b4b095SJeff Kirsher 		goto out_err;
412669b4b095SJeff Kirsher 
412769b4b095SJeff Kirsher 	return 0;
412869b4b095SJeff Kirsher out_free_tx:
412969b4b095SJeff Kirsher 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
413069b4b095SJeff Kirsher out_free_rx:
413169b4b095SJeff Kirsher 	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
413269b4b095SJeff Kirsher out_err:
413369b4b095SJeff Kirsher 	return 1;
413469b4b095SJeff Kirsher }
413569b4b095SJeff Kirsher 
413669b4b095SJeff Kirsher static void nv_free_irq(struct net_device *dev)
413769b4b095SJeff Kirsher {
413869b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
413969b4b095SJeff Kirsher 	int i;
414069b4b095SJeff Kirsher 
414169b4b095SJeff Kirsher 	if (np->msi_flags & NV_MSI_X_ENABLED) {
414269b4b095SJeff Kirsher 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
414369b4b095SJeff Kirsher 			free_irq(np->msi_x_entry[i].vector, dev);
414469b4b095SJeff Kirsher 		pci_disable_msix(np->pci_dev);
414569b4b095SJeff Kirsher 		np->msi_flags &= ~NV_MSI_X_ENABLED;
414669b4b095SJeff Kirsher 	} else {
414769b4b095SJeff Kirsher 		free_irq(np->pci_dev->irq, dev);
414869b4b095SJeff Kirsher 		if (np->msi_flags & NV_MSI_ENABLED) {
414969b4b095SJeff Kirsher 			pci_disable_msi(np->pci_dev);
415069b4b095SJeff Kirsher 			np->msi_flags &= ~NV_MSI_ENABLED;
415169b4b095SJeff Kirsher 		}
415269b4b095SJeff Kirsher 	}
415369b4b095SJeff Kirsher }
415469b4b095SJeff Kirsher 
4155d9935679SKees Cook static void nv_do_nic_poll(struct timer_list *t)
415669b4b095SJeff Kirsher {
4157d9935679SKees Cook 	struct fe_priv *np = from_timer(np, t, nic_poll);
4158d9935679SKees Cook 	struct net_device *dev = np->dev;
415969b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
416069b4b095SJeff Kirsher 	u32 mask = 0;
41610b7c8743SNeil Horman 	unsigned long flags;
41620b7c8743SNeil Horman 	unsigned int irq = 0;
416369b4b095SJeff Kirsher 
416469b4b095SJeff Kirsher 	/*
416569b4b095SJeff Kirsher 	 * First disable the irq(s) and only then re-enable interrupts on
416669b4b095SJeff Kirsher 	 * the nic; this has to happen before calling nv_nic_irq, because
416769b4b095SJeff Kirsher 	 * that handler may decide to mask them differently.
416869b4b095SJeff Kirsher 	 */
416969b4b095SJeff Kirsher 
417069b4b095SJeff Kirsher 	if (!using_multi_irqs(dev)) {
417169b4b095SJeff Kirsher 		if (np->msi_flags & NV_MSI_X_ENABLED)
41720b7c8743SNeil Horman 			irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
417369b4b095SJeff Kirsher 		else
41740b7c8743SNeil Horman 			irq = np->pci_dev->irq;
417569b4b095SJeff Kirsher 		mask = np->irqmask;
417669b4b095SJeff Kirsher 	} else {
417769b4b095SJeff Kirsher 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
41780b7c8743SNeil Horman 			irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
417969b4b095SJeff Kirsher 			mask |= NVREG_IRQ_RX_ALL;
418069b4b095SJeff Kirsher 		}
418169b4b095SJeff Kirsher 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
41820b7c8743SNeil Horman 			irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
418369b4b095SJeff Kirsher 			mask |= NVREG_IRQ_TX_ALL;
418469b4b095SJeff Kirsher 		}
418569b4b095SJeff Kirsher 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
41860b7c8743SNeil Horman 			irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
418769b4b095SJeff Kirsher 			mask |= NVREG_IRQ_OTHER;
418869b4b095SJeff Kirsher 		}
418969b4b095SJeff Kirsher 	}
41900b7c8743SNeil Horman 
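	/*
	 * Mask the selected irq(s) and wait for any handler still running
	 * on another CPU to finish before touching the nic.
	 */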
41910b7c8743SNeil Horman 	disable_irq_nosync_lockdep_irqsave(irq, &flags);
41920b7c8743SNeil Horman 	synchronize_irq(irq);
419369b4b095SJeff Kirsher 
419469b4b095SJeff Kirsher 	if (np->recover_error) {
419569b4b095SJeff Kirsher 		np->recover_error = 0;
419669b4b095SJeff Kirsher 		netdev_info(dev, "MAC in recoverable error state\n");
419769b4b095SJeff Kirsher 		if (netif_running(dev)) {
419869b4b095SJeff Kirsher 			netif_tx_lock_bh(dev);
419969b4b095SJeff Kirsher 			netif_addr_lock(dev);
420069b4b095SJeff Kirsher 			spin_lock(&np->lock);
420169b4b095SJeff Kirsher 			/* stop engines */
420269b4b095SJeff Kirsher 			nv_stop_rxtx(dev);
420369b4b095SJeff Kirsher 			if (np->driver_data & DEV_HAS_POWER_CNTRL)
420469b4b095SJeff Kirsher 				nv_mac_reset(dev);
420569b4b095SJeff Kirsher 			nv_txrx_reset(dev);
420669b4b095SJeff Kirsher 			/* drain rx and tx queues */
420769b4b095SJeff Kirsher 			nv_drain_rxtx(dev);
420869b4b095SJeff Kirsher 			/* reinit driver view of the rx queue */
420969b4b095SJeff Kirsher 			set_bufsize(dev);
421069b4b095SJeff Kirsher 			if (nv_init_ring(dev)) {
421169b4b095SJeff Kirsher 				if (!np->in_shutdown)
421269b4b095SJeff Kirsher 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
421369b4b095SJeff Kirsher 			}
421469b4b095SJeff Kirsher 			/* reinit nic view of the rx queue */
421569b4b095SJeff Kirsher 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
421669b4b095SJeff Kirsher 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
421769b4b095SJeff Kirsher 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
421869b4b095SJeff Kirsher 				base + NvRegRingSizes);
421969b4b095SJeff Kirsher 			pci_push(base);
422069b4b095SJeff Kirsher 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
422169b4b095SJeff Kirsher 			pci_push(base);
422269b4b095SJeff Kirsher 			/* clear interrupts */
422369b4b095SJeff Kirsher 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
422469b4b095SJeff Kirsher 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
422569b4b095SJeff Kirsher 			else
422669b4b095SJeff Kirsher 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
422769b4b095SJeff Kirsher 
422869b4b095SJeff Kirsher 			/* restart rx and tx engines */
422969b4b095SJeff Kirsher 			nv_start_rxtx(dev);
423069b4b095SJeff Kirsher 			spin_unlock(&np->lock);
423169b4b095SJeff Kirsher 			netif_addr_unlock(dev);
423269b4b095SJeff Kirsher 			netif_tx_unlock_bh(dev);
423369b4b095SJeff Kirsher 		}
423469b4b095SJeff Kirsher 	}
423569b4b095SJeff Kirsher 
423669b4b095SJeff Kirsher 	writel(mask, base + NvRegIrqMask);
423769b4b095SJeff Kirsher 	pci_push(base);
423869b4b095SJeff Kirsher 
423969b4b095SJeff Kirsher 	if (!using_multi_irqs(dev)) {
424069b4b095SJeff Kirsher 		np->nic_poll_irq = 0;
424169b4b095SJeff Kirsher 		if (nv_optimized(np))
424269b4b095SJeff Kirsher 			nv_nic_irq_optimized(0, dev);
424369b4b095SJeff Kirsher 		else
424469b4b095SJeff Kirsher 			nv_nic_irq(0, dev);
424569b4b095SJeff Kirsher 	} else {
424669b4b095SJeff Kirsher 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
424769b4b095SJeff Kirsher 			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
424869b4b095SJeff Kirsher 			nv_nic_irq_rx(0, dev);
424969b4b095SJeff Kirsher 		}
425069b4b095SJeff Kirsher 		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
425169b4b095SJeff Kirsher 			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
425269b4b095SJeff Kirsher 			nv_nic_irq_tx(0, dev);
425369b4b095SJeff Kirsher 		}
425469b4b095SJeff Kirsher 		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
425569b4b095SJeff Kirsher 			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
425669b4b095SJeff Kirsher 			nv_nic_irq_other(0, dev);
425769b4b095SJeff Kirsher 		}
425869b4b095SJeff Kirsher 	}
425969b4b095SJeff Kirsher 
42600b7c8743SNeil Horman 	enable_irq_lockdep_irqrestore(irq, &flags);
426169b4b095SJeff Kirsher }
426269b4b095SJeff Kirsher 
426369b4b095SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
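/*
 * Called by netpoll clients such as netconsole when normal interrupt
 * delivery cannot be relied upon; reuses the timer poll path above.
 */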
426469b4b095SJeff Kirsher static void nv_poll_controller(struct net_device *dev)
426569b4b095SJeff Kirsher {
4266d9935679SKees Cook 	struct fe_priv *np = netdev_priv(dev);
4267d9935679SKees Cook 
4268d9935679SKees Cook 	nv_do_nic_poll(&np->nic_poll);
426969b4b095SJeff Kirsher }
427069b4b095SJeff Kirsher #endif
427169b4b095SJeff Kirsher 
4272d9935679SKees Cook static void nv_do_stats_poll(struct timer_list *t)
4273f5d827aeSdavid decotigny 	__acquires(&netdev_priv(dev)->hwstats_lock)
4274f5d827aeSdavid decotigny 	__releases(&netdev_priv(dev)->hwstats_lock)
427569b4b095SJeff Kirsher {
4276d9935679SKees Cook 	struct fe_priv *np = from_timer(np, t, stats_poll);
4277d9935679SKees Cook 	struct net_device *dev = np->dev;
427869b4b095SJeff Kirsher 
4279f5d827aeSdavid decotigny 	/* If the lock is already held, the stats are being refreshed
4280f5d827aeSdavid decotigny 	 * and are hence fresh enough */
4281f5d827aeSdavid decotigny 	if (spin_trylock(&np->hwstats_lock)) {
4282f5d827aeSdavid decotigny 		nv_update_stats(dev);
4283f5d827aeSdavid decotigny 		spin_unlock(&np->hwstats_lock);
4284f5d827aeSdavid decotigny 	}
428569b4b095SJeff Kirsher 
428669b4b095SJeff Kirsher 	if (!np->in_shutdown)
428769b4b095SJeff Kirsher 		mod_timer(&np->stats_poll,
428869b4b095SJeff Kirsher 			round_jiffies(jiffies + STATS_INTERVAL));
428969b4b095SJeff Kirsher }
429069b4b095SJeff Kirsher 
429169b4b095SJeff Kirsher static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
429269b4b095SJeff Kirsher {
429369b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
4294f029c781SWolfram Sang 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
4295f029c781SWolfram Sang 	strscpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
4296f029c781SWolfram Sang 	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
429769b4b095SJeff Kirsher }
429869b4b095SJeff Kirsher 
429969b4b095SJeff Kirsher static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
430069b4b095SJeff Kirsher {
430169b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
430269b4b095SJeff Kirsher 	wolinfo->supported = WAKE_MAGIC;
430369b4b095SJeff Kirsher 
430469b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
430569b4b095SJeff Kirsher 	if (np->wolenabled)
430669b4b095SJeff Kirsher 		wolinfo->wolopts = WAKE_MAGIC;
430769b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
430869b4b095SJeff Kirsher }
430969b4b095SJeff Kirsher 
431069b4b095SJeff Kirsher static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
431169b4b095SJeff Kirsher {
431269b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
431369b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
431469b4b095SJeff Kirsher 	u32 flags = 0;
431569b4b095SJeff Kirsher 
431669b4b095SJeff Kirsher 	if (wolinfo->wolopts == 0) {
431769b4b095SJeff Kirsher 		np->wolenabled = 0;
431869b4b095SJeff Kirsher 	} else if (wolinfo->wolopts & WAKE_MAGIC) {
431969b4b095SJeff Kirsher 		np->wolenabled = 1;
432069b4b095SJeff Kirsher 		flags = NVREG_WAKEUPFLAGS_ENABLE;
432169b4b095SJeff Kirsher 	}
432269b4b095SJeff Kirsher 	if (netif_running(dev)) {
432369b4b095SJeff Kirsher 		spin_lock_irq(&np->lock);
432469b4b095SJeff Kirsher 		writel(flags, base + NvRegWakeUpFlags);
432569b4b095SJeff Kirsher 		spin_unlock_irq(&np->lock);
432669b4b095SJeff Kirsher 	}
432769b4b095SJeff Kirsher 	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
432869b4b095SJeff Kirsher 	return 0;
432969b4b095SJeff Kirsher }
433069b4b095SJeff Kirsher 
43310fa9e289SPhilippe Reynes static int nv_get_link_ksettings(struct net_device *dev,
43320fa9e289SPhilippe Reynes 				 struct ethtool_link_ksettings *cmd)
433369b4b095SJeff Kirsher {
433469b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
43350fa9e289SPhilippe Reynes 	u32 speed, supported, advertising;
433669b4b095SJeff Kirsher 	int adv;
433769b4b095SJeff Kirsher 
433869b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
43390fa9e289SPhilippe Reynes 	cmd->base.port = PORT_MII;
434069b4b095SJeff Kirsher 	if (!netif_running(dev)) {
434169b4b095SJeff Kirsher 		/* We do not track link speed / duplex setting if the
434269b4b095SJeff Kirsher 		 * interface is disabled. Force a link check */
434369b4b095SJeff Kirsher 		if (nv_update_linkspeed(dev)) {
434469b4b095SJeff Kirsher 			netif_carrier_on(dev);
434569b4b095SJeff Kirsher 		} else {
434669b4b095SJeff Kirsher 			netif_carrier_off(dev);
434769b4b095SJeff Kirsher 		}
434869b4b095SJeff Kirsher 	}
434969b4b095SJeff Kirsher 
435069b4b095SJeff Kirsher 	if (netif_carrier_ok(dev)) {
435169b4b095SJeff Kirsher 		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
435269b4b095SJeff Kirsher 		case NVREG_LINKSPEED_10:
435369b4b095SJeff Kirsher 			speed = SPEED_10;
435469b4b095SJeff Kirsher 			break;
435569b4b095SJeff Kirsher 		case NVREG_LINKSPEED_100:
435669b4b095SJeff Kirsher 			speed = SPEED_100;
435769b4b095SJeff Kirsher 			break;
435869b4b095SJeff Kirsher 		case NVREG_LINKSPEED_1000:
435969b4b095SJeff Kirsher 			speed = SPEED_1000;
436069b4b095SJeff Kirsher 			break;
436169b4b095SJeff Kirsher 		default:
436269b4b095SJeff Kirsher 			speed = -1;
436369b4b095SJeff Kirsher 			break;
436469b4b095SJeff Kirsher 		}
43650fa9e289SPhilippe Reynes 		cmd->base.duplex = DUPLEX_HALF;
436669b4b095SJeff Kirsher 		if (np->duplex)
43670fa9e289SPhilippe Reynes 			cmd->base.duplex = DUPLEX_FULL;
436869b4b095SJeff Kirsher 	} else {
4369537fae01SJiri Pirko 		speed = SPEED_UNKNOWN;
43700fa9e289SPhilippe Reynes 		cmd->base.duplex = DUPLEX_UNKNOWN;
437169b4b095SJeff Kirsher 	}
43720fa9e289SPhilippe Reynes 	cmd->base.speed = speed;
43730fa9e289SPhilippe Reynes 	cmd->base.autoneg = np->autoneg;
437469b4b095SJeff Kirsher 
43750fa9e289SPhilippe Reynes 	advertising = ADVERTISED_MII;
437669b4b095SJeff Kirsher 	if (np->autoneg) {
43770fa9e289SPhilippe Reynes 		advertising |= ADVERTISED_Autoneg;
437869b4b095SJeff Kirsher 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
437969b4b095SJeff Kirsher 		if (adv & ADVERTISE_10HALF)
43800fa9e289SPhilippe Reynes 			advertising |= ADVERTISED_10baseT_Half;
438169b4b095SJeff Kirsher 		if (adv & ADVERTISE_10FULL)
43820fa9e289SPhilippe Reynes 			advertising |= ADVERTISED_10baseT_Full;
438369b4b095SJeff Kirsher 		if (adv & ADVERTISE_100HALF)
43840fa9e289SPhilippe Reynes 			advertising |= ADVERTISED_100baseT_Half;
438569b4b095SJeff Kirsher 		if (adv & ADVERTISE_100FULL)
43860fa9e289SPhilippe Reynes 			advertising |= ADVERTISED_100baseT_Full;
438769b4b095SJeff Kirsher 		if (np->gigabit == PHY_GIGABIT) {
438869b4b095SJeff Kirsher 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
438969b4b095SJeff Kirsher 			if (adv & ADVERTISE_1000FULL)
43900fa9e289SPhilippe Reynes 				advertising |= ADVERTISED_1000baseT_Full;
439169b4b095SJeff Kirsher 		}
439269b4b095SJeff Kirsher 	}
43930fa9e289SPhilippe Reynes 	supported = (SUPPORTED_Autoneg |
439469b4b095SJeff Kirsher 		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
439569b4b095SJeff Kirsher 		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
439669b4b095SJeff Kirsher 		SUPPORTED_MII);
439769b4b095SJeff Kirsher 	if (np->gigabit == PHY_GIGABIT)
43980fa9e289SPhilippe Reynes 		supported |= SUPPORTED_1000baseT_Full;
439969b4b095SJeff Kirsher 
44000fa9e289SPhilippe Reynes 	cmd->base.phy_address = np->phyaddr;
44010fa9e289SPhilippe Reynes 
44020fa9e289SPhilippe Reynes 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
44030fa9e289SPhilippe Reynes 						supported);
44040fa9e289SPhilippe Reynes 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
44050fa9e289SPhilippe Reynes 						advertising);
440669b4b095SJeff Kirsher 
440769b4b095SJeff Kirsher 	/* ignore maxtxpkt, maxrxpkt for now */
440869b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
440969b4b095SJeff Kirsher 	return 0;
441069b4b095SJeff Kirsher }
441169b4b095SJeff Kirsher 
44120fa9e289SPhilippe Reynes static int nv_set_link_ksettings(struct net_device *dev,
44130fa9e289SPhilippe Reynes 				 const struct ethtool_link_ksettings *cmd)
441469b4b095SJeff Kirsher {
441569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
44160fa9e289SPhilippe Reynes 	u32 speed = cmd->base.speed;
44170fa9e289SPhilippe Reynes 	u32 advertising;
441869b4b095SJeff Kirsher 
44190fa9e289SPhilippe Reynes 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
44200fa9e289SPhilippe Reynes 						cmd->link_modes.advertising);
44210fa9e289SPhilippe Reynes 
44220fa9e289SPhilippe Reynes 	if (cmd->base.port != PORT_MII)
442369b4b095SJeff Kirsher 		return -EINVAL;
44240fa9e289SPhilippe Reynes 	if (cmd->base.phy_address != np->phyaddr) {
442569b4b095SJeff Kirsher 		/* TODO: support switching between multiple phys. Should be
442669b4b095SJeff Kirsher 		 * trivial, but not enabled due to lack of test hardware. */
442769b4b095SJeff Kirsher 		return -EINVAL;
442869b4b095SJeff Kirsher 	}
44290fa9e289SPhilippe Reynes 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
443069b4b095SJeff Kirsher 		u32 mask;
443169b4b095SJeff Kirsher 
443269b4b095SJeff Kirsher 		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
443369b4b095SJeff Kirsher 			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
443469b4b095SJeff Kirsher 		if (np->gigabit == PHY_GIGABIT)
443569b4b095SJeff Kirsher 			mask |= ADVERTISED_1000baseT_Full;
443669b4b095SJeff Kirsher 
44370fa9e289SPhilippe Reynes 		if ((advertising & mask) == 0)
443869b4b095SJeff Kirsher 			return -EINVAL;
443969b4b095SJeff Kirsher 
44400fa9e289SPhilippe Reynes 	} else if (cmd->base.autoneg == AUTONEG_DISABLE) {
444169b4b095SJeff Kirsher 		/* Note: forcing speed 1000 with autonegotiation disabled is
444269b4b095SJeff Kirsher 		 * intentionally forbidden - no one should need that. */
444369b4b095SJeff Kirsher 
444469b4b095SJeff Kirsher 		if (speed != SPEED_10 && speed != SPEED_100)
444569b4b095SJeff Kirsher 			return -EINVAL;
44460fa9e289SPhilippe Reynes 		if (cmd->base.duplex != DUPLEX_HALF &&
44470fa9e289SPhilippe Reynes 		    cmd->base.duplex != DUPLEX_FULL)
444869b4b095SJeff Kirsher 			return -EINVAL;
444969b4b095SJeff Kirsher 	} else {
445069b4b095SJeff Kirsher 		return -EINVAL;
445169b4b095SJeff Kirsher 	}
445269b4b095SJeff Kirsher 
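	/*
	 * Take the link down while the PHY is reprogrammed; it comes back
	 * up through autonegotiation or nv_linkchange() below.
	 */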
445369b4b095SJeff Kirsher 	netif_carrier_off(dev);
445469b4b095SJeff Kirsher 	if (netif_running(dev)) {
445569b4b095SJeff Kirsher 		unsigned long flags;
445669b4b095SJeff Kirsher 
445769b4b095SJeff Kirsher 		nv_disable_irq(dev);
445869b4b095SJeff Kirsher 		netif_tx_lock_bh(dev);
445969b4b095SJeff Kirsher 		netif_addr_lock(dev);
446069b4b095SJeff Kirsher 		/* with plain spinlock lockdep complains */
446169b4b095SJeff Kirsher 		spin_lock_irqsave(&np->lock, flags);
446269b4b095SJeff Kirsher 		/* stop engines */
446369b4b095SJeff Kirsher 		/* FIXME:
446469b4b095SJeff Kirsher 		 * this can take some time, and interrupts are disabled
446569b4b095SJeff Kirsher 		 * due to spin_lock_irqsave, but let's hope no daemon
446669b4b095SJeff Kirsher 		 * is going to change the settings very often...
446769b4b095SJeff Kirsher 		 * Worst case:
446869b4b095SJeff Kirsher 		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
446969b4b095SJeff Kirsher 		 * + some minor delays, adding up to approximately one second
447069b4b095SJeff Kirsher 		 */
447169b4b095SJeff Kirsher 		nv_stop_rxtx(dev);
447269b4b095SJeff Kirsher 		spin_unlock_irqrestore(&np->lock, flags);
447369b4b095SJeff Kirsher 		netif_addr_unlock(dev);
447469b4b095SJeff Kirsher 		netif_tx_unlock_bh(dev);
447569b4b095SJeff Kirsher 	}
447669b4b095SJeff Kirsher 
44770fa9e289SPhilippe Reynes 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
447869b4b095SJeff Kirsher 		int adv, bmcr;
447969b4b095SJeff Kirsher 
448069b4b095SJeff Kirsher 		np->autoneg = 1;
448169b4b095SJeff Kirsher 
448269b4b095SJeff Kirsher 		/* advertise only what has been requested */
448369b4b095SJeff Kirsher 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
448469b4b095SJeff Kirsher 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
44850fa9e289SPhilippe Reynes 		if (advertising & ADVERTISED_10baseT_Half)
448669b4b095SJeff Kirsher 			adv |= ADVERTISE_10HALF;
44870fa9e289SPhilippe Reynes 		if (advertising & ADVERTISED_10baseT_Full)
448869b4b095SJeff Kirsher 			adv |= ADVERTISE_10FULL;
44890fa9e289SPhilippe Reynes 		if (advertising & ADVERTISED_100baseT_Half)
449069b4b095SJeff Kirsher 			adv |= ADVERTISE_100HALF;
44910fa9e289SPhilippe Reynes 		if (advertising & ADVERTISED_100baseT_Full)
449269b4b095SJeff Kirsher 			adv |= ADVERTISE_100FULL;
449369b4b095SJeff Kirsher 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
449469b4b095SJeff Kirsher 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
449569b4b095SJeff Kirsher 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
449669b4b095SJeff Kirsher 			adv |=  ADVERTISE_PAUSE_ASYM;
449769b4b095SJeff Kirsher 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
449869b4b095SJeff Kirsher 
449969b4b095SJeff Kirsher 		if (np->gigabit == PHY_GIGABIT) {
450069b4b095SJeff Kirsher 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
450169b4b095SJeff Kirsher 			adv &= ~ADVERTISE_1000FULL;
45020fa9e289SPhilippe Reynes 			if (advertising & ADVERTISED_1000baseT_Full)
450369b4b095SJeff Kirsher 				adv |= ADVERTISE_1000FULL;
450469b4b095SJeff Kirsher 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
450569b4b095SJeff Kirsher 		}
450669b4b095SJeff Kirsher 
450769b4b095SJeff Kirsher 		if (netif_running(dev))
450869b4b095SJeff Kirsher 			netdev_info(dev, "link down\n");
450969b4b095SJeff Kirsher 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
451069b4b095SJeff Kirsher 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
451169b4b095SJeff Kirsher 			bmcr |= BMCR_ANENABLE;
451269b4b095SJeff Kirsher 			/* reset the phy in order for settings to stick,
451369b4b095SJeff Kirsher 			 * and cause autoneg to start */
451469b4b095SJeff Kirsher 			if (phy_reset(dev, bmcr)) {
451569b4b095SJeff Kirsher 				netdev_info(dev, "phy reset failed\n");
451669b4b095SJeff Kirsher 				return -EINVAL;
451769b4b095SJeff Kirsher 			}
451869b4b095SJeff Kirsher 		} else {
451969b4b095SJeff Kirsher 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
452069b4b095SJeff Kirsher 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
452169b4b095SJeff Kirsher 		}
452269b4b095SJeff Kirsher 	} else {
452369b4b095SJeff Kirsher 		int adv, bmcr;
452469b4b095SJeff Kirsher 
452569b4b095SJeff Kirsher 		np->autoneg = 0;
452669b4b095SJeff Kirsher 
452769b4b095SJeff Kirsher 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
452869b4b095SJeff Kirsher 		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
45290fa9e289SPhilippe Reynes 		if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF)
453069b4b095SJeff Kirsher 			adv |= ADVERTISE_10HALF;
45310fa9e289SPhilippe Reynes 		if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL)
453269b4b095SJeff Kirsher 			adv |= ADVERTISE_10FULL;
45330fa9e289SPhilippe Reynes 		if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF)
453469b4b095SJeff Kirsher 			adv |= ADVERTISE_100HALF;
45350fa9e289SPhilippe Reynes 		if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL)
453669b4b095SJeff Kirsher 			adv |= ADVERTISE_100FULL;
453769b4b095SJeff Kirsher 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
453869b4b095SJeff Kirsher 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
453969b4b095SJeff Kirsher 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
454069b4b095SJeff Kirsher 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
454169b4b095SJeff Kirsher 		}
454269b4b095SJeff Kirsher 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
454369b4b095SJeff Kirsher 			adv |=  ADVERTISE_PAUSE_ASYM;
454469b4b095SJeff Kirsher 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
454569b4b095SJeff Kirsher 		}
454669b4b095SJeff Kirsher 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
454769b4b095SJeff Kirsher 		np->fixed_mode = adv;
454869b4b095SJeff Kirsher 
454969b4b095SJeff Kirsher 		if (np->gigabit == PHY_GIGABIT) {
455069b4b095SJeff Kirsher 			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
455169b4b095SJeff Kirsher 			adv &= ~ADVERTISE_1000FULL;
455269b4b095SJeff Kirsher 			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
455369b4b095SJeff Kirsher 		}
455469b4b095SJeff Kirsher 
455569b4b095SJeff Kirsher 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
455669b4b095SJeff Kirsher 		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
455769b4b095SJeff Kirsher 		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
455869b4b095SJeff Kirsher 			bmcr |= BMCR_FULLDPLX;
455969b4b095SJeff Kirsher 		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
456069b4b095SJeff Kirsher 			bmcr |= BMCR_SPEED100;
456169b4b095SJeff Kirsher 		if (np->phy_oui == PHY_OUI_MARVELL) {
456269b4b095SJeff Kirsher 			/* reset the phy in order for forced mode settings to stick */
456369b4b095SJeff Kirsher 			if (phy_reset(dev, bmcr)) {
456469b4b095SJeff Kirsher 				netdev_info(dev, "phy reset failed\n");
456569b4b095SJeff Kirsher 				return -EINVAL;
456669b4b095SJeff Kirsher 			}
456769b4b095SJeff Kirsher 		} else {
456869b4b095SJeff Kirsher 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
456969b4b095SJeff Kirsher 			if (netif_running(dev)) {
457069b4b095SJeff Kirsher 				/* Wait a bit and then reconfigure the nic. */
457169b4b095SJeff Kirsher 				udelay(10);
457269b4b095SJeff Kirsher 				nv_linkchange(dev);
457369b4b095SJeff Kirsher 			}
457469b4b095SJeff Kirsher 		}
457569b4b095SJeff Kirsher 	}
457669b4b095SJeff Kirsher 
457769b4b095SJeff Kirsher 	if (netif_running(dev)) {
457869b4b095SJeff Kirsher 		nv_start_rxtx(dev);
457969b4b095SJeff Kirsher 		nv_enable_irq(dev);
458069b4b095SJeff Kirsher 	}
458169b4b095SJeff Kirsher 
458269b4b095SJeff Kirsher 	return 0;
458369b4b095SJeff Kirsher }
458469b4b095SJeff Kirsher 
458569b4b095SJeff Kirsher #define FORCEDETH_REGS_VER	1
458669b4b095SJeff Kirsher 
458769b4b095SJeff Kirsher static int nv_get_regs_len(struct net_device *dev)
458869b4b095SJeff Kirsher {
458969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
459069b4b095SJeff Kirsher 	return np->register_size;
459169b4b095SJeff Kirsher }
459269b4b095SJeff Kirsher 
459369b4b095SJeff Kirsher static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
459469b4b095SJeff Kirsher {
459569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
459669b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
459769b4b095SJeff Kirsher 	u32 *rbuf = buf;
459869b4b095SJeff Kirsher 	int i;
459969b4b095SJeff Kirsher 
460069b4b095SJeff Kirsher 	regs->version = FORCEDETH_REGS_VER;
460169b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
4602ba9aa134Sdavid decotigny 	for (i = 0; i < np->register_size/sizeof(u32); i++)
460369b4b095SJeff Kirsher 		rbuf[i] = readl(base + i*sizeof(u32));
460469b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
460569b4b095SJeff Kirsher }
460669b4b095SJeff Kirsher 
460769b4b095SJeff Kirsher static int nv_nway_reset(struct net_device *dev)
460869b4b095SJeff Kirsher {
460969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
461069b4b095SJeff Kirsher 	int ret;
461169b4b095SJeff Kirsher 
461269b4b095SJeff Kirsher 	if (np->autoneg) {
461369b4b095SJeff Kirsher 		int bmcr;
461469b4b095SJeff Kirsher 
461569b4b095SJeff Kirsher 		netif_carrier_off(dev);
461669b4b095SJeff Kirsher 		if (netif_running(dev)) {
461769b4b095SJeff Kirsher 			nv_disable_irq(dev);
461869b4b095SJeff Kirsher 			netif_tx_lock_bh(dev);
461969b4b095SJeff Kirsher 			netif_addr_lock(dev);
462069b4b095SJeff Kirsher 			spin_lock(&np->lock);
462169b4b095SJeff Kirsher 			/* stop engines */
462269b4b095SJeff Kirsher 			nv_stop_rxtx(dev);
462369b4b095SJeff Kirsher 			spin_unlock(&np->lock);
462469b4b095SJeff Kirsher 			netif_addr_unlock(dev);
462569b4b095SJeff Kirsher 			netif_tx_unlock_bh(dev);
462669b4b095SJeff Kirsher 			netdev_info(dev, "link down\n");
462769b4b095SJeff Kirsher 		}
462869b4b095SJeff Kirsher 
462969b4b095SJeff Kirsher 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
463069b4b095SJeff Kirsher 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
463169b4b095SJeff Kirsher 			bmcr |= BMCR_ANENABLE;
463269b4b095SJeff Kirsher 			/* reset the phy in order for settings to stick */
463369b4b095SJeff Kirsher 			if (phy_reset(dev, bmcr)) {
463469b4b095SJeff Kirsher 				netdev_info(dev, "phy reset failed\n");
463569b4b095SJeff Kirsher 				return -EINVAL;
463669b4b095SJeff Kirsher 			}
463769b4b095SJeff Kirsher 		} else {
463869b4b095SJeff Kirsher 			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
463969b4b095SJeff Kirsher 			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
464069b4b095SJeff Kirsher 		}
464169b4b095SJeff Kirsher 
464269b4b095SJeff Kirsher 		if (netif_running(dev)) {
464369b4b095SJeff Kirsher 			nv_start_rxtx(dev);
464469b4b095SJeff Kirsher 			nv_enable_irq(dev);
464569b4b095SJeff Kirsher 		}
464669b4b095SJeff Kirsher 		ret = 0;
464769b4b095SJeff Kirsher 	} else {
464869b4b095SJeff Kirsher 		ret = -EINVAL;
464969b4b095SJeff Kirsher 	}
465069b4b095SJeff Kirsher 
465169b4b095SJeff Kirsher 	return ret;
465269b4b095SJeff Kirsher }
465369b4b095SJeff Kirsher 
465474624944SHao Chen static void nv_get_ringparam(struct net_device *dev,
465574624944SHao Chen 			     struct ethtool_ringparam *ring,
465674624944SHao Chen 			     struct kernel_ethtool_ringparam *kernel_ring,
465774624944SHao Chen 			     struct netlink_ext_ack *extack)
465869b4b095SJeff Kirsher {
465969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
466069b4b095SJeff Kirsher 
466169b4b095SJeff Kirsher 	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
466269b4b095SJeff Kirsher 	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
466369b4b095SJeff Kirsher 
466469b4b095SJeff Kirsher 	ring->rx_pending = np->rx_ring_size;
466569b4b095SJeff Kirsher 	ring->tx_pending = np->tx_ring_size;
466669b4b095SJeff Kirsher }
466769b4b095SJeff Kirsher 
466874624944SHao Chen static int nv_set_ringparam(struct net_device *dev,
466974624944SHao Chen 			    struct ethtool_ringparam *ring,
467074624944SHao Chen 			    struct kernel_ethtool_ringparam *kernel_ring,
467174624944SHao Chen 			    struct netlink_ext_ack *extack)
467269b4b095SJeff Kirsher {
467369b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
467469b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
467569b4b095SJeff Kirsher 	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
467669b4b095SJeff Kirsher 	dma_addr_t ring_addr;
467769b4b095SJeff Kirsher 
467869b4b095SJeff Kirsher 	if (ring->rx_pending < RX_RING_MIN ||
467969b4b095SJeff Kirsher 	    ring->tx_pending < TX_RING_MIN ||
468069b4b095SJeff Kirsher 	    ring->rx_mini_pending != 0 ||
468169b4b095SJeff Kirsher 	    ring->rx_jumbo_pending != 0 ||
468269b4b095SJeff Kirsher 	    (np->desc_ver == DESC_VER_1 &&
468369b4b095SJeff Kirsher 	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
468469b4b095SJeff Kirsher 	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
468569b4b095SJeff Kirsher 	    (np->desc_ver != DESC_VER_1 &&
468669b4b095SJeff Kirsher 	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
468769b4b095SJeff Kirsher 	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
468869b4b095SJeff Kirsher 		return -EINVAL;
468969b4b095SJeff Kirsher 	}
469069b4b095SJeff Kirsher 
469169b4b095SJeff Kirsher 	/* allocate new rings first so we can fall back to the old ones on failure */
469269b4b095SJeff Kirsher 	if (!nv_optimized(np)) {
4693e8992e40SZhu Yanjun 		rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
4694e8992e40SZhu Yanjun 					       sizeof(struct ring_desc) *
4695e8992e40SZhu Yanjun 					       (ring->rx_pending +
4696e8992e40SZhu Yanjun 					       ring->tx_pending),
4697e8992e40SZhu Yanjun 					       &ring_addr, GFP_ATOMIC);
469869b4b095SJeff Kirsher 	} else {
4699e8992e40SZhu Yanjun 		rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
4700e8992e40SZhu Yanjun 					       sizeof(struct ring_desc_ex) *
4701e8992e40SZhu Yanjun 					       (ring->rx_pending +
4702e8992e40SZhu Yanjun 					       ring->tx_pending),
4703e8992e40SZhu Yanjun 					       &ring_addr, GFP_ATOMIC);
470469b4b095SJeff Kirsher 	}
47056da2ec56SKees Cook 	rx_skbuff = kmalloc_array(ring->rx_pending, sizeof(struct nv_skb_map),
47066da2ec56SKees Cook 				  GFP_KERNEL);
47076da2ec56SKees Cook 	tx_skbuff = kmalloc_array(ring->tx_pending, sizeof(struct nv_skb_map),
47086da2ec56SKees Cook 				  GFP_KERNEL);
470969b4b095SJeff Kirsher 	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
471069b4b095SJeff Kirsher 		/* fall back to old rings */
471169b4b095SJeff Kirsher 		if (!nv_optimized(np)) {
471269b4b095SJeff Kirsher 			if (rxtx_ring)
4713e8992e40SZhu Yanjun 				dma_free_coherent(&np->pci_dev->dev,
4714e8992e40SZhu Yanjun 						  sizeof(struct ring_desc) *
4715e8992e40SZhu Yanjun 						  (ring->rx_pending +
4716e8992e40SZhu Yanjun 						  ring->tx_pending),
471769b4b095SJeff Kirsher 						  rxtx_ring, ring_addr);
471869b4b095SJeff Kirsher 		} else {
471969b4b095SJeff Kirsher 			if (rxtx_ring)
4720e8992e40SZhu Yanjun 				dma_free_coherent(&np->pci_dev->dev,
4721e8992e40SZhu Yanjun 						  sizeof(struct ring_desc_ex) *
4722e8992e40SZhu Yanjun 						  (ring->rx_pending +
4723e8992e40SZhu Yanjun 						  ring->tx_pending),
472469b4b095SJeff Kirsher 						  rxtx_ring, ring_addr);
472569b4b095SJeff Kirsher 		}
472669b4b095SJeff Kirsher 
472769b4b095SJeff Kirsher 		kfree(rx_skbuff);
472869b4b095SJeff Kirsher 		kfree(tx_skbuff);
472969b4b095SJeff Kirsher 		goto exit;
473069b4b095SJeff Kirsher 	}
473169b4b095SJeff Kirsher 
473269b4b095SJeff Kirsher 	if (netif_running(dev)) {
473369b4b095SJeff Kirsher 		nv_disable_irq(dev);
473469b4b095SJeff Kirsher 		nv_napi_disable(dev);
473569b4b095SJeff Kirsher 		netif_tx_lock_bh(dev);
473669b4b095SJeff Kirsher 		netif_addr_lock(dev);
473769b4b095SJeff Kirsher 		spin_lock(&np->lock);
473869b4b095SJeff Kirsher 		/* stop engines */
473969b4b095SJeff Kirsher 		nv_stop_rxtx(dev);
474069b4b095SJeff Kirsher 		nv_txrx_reset(dev);
474169b4b095SJeff Kirsher 		/* drain queues */
474269b4b095SJeff Kirsher 		nv_drain_rxtx(dev);
474369b4b095SJeff Kirsher 		/* delete queues */
474469b4b095SJeff Kirsher 		free_rings(dev);
474569b4b095SJeff Kirsher 	}
474669b4b095SJeff Kirsher 
474769b4b095SJeff Kirsher 	/* set new values */
474869b4b095SJeff Kirsher 	np->rx_ring_size = ring->rx_pending;
474969b4b095SJeff Kirsher 	np->tx_ring_size = ring->tx_pending;
475069b4b095SJeff Kirsher 
475169b4b095SJeff Kirsher 	if (!nv_optimized(np)) {
475269b4b095SJeff Kirsher 		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
475369b4b095SJeff Kirsher 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
475469b4b095SJeff Kirsher 	} else {
475569b4b095SJeff Kirsher 		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
475669b4b095SJeff Kirsher 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
475769b4b095SJeff Kirsher 	}
475869b4b095SJeff Kirsher 	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
475969b4b095SJeff Kirsher 	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
476069b4b095SJeff Kirsher 	np->ring_addr = ring_addr;
476169b4b095SJeff Kirsher 
476269b4b095SJeff Kirsher 	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
476369b4b095SJeff Kirsher 	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
476469b4b095SJeff Kirsher 
476569b4b095SJeff Kirsher 	if (netif_running(dev)) {
476669b4b095SJeff Kirsher 		/* reinit driver view of the queues */
476769b4b095SJeff Kirsher 		set_bufsize(dev);
476869b4b095SJeff Kirsher 		if (nv_init_ring(dev)) {
476969b4b095SJeff Kirsher 			if (!np->in_shutdown)
477069b4b095SJeff Kirsher 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
477169b4b095SJeff Kirsher 		}
477269b4b095SJeff Kirsher 
477369b4b095SJeff Kirsher 		/* reinit nic view of the queues */
477469b4b095SJeff Kirsher 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
477569b4b095SJeff Kirsher 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
477669b4b095SJeff Kirsher 		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
477769b4b095SJeff Kirsher 			base + NvRegRingSizes);
477869b4b095SJeff Kirsher 		pci_push(base);
477969b4b095SJeff Kirsher 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
478069b4b095SJeff Kirsher 		pci_push(base);
478169b4b095SJeff Kirsher 
478269b4b095SJeff Kirsher 		/* restart engines */
478369b4b095SJeff Kirsher 		nv_start_rxtx(dev);
478469b4b095SJeff Kirsher 		spin_unlock(&np->lock);
478569b4b095SJeff Kirsher 		netif_addr_unlock(dev);
478669b4b095SJeff Kirsher 		netif_tx_unlock_bh(dev);
478769b4b095SJeff Kirsher 		nv_napi_enable(dev);
478869b4b095SJeff Kirsher 		nv_enable_irq(dev);
478969b4b095SJeff Kirsher 	}
479069b4b095SJeff Kirsher 	return 0;
479169b4b095SJeff Kirsher exit:
479269b4b095SJeff Kirsher 	return -ENOMEM;
479369b4b095SJeff Kirsher }
479469b4b095SJeff Kirsher 
479569b4b095SJeff Kirsher static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
479669b4b095SJeff Kirsher {
479769b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
479869b4b095SJeff Kirsher 
479969b4b095SJeff Kirsher 	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
480069b4b095SJeff Kirsher 	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
480169b4b095SJeff Kirsher 	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
480269b4b095SJeff Kirsher }
480369b4b095SJeff Kirsher 
480469b4b095SJeff Kirsher static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
480569b4b095SJeff Kirsher {
480669b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
480769b4b095SJeff Kirsher 	int adv, bmcr;
480869b4b095SJeff Kirsher 
480969b4b095SJeff Kirsher 	if ((!np->autoneg && np->duplex == 0) ||
481069b4b095SJeff Kirsher 	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
481169b4b095SJeff Kirsher 		netdev_info(dev, "cannot set pause settings when forced link is in half duplex\n");
481269b4b095SJeff Kirsher 		return -EINVAL;
481369b4b095SJeff Kirsher 	}
481469b4b095SJeff Kirsher 	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
481569b4b095SJeff Kirsher 		netdev_info(dev, "hardware does not support tx pause frames\n");
481669b4b095SJeff Kirsher 		return -EINVAL;
481769b4b095SJeff Kirsher 	}
481869b4b095SJeff Kirsher 
481969b4b095SJeff Kirsher 	netif_carrier_off(dev);
482069b4b095SJeff Kirsher 	if (netif_running(dev)) {
482169b4b095SJeff Kirsher 		nv_disable_irq(dev);
482269b4b095SJeff Kirsher 		netif_tx_lock_bh(dev);
482369b4b095SJeff Kirsher 		netif_addr_lock(dev);
482469b4b095SJeff Kirsher 		spin_lock(&np->lock);
482569b4b095SJeff Kirsher 		/* stop engines */
482669b4b095SJeff Kirsher 		nv_stop_rxtx(dev);
482769b4b095SJeff Kirsher 		spin_unlock(&np->lock);
482869b4b095SJeff Kirsher 		netif_addr_unlock(dev);
482969b4b095SJeff Kirsher 		netif_tx_unlock_bh(dev);
483069b4b095SJeff Kirsher 	}
483169b4b095SJeff Kirsher 
483269b4b095SJeff Kirsher 	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
483369b4b095SJeff Kirsher 	if (pause->rx_pause)
483469b4b095SJeff Kirsher 		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
483569b4b095SJeff Kirsher 	if (pause->tx_pause)
483669b4b095SJeff Kirsher 		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
483769b4b095SJeff Kirsher 
483869b4b095SJeff Kirsher 	if (np->autoneg && pause->autoneg) {
483969b4b095SJeff Kirsher 		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
484069b4b095SJeff Kirsher 
484169b4b095SJeff Kirsher 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
484269b4b095SJeff Kirsher 		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
484369b4b095SJeff Kirsher 		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
484469b4b095SJeff Kirsher 			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
484569b4b095SJeff Kirsher 		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
484669b4b095SJeff Kirsher 			adv |=  ADVERTISE_PAUSE_ASYM;
484769b4b095SJeff Kirsher 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
484869b4b095SJeff Kirsher 
484969b4b095SJeff Kirsher 		if (netif_running(dev))
485069b4b095SJeff Kirsher 			netdev_info(dev, "link down\n");
485169b4b095SJeff Kirsher 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
485269b4b095SJeff Kirsher 		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
485369b4b095SJeff Kirsher 		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
485469b4b095SJeff Kirsher 	} else {
485569b4b095SJeff Kirsher 		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
485669b4b095SJeff Kirsher 		if (pause->rx_pause)
485769b4b095SJeff Kirsher 			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
485869b4b095SJeff Kirsher 		if (pause->tx_pause)
485969b4b095SJeff Kirsher 			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
486069b4b095SJeff Kirsher 
486169b4b095SJeff Kirsher 		if (!netif_running(dev))
486269b4b095SJeff Kirsher 			nv_update_linkspeed(dev);
486369b4b095SJeff Kirsher 		else
486469b4b095SJeff Kirsher 			nv_update_pause(dev, np->pause_flags);
486569b4b095SJeff Kirsher 	}
486669b4b095SJeff Kirsher 
486769b4b095SJeff Kirsher 	if (netif_running(dev)) {
486869b4b095SJeff Kirsher 		nv_start_rxtx(dev);
486969b4b095SJeff Kirsher 		nv_enable_irq(dev);
487069b4b095SJeff Kirsher 	}
487169b4b095SJeff Kirsher 	return 0;
487269b4b095SJeff Kirsher }
487369b4b095SJeff Kirsher 
4874c8f44affSMichał Mirosław static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
4875e19df76aSSanjay Hortikar {
4876e19df76aSSanjay Hortikar 	struct fe_priv *np = netdev_priv(dev);
4877e19df76aSSanjay Hortikar 	unsigned long flags;
4878e19df76aSSanjay Hortikar 	u32 miicontrol;
4879e19df76aSSanjay Hortikar 	int err, retval = 0;
4880e19df76aSSanjay Hortikar 
4881e19df76aSSanjay Hortikar 	spin_lock_irqsave(&np->lock, flags);
4882e19df76aSSanjay Hortikar 	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4883e19df76aSSanjay Hortikar 	if (features & NETIF_F_LOOPBACK) {
4884e19df76aSSanjay Hortikar 		if (miicontrol & BMCR_LOOPBACK) {
4885e19df76aSSanjay Hortikar 			spin_unlock_irqrestore(&np->lock, flags);
4886e19df76aSSanjay Hortikar 			netdev_info(dev, "Loopback already enabled\n");
4887e19df76aSSanjay Hortikar 			return 0;
4888e19df76aSSanjay Hortikar 		}
4889e19df76aSSanjay Hortikar 		nv_disable_irq(dev);
4890e19df76aSSanjay Hortikar 		/* Turn on loopback mode */
4891e19df76aSSanjay Hortikar 		miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
4892e19df76aSSanjay Hortikar 		err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
4893e19df76aSSanjay Hortikar 		if (err) {
4894e19df76aSSanjay Hortikar 			retval = PHY_ERROR;
4895e19df76aSSanjay Hortikar 			spin_unlock_irqrestore(&np->lock, flags);
4896e19df76aSSanjay Hortikar 			phy_init(dev);
4897e19df76aSSanjay Hortikar 		} else {
4898e19df76aSSanjay Hortikar 			if (netif_running(dev)) {
4899e19df76aSSanjay Hortikar 				/* Force 1000 Mbps full-duplex */
4900e19df76aSSanjay Hortikar 				nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
4901e19df76aSSanjay Hortikar 									 1);
4902e19df76aSSanjay Hortikar 				/* Force link up */
4903e19df76aSSanjay Hortikar 				netif_carrier_on(dev);
4904e19df76aSSanjay Hortikar 			}
4905e19df76aSSanjay Hortikar 			spin_unlock_irqrestore(&np->lock, flags);
4906e19df76aSSanjay Hortikar 			netdev_info(dev,
4907e19df76aSSanjay Hortikar 				"Internal PHY loopback mode enabled.\n");
4908e19df76aSSanjay Hortikar 		}
4909e19df76aSSanjay Hortikar 	} else {
4910e19df76aSSanjay Hortikar 		if (!(miicontrol & BMCR_LOOPBACK)) {
4911e19df76aSSanjay Hortikar 			spin_unlock_irqrestore(&np->lock, flags);
4912e19df76aSSanjay Hortikar 			netdev_info(dev, "Loopback already disabled\n");
4913e19df76aSSanjay Hortikar 			return 0;
4914e19df76aSSanjay Hortikar 		}
4915e19df76aSSanjay Hortikar 		nv_disable_irq(dev);
4916e19df76aSSanjay Hortikar 		/* Turn off loopback */
4917e19df76aSSanjay Hortikar 		spin_unlock_irqrestore(&np->lock, flags);
4918e19df76aSSanjay Hortikar 		netdev_info(dev, "Internal PHY loopback mode disabled.\n");
4919e19df76aSSanjay Hortikar 		phy_init(dev);
4920e19df76aSSanjay Hortikar 	}
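	/* give the PHY/link state time to settle before re-enabling irqs */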
4921e19df76aSSanjay Hortikar 	msleep(500);
4922e19df76aSSanjay Hortikar 	spin_lock_irqsave(&np->lock, flags);
4923e19df76aSSanjay Hortikar 	nv_enable_irq(dev);
4924e19df76aSSanjay Hortikar 	spin_unlock_irqrestore(&np->lock, flags);
4925e19df76aSSanjay Hortikar 
4926e19df76aSSanjay Hortikar 	return retval;
4927e19df76aSSanjay Hortikar }
4928e19df76aSSanjay Hortikar 
4929c8f44affSMichał Mirosław static netdev_features_t nv_fix_features(struct net_device *dev,
4930c8f44affSMichał Mirosław 	netdev_features_t features)
493169b4b095SJeff Kirsher {
493269b4b095SJeff Kirsher 	/* vlan is dependent on rx checksum offload */
4933f646968fSPatrick McHardy 	if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
493469b4b095SJeff Kirsher 		features |= NETIF_F_RXCSUM;
493569b4b095SJeff Kirsher 
493669b4b095SJeff Kirsher 	return features;
493769b4b095SJeff Kirsher }
493869b4b095SJeff Kirsher 
4939c8f44affSMichał Mirosław static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
494069b4b095SJeff Kirsher {
494169b4b095SJeff Kirsher 	struct fe_priv *np = get_nvpriv(dev);
494269b4b095SJeff Kirsher 
494369b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
494469b4b095SJeff Kirsher 
4945f646968fSPatrick McHardy 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
494669b4b095SJeff Kirsher 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
494769b4b095SJeff Kirsher 	else
494869b4b095SJeff Kirsher 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
494969b4b095SJeff Kirsher 
4950f646968fSPatrick McHardy 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
495169b4b095SJeff Kirsher 		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
495269b4b095SJeff Kirsher 	else
495369b4b095SJeff Kirsher 		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
495469b4b095SJeff Kirsher 
495569b4b095SJeff Kirsher 	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
495669b4b095SJeff Kirsher 
495769b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
495869b4b095SJeff Kirsher }
495969b4b095SJeff Kirsher 
4960c8f44affSMichał Mirosław static int nv_set_features(struct net_device *dev, netdev_features_t features)
496169b4b095SJeff Kirsher {
496269b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
496369b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
4964c8f44affSMichał Mirosław 	netdev_features_t changed = dev->features ^ features;
4965e19df76aSSanjay Hortikar 	int retval;
4966e19df76aSSanjay Hortikar 
4967e19df76aSSanjay Hortikar 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
4968e19df76aSSanjay Hortikar 		retval = nv_set_loopback(dev, features);
4969e19df76aSSanjay Hortikar 		if (retval != 0)
4970e19df76aSSanjay Hortikar 			return retval;
4971e19df76aSSanjay Hortikar 	}
497269b4b095SJeff Kirsher 
497369b4b095SJeff Kirsher 	if (changed & NETIF_F_RXCSUM) {
497469b4b095SJeff Kirsher 		spin_lock_irq(&np->lock);
497569b4b095SJeff Kirsher 
497669b4b095SJeff Kirsher 		if (features & NETIF_F_RXCSUM)
497769b4b095SJeff Kirsher 			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
497869b4b095SJeff Kirsher 		else
497969b4b095SJeff Kirsher 			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
498069b4b095SJeff Kirsher 
498169b4b095SJeff Kirsher 		if (netif_running(dev))
498269b4b095SJeff Kirsher 			writel(np->txrxctl_bits, base + NvRegTxRxControl);
498369b4b095SJeff Kirsher 
498469b4b095SJeff Kirsher 		spin_unlock_irq(&np->lock);
498569b4b095SJeff Kirsher 	}
498669b4b095SJeff Kirsher 
4987f646968fSPatrick McHardy 	if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
498869b4b095SJeff Kirsher 		nv_vlan_mode(dev, features);
498969b4b095SJeff Kirsher 
499069b4b095SJeff Kirsher 	return 0;
499169b4b095SJeff Kirsher }
499269b4b095SJeff Kirsher 
499369b4b095SJeff Kirsher static int nv_get_sset_count(struct net_device *dev, int sset)
499469b4b095SJeff Kirsher {
499569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
499669b4b095SJeff Kirsher 
499769b4b095SJeff Kirsher 	switch (sset) {
499869b4b095SJeff Kirsher 	case ETH_SS_TEST:
499969b4b095SJeff Kirsher 		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
500069b4b095SJeff Kirsher 			return NV_TEST_COUNT_EXTENDED;
500169b4b095SJeff Kirsher 		else
500269b4b095SJeff Kirsher 			return NV_TEST_COUNT_BASE;
500369b4b095SJeff Kirsher 	case ETH_SS_STATS:
500469b4b095SJeff Kirsher 		if (np->driver_data & DEV_HAS_STATISTICS_V3)
500569b4b095SJeff Kirsher 			return NV_DEV_STATISTICS_V3_COUNT;
500669b4b095SJeff Kirsher 		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
500769b4b095SJeff Kirsher 			return NV_DEV_STATISTICS_V2_COUNT;
500869b4b095SJeff Kirsher 		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
500969b4b095SJeff Kirsher 			return NV_DEV_STATISTICS_V1_COUNT;
501069b4b095SJeff Kirsher 		else
501169b4b095SJeff Kirsher 			return 0;
501269b4b095SJeff Kirsher 	default:
501369b4b095SJeff Kirsher 		return -EOPNOTSUPP;
501469b4b095SJeff Kirsher 	}
501569b4b095SJeff Kirsher }
501669b4b095SJeff Kirsher 
5017f5d827aeSdavid decotigny static void nv_get_ethtool_stats(struct net_device *dev,
5018f5d827aeSdavid decotigny 				 struct ethtool_stats *estats, u64 *buffer)
5019f5d827aeSdavid decotigny 	__acquires(&netdev_priv(dev)->hwstats_lock)
5020f5d827aeSdavid decotigny 	__releases(&netdev_priv(dev)->hwstats_lock)
502169b4b095SJeff Kirsher {
502269b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
502369b4b095SJeff Kirsher 
5024f5d827aeSdavid decotigny 	spin_lock_bh(&np->hwstats_lock);
5025f5d827aeSdavid decotigny 	nv_update_stats(dev);
5026f5d827aeSdavid decotigny 	memcpy(buffer, &np->estats,
5027f5d827aeSdavid decotigny 	       nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
5028f5d827aeSdavid decotigny 	spin_unlock_bh(&np->hwstats_lock);
502969b4b095SJeff Kirsher }
503069b4b095SJeff Kirsher 
503169b4b095SJeff Kirsher static int nv_link_test(struct net_device *dev)
503269b4b095SJeff Kirsher {
503369b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
503469b4b095SJeff Kirsher 	int mii_status;
503569b4b095SJeff Kirsher 
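	/* BMSR latches a lost link; read twice so the second read reports
	 * the current link state */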
503669b4b095SJeff Kirsher 	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
503769b4b095SJeff Kirsher 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
503869b4b095SJeff Kirsher 
503969b4b095SJeff Kirsher 	/* check phy link status */
504069b4b095SJeff Kirsher 	if (!(mii_status & BMSR_LSTATUS))
504169b4b095SJeff Kirsher 		return 0;
504269b4b095SJeff Kirsher 	else
504369b4b095SJeff Kirsher 		return 1;
504469b4b095SJeff Kirsher }
504569b4b095SJeff Kirsher 
504669b4b095SJeff Kirsher static int nv_register_test(struct net_device *dev)
504769b4b095SJeff Kirsher {
504869b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
504969b4b095SJeff Kirsher 	int i = 0;
505069b4b095SJeff Kirsher 	u32 orig_read, new_read;
505169b4b095SJeff Kirsher 
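	/*
	 * Walk nv_registers_test[], toggle each register's maskable bits
	 * and verify that the new value reads back; the original value is
	 * restored afterwards. Returns 1 on pass, 0 on the first mismatch.
	 */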
505269b4b095SJeff Kirsher 	do {
505369b4b095SJeff Kirsher 		orig_read = readl(base + nv_registers_test[i].reg);
505469b4b095SJeff Kirsher 
505569b4b095SJeff Kirsher 		/* xor with mask to toggle bits */
505669b4b095SJeff Kirsher 		orig_read ^= nv_registers_test[i].mask;
505769b4b095SJeff Kirsher 
505869b4b095SJeff Kirsher 		writel(orig_read, base + nv_registers_test[i].reg);
505969b4b095SJeff Kirsher 
506069b4b095SJeff Kirsher 		new_read = readl(base + nv_registers_test[i].reg);
506169b4b095SJeff Kirsher 
506269b4b095SJeff Kirsher 		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
506369b4b095SJeff Kirsher 			return 0;
506469b4b095SJeff Kirsher 
506569b4b095SJeff Kirsher 		/* restore original value */
506669b4b095SJeff Kirsher 		orig_read ^= nv_registers_test[i].mask;
506769b4b095SJeff Kirsher 		writel(orig_read, base + nv_registers_test[i].reg);
506869b4b095SJeff Kirsher 
506969b4b095SJeff Kirsher 	} while (nv_registers_test[++i].reg != 0);
507069b4b095SJeff Kirsher 
507169b4b095SJeff Kirsher 	return 1;
507269b4b095SJeff Kirsher }
507369b4b095SJeff Kirsher 
507469b4b095SJeff Kirsher static int nv_interrupt_test(struct net_device *dev)
507569b4b095SJeff Kirsher {
507669b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
507769b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
507869b4b095SJeff Kirsher 	int ret = 1;
507969b4b095SJeff Kirsher 	int testcnt;
508069b4b095SJeff Kirsher 	u32 save_msi_flags, save_poll_interval = 0;
508169b4b095SJeff Kirsher 
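	/*
	 * Temporarily switch to a single test vector, enable only the timer
	 * interrupt and check that the test ISR ran (it sets np->intr_test).
	 * Returns 1 on pass, 2 if no interrupt was seen, 0 on setup failure.
	 */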
508269b4b095SJeff Kirsher 	if (netif_running(dev)) {
508369b4b095SJeff Kirsher 		/* free current irq */
508469b4b095SJeff Kirsher 		nv_free_irq(dev);
508569b4b095SJeff Kirsher 		save_poll_interval = readl(base+NvRegPollingInterval);
508669b4b095SJeff Kirsher 	}
508769b4b095SJeff Kirsher 
508869b4b095SJeff Kirsher 	/* flag to test interrupt handler */
508969b4b095SJeff Kirsher 	np->intr_test = 0;
509069b4b095SJeff Kirsher 
509169b4b095SJeff Kirsher 	/* setup test irq */
509269b4b095SJeff Kirsher 	save_msi_flags = np->msi_flags;
509369b4b095SJeff Kirsher 	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
509469b4b095SJeff Kirsher 	np->msi_flags |= 0x001; /* setup 1 vector */
509569b4b095SJeff Kirsher 	if (nv_request_irq(dev, 1))
509669b4b095SJeff Kirsher 		return 0;
509769b4b095SJeff Kirsher 
509869b4b095SJeff Kirsher 	/* setup timer interrupt */
509969b4b095SJeff Kirsher 	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
510069b4b095SJeff Kirsher 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
510169b4b095SJeff Kirsher 
510269b4b095SJeff Kirsher 	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
510369b4b095SJeff Kirsher 
510469b4b095SJeff Kirsher 	/* wait for at least one interrupt */
510569b4b095SJeff Kirsher 	msleep(100);
510669b4b095SJeff Kirsher 
510769b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
510869b4b095SJeff Kirsher 
510969b4b095SJeff Kirsher 	/* flag should be set within ISR */
511069b4b095SJeff Kirsher 	testcnt = np->intr_test;
511169b4b095SJeff Kirsher 	if (!testcnt)
511269b4b095SJeff Kirsher 		ret = 2;
511369b4b095SJeff Kirsher 
511469b4b095SJeff Kirsher 	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
511569b4b095SJeff Kirsher 	if (!(np->msi_flags & NV_MSI_X_ENABLED))
511669b4b095SJeff Kirsher 		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
511769b4b095SJeff Kirsher 	else
511869b4b095SJeff Kirsher 		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
511969b4b095SJeff Kirsher 
512069b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
512169b4b095SJeff Kirsher 
512269b4b095SJeff Kirsher 	nv_free_irq(dev);
512369b4b095SJeff Kirsher 
512469b4b095SJeff Kirsher 	np->msi_flags = save_msi_flags;
512569b4b095SJeff Kirsher 
512669b4b095SJeff Kirsher 	if (netif_running(dev)) {
512769b4b095SJeff Kirsher 		writel(save_poll_interval, base + NvRegPollingInterval);
512869b4b095SJeff Kirsher 		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
512969b4b095SJeff Kirsher 		/* restore original irq */
513069b4b095SJeff Kirsher 		if (nv_request_irq(dev, 0))
513169b4b095SJeff Kirsher 			return 0;
513269b4b095SJeff Kirsher 	}
513369b4b095SJeff Kirsher 
513469b4b095SJeff Kirsher 	return ret;
513569b4b095SJeff Kirsher }
513669b4b095SJeff Kirsher 
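/*
 * Put the MAC into loopback via NvRegMisc1/NvRegPacketFilterFlags,
 * transmit a single ETH_DATA_LEN frame filled with an incrementing
 * byte pattern, and verify that it arrives intact in the first rx
 * descriptor. Returns 1 on pass, 0 on failure.
 */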
513769b4b095SJeff Kirsher static int nv_loopback_test(struct net_device *dev)
513869b4b095SJeff Kirsher {
513969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
514069b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
514169b4b095SJeff Kirsher 	struct sk_buff *tx_skb, *rx_skb;
514269b4b095SJeff Kirsher 	dma_addr_t test_dma_addr;
514369b4b095SJeff Kirsher 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
514469b4b095SJeff Kirsher 	u32 flags;
514569b4b095SJeff Kirsher 	int len, i, pkt_len;
514669b4b095SJeff Kirsher 	u8 *pkt_data;
514769b4b095SJeff Kirsher 	u32 filter_flags = 0;
514869b4b095SJeff Kirsher 	u32 misc1_flags = 0;
514969b4b095SJeff Kirsher 	int ret = 1;
515069b4b095SJeff Kirsher 
515169b4b095SJeff Kirsher 	if (netif_running(dev)) {
515269b4b095SJeff Kirsher 		nv_disable_irq(dev);
515369b4b095SJeff Kirsher 		filter_flags = readl(base + NvRegPacketFilterFlags);
515469b4b095SJeff Kirsher 		misc1_flags = readl(base + NvRegMisc1);
515569b4b095SJeff Kirsher 	} else {
515669b4b095SJeff Kirsher 		nv_txrx_reset(dev);
515769b4b095SJeff Kirsher 	}
515869b4b095SJeff Kirsher 
515969b4b095SJeff Kirsher 	/* reinit driver view of the rx queue */
516069b4b095SJeff Kirsher 	set_bufsize(dev);
516169b4b095SJeff Kirsher 	nv_init_ring(dev);
516269b4b095SJeff Kirsher 
516369b4b095SJeff Kirsher 	/* setup hardware for loopback */
516469b4b095SJeff Kirsher 	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
516569b4b095SJeff Kirsher 	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
516669b4b095SJeff Kirsher 
516769b4b095SJeff Kirsher 	/* reinit nic view of the rx queue */
516869b4b095SJeff Kirsher 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
516969b4b095SJeff Kirsher 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
517069b4b095SJeff Kirsher 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
517169b4b095SJeff Kirsher 		base + NvRegRingSizes);
517269b4b095SJeff Kirsher 	pci_push(base);
517369b4b095SJeff Kirsher 
517469b4b095SJeff Kirsher 	/* restart rx engine */
517569b4b095SJeff Kirsher 	nv_start_rxtx(dev);
517669b4b095SJeff Kirsher 
517769b4b095SJeff Kirsher 	/* setup packet for tx */
517869b4b095SJeff Kirsher 	pkt_len = ETH_DATA_LEN;
5179dae2e9f4SPradeep A. Dalvi 	tx_skb = netdev_alloc_skb(dev, pkt_len);
518069b4b095SJeff Kirsher 	if (!tx_skb) {
518169b4b095SJeff Kirsher 		ret = 0;
518269b4b095SJeff Kirsher 		goto out;
518369b4b095SJeff Kirsher 	}
51847598b349SZhu Yanjun 	test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
518569b4b095SJeff Kirsher 				       skb_tailroom(tx_skb),
51867598b349SZhu Yanjun 				       DMA_TO_DEVICE);
518739e50d96SZhu Yanjun 	if (unlikely(dma_mapping_error(&np->pci_dev->dev,
518839e50d96SZhu Yanjun 				       test_dma_addr))) {
5189612a7c4eSLarry Finger 		dev_kfree_skb_any(tx_skb);
		ret = 0; /* mapping failed, so the loopback test cannot pass */
5190612a7c4eSLarry Finger 		goto out;
5191612a7c4eSLarry Finger 	}
519269b4b095SJeff Kirsher 	pkt_data = skb_put(tx_skb, pkt_len);
519369b4b095SJeff Kirsher 	for (i = 0; i < pkt_len; i++)
519469b4b095SJeff Kirsher 		pkt_data[i] = (u8)(i & 0xff);
519569b4b095SJeff Kirsher 
519669b4b095SJeff Kirsher 	if (!nv_optimized(np)) {
519769b4b095SJeff Kirsher 		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
519869b4b095SJeff Kirsher 		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
519969b4b095SJeff Kirsher 	} else {
520069b4b095SJeff Kirsher 		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
520169b4b095SJeff Kirsher 		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
520269b4b095SJeff Kirsher 		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
520369b4b095SJeff Kirsher 	}
520469b4b095SJeff Kirsher 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
520569b4b095SJeff Kirsher 	pci_push(get_hwbase(dev));
520669b4b095SJeff Kirsher 
520769b4b095SJeff Kirsher 	msleep(500);
520869b4b095SJeff Kirsher 
520969b4b095SJeff Kirsher 	/* check for rx of the packet */
521069b4b095SJeff Kirsher 	if (!nv_optimized(np)) {
521169b4b095SJeff Kirsher 		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
521269b4b095SJeff Kirsher 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
521369b4b095SJeff Kirsher 
521469b4b095SJeff Kirsher 	} else {
521569b4b095SJeff Kirsher 		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
521669b4b095SJeff Kirsher 		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
521769b4b095SJeff Kirsher 	}
521869b4b095SJeff Kirsher 
521969b4b095SJeff Kirsher 	if (flags & NV_RX_AVAIL) {
522069b4b095SJeff Kirsher 		ret = 0;
522169b4b095SJeff Kirsher 	} else if (np->desc_ver == DESC_VER_1) {
522269b4b095SJeff Kirsher 		if (flags & NV_RX_ERROR)
522369b4b095SJeff Kirsher 			ret = 0;
522469b4b095SJeff Kirsher 	} else {
522569b4b095SJeff Kirsher 		if (flags & NV_RX2_ERROR)
522669b4b095SJeff Kirsher 			ret = 0;
522769b4b095SJeff Kirsher 	}
522869b4b095SJeff Kirsher 
522969b4b095SJeff Kirsher 	if (ret) {
523069b4b095SJeff Kirsher 		if (len != pkt_len) {
523169b4b095SJeff Kirsher 			ret = 0;
523269b4b095SJeff Kirsher 		} else {
523369b4b095SJeff Kirsher 			rx_skb = np->rx_skb[0].skb;
523469b4b095SJeff Kirsher 			for (i = 0; i < pkt_len; i++) {
523569b4b095SJeff Kirsher 				if (rx_skb->data[i] != (u8)(i & 0xff)) {
523669b4b095SJeff Kirsher 					ret = 0;
523769b4b095SJeff Kirsher 					break;
523869b4b095SJeff Kirsher 				}
523969b4b095SJeff Kirsher 			}
524069b4b095SJeff Kirsher 		}
524169b4b095SJeff Kirsher 	}
524269b4b095SJeff Kirsher 
52437598b349SZhu Yanjun 	dma_unmap_single(&np->pci_dev->dev, test_dma_addr,
524469b4b095SJeff Kirsher 			 (skb_end_pointer(tx_skb) - tx_skb->data),
52457598b349SZhu Yanjun 			 DMA_TO_DEVICE);
524669b4b095SJeff Kirsher 	dev_kfree_skb_any(tx_skb);
524769b4b095SJeff Kirsher  out:
524869b4b095SJeff Kirsher 	/* stop engines */
524969b4b095SJeff Kirsher 	nv_stop_rxtx(dev);
525069b4b095SJeff Kirsher 	nv_txrx_reset(dev);
525169b4b095SJeff Kirsher 	/* drain rx queue */
525269b4b095SJeff Kirsher 	nv_drain_rxtx(dev);
525369b4b095SJeff Kirsher 
525469b4b095SJeff Kirsher 	if (netif_running(dev)) {
525569b4b095SJeff Kirsher 		writel(misc1_flags, base + NvRegMisc1);
525669b4b095SJeff Kirsher 		writel(filter_flags, base + NvRegPacketFilterFlags);
525769b4b095SJeff Kirsher 		nv_enable_irq(dev);
525869b4b095SJeff Kirsher 	}
525969b4b095SJeff Kirsher 
526069b4b095SJeff Kirsher 	return ret;
526169b4b095SJeff Kirsher }
526269b4b095SJeff Kirsher 
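/*
 * ethtool self-test entry point. Result slots: buffer[0] link,
 * buffer[1] registers, buffer[2] interrupt, buffer[3] loopback; the
 * last three run only in offline mode, which quiesces the device
 * first and fully reinitializes it afterwards.
 */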
526369b4b095SJeff Kirsher static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
526469b4b095SJeff Kirsher {
526569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
526669b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
526786d9be26SIvan Vecera 	int result, count;
526886d9be26SIvan Vecera 
526986d9be26SIvan Vecera 	count = nv_get_sset_count(dev, ETH_SS_TEST);
527086d9be26SIvan Vecera 	memset(buffer, 0, count * sizeof(u64));
527169b4b095SJeff Kirsher 
527269b4b095SJeff Kirsher 	if (!nv_link_test(dev)) {
527369b4b095SJeff Kirsher 		test->flags |= ETH_TEST_FL_FAILED;
527469b4b095SJeff Kirsher 		buffer[0] = 1;
527569b4b095SJeff Kirsher 	}
527669b4b095SJeff Kirsher 
527769b4b095SJeff Kirsher 	if (test->flags & ETH_TEST_FL_OFFLINE) {
527869b4b095SJeff Kirsher 		if (netif_running(dev)) {
527969b4b095SJeff Kirsher 			netif_stop_queue(dev);
528069b4b095SJeff Kirsher 			nv_napi_disable(dev);
528169b4b095SJeff Kirsher 			netif_tx_lock_bh(dev);
528269b4b095SJeff Kirsher 			netif_addr_lock(dev);
528369b4b095SJeff Kirsher 			spin_lock_irq(&np->lock);
528469b4b095SJeff Kirsher 			nv_disable_hw_interrupts(dev, np->irqmask);
528569b4b095SJeff Kirsher 			if (!(np->msi_flags & NV_MSI_X_ENABLED))
528669b4b095SJeff Kirsher 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
528769b4b095SJeff Kirsher 			else
528869b4b095SJeff Kirsher 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
528969b4b095SJeff Kirsher 			/* stop engines */
529069b4b095SJeff Kirsher 			nv_stop_rxtx(dev);
529169b4b095SJeff Kirsher 			nv_txrx_reset(dev);
529269b4b095SJeff Kirsher 			/* drain rx queue */
529369b4b095SJeff Kirsher 			nv_drain_rxtx(dev);
529469b4b095SJeff Kirsher 			spin_unlock_irq(&np->lock);
529569b4b095SJeff Kirsher 			netif_addr_unlock(dev);
529669b4b095SJeff Kirsher 			netif_tx_unlock_bh(dev);
529769b4b095SJeff Kirsher 		}
529869b4b095SJeff Kirsher 
529969b4b095SJeff Kirsher 		if (!nv_register_test(dev)) {
530069b4b095SJeff Kirsher 			test->flags |= ETH_TEST_FL_FAILED;
530169b4b095SJeff Kirsher 			buffer[1] = 1;
530269b4b095SJeff Kirsher 		}
530369b4b095SJeff Kirsher 
530469b4b095SJeff Kirsher 		result = nv_interrupt_test(dev);
530569b4b095SJeff Kirsher 		if (result != 1) {
530669b4b095SJeff Kirsher 			test->flags |= ETH_TEST_FL_FAILED;
530769b4b095SJeff Kirsher 			buffer[2] = 1;
530869b4b095SJeff Kirsher 		}
530969b4b095SJeff Kirsher 		if (result == 0) {
531069b4b095SJeff Kirsher 			/* bail out */
531169b4b095SJeff Kirsher 			return;
531269b4b095SJeff Kirsher 		}
531369b4b095SJeff Kirsher 
531486d9be26SIvan Vecera 		if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
531569b4b095SJeff Kirsher 			test->flags |= ETH_TEST_FL_FAILED;
531669b4b095SJeff Kirsher 			buffer[3] = 1;
531769b4b095SJeff Kirsher 		}
531869b4b095SJeff Kirsher 
531969b4b095SJeff Kirsher 		if (netif_running(dev)) {
532069b4b095SJeff Kirsher 			/* reinit driver view of the rx queue */
532169b4b095SJeff Kirsher 			set_bufsize(dev);
532269b4b095SJeff Kirsher 			if (nv_init_ring(dev)) {
532369b4b095SJeff Kirsher 				if (!np->in_shutdown)
532469b4b095SJeff Kirsher 					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
532569b4b095SJeff Kirsher 			}
532669b4b095SJeff Kirsher 			/* reinit nic view of the rx queue */
532769b4b095SJeff Kirsher 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
532869b4b095SJeff Kirsher 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
532969b4b095SJeff Kirsher 			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
533069b4b095SJeff Kirsher 				base + NvRegRingSizes);
533169b4b095SJeff Kirsher 			pci_push(base);
533269b4b095SJeff Kirsher 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
533369b4b095SJeff Kirsher 			pci_push(base);
533469b4b095SJeff Kirsher 			/* restart rx engine */
533569b4b095SJeff Kirsher 			nv_start_rxtx(dev);
533669b4b095SJeff Kirsher 			netif_start_queue(dev);
533769b4b095SJeff Kirsher 			nv_napi_enable(dev);
533869b4b095SJeff Kirsher 			nv_enable_hw_interrupts(dev, np->irqmask);
533969b4b095SJeff Kirsher 		}
534069b4b095SJeff Kirsher 	}
534169b4b095SJeff Kirsher }
534269b4b095SJeff Kirsher 
534369b4b095SJeff Kirsher static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
534469b4b095SJeff Kirsher {
534569b4b095SJeff Kirsher 	switch (stringset) {
534669b4b095SJeff Kirsher 	case ETH_SS_STATS:
534769b4b095SJeff Kirsher 		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
534869b4b095SJeff Kirsher 		break;
534969b4b095SJeff Kirsher 	case ETH_SS_TEST:
535069b4b095SJeff Kirsher 		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
535169b4b095SJeff Kirsher 		break;
535269b4b095SJeff Kirsher 	}
535369b4b095SJeff Kirsher }
535469b4b095SJeff Kirsher 
535569b4b095SJeff Kirsher static const struct ethtool_ops ops = {
535669b4b095SJeff Kirsher 	.get_drvinfo = nv_get_drvinfo,
535769b4b095SJeff Kirsher 	.get_link = ethtool_op_get_link,
535869b4b095SJeff Kirsher 	.get_wol = nv_get_wol,
535969b4b095SJeff Kirsher 	.set_wol = nv_set_wol,
536069b4b095SJeff Kirsher 	.get_regs_len = nv_get_regs_len,
536169b4b095SJeff Kirsher 	.get_regs = nv_get_regs,
536269b4b095SJeff Kirsher 	.nway_reset = nv_nway_reset,
536369b4b095SJeff Kirsher 	.get_ringparam = nv_get_ringparam,
536469b4b095SJeff Kirsher 	.set_ringparam = nv_set_ringparam,
536569b4b095SJeff Kirsher 	.get_pauseparam = nv_get_pauseparam,
536669b4b095SJeff Kirsher 	.set_pauseparam = nv_set_pauseparam,
536769b4b095SJeff Kirsher 	.get_strings = nv_get_strings,
536869b4b095SJeff Kirsher 	.get_ethtool_stats = nv_get_ethtool_stats,
536969b4b095SJeff Kirsher 	.get_sset_count = nv_get_sset_count,
537069b4b095SJeff Kirsher 	.self_test = nv_self_test,
53717491302dSRichard Cochran 	.get_ts_info = ethtool_op_get_ts_info,
53720fa9e289SPhilippe Reynes 	.get_link_ksettings = nv_get_link_ksettings,
53730fa9e289SPhilippe Reynes 	.set_link_ksettings = nv_set_link_ksettings,
537469b4b095SJeff Kirsher };
537569b4b095SJeff Kirsher 
537669b4b095SJeff Kirsher /* The mgmt unit and driver use a semaphore to access the phy during init */
537769b4b095SJeff Kirsher static int nv_mgmt_acquire_sema(struct net_device *dev)
537869b4b095SJeff Kirsher {
537969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
538069b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
538169b4b095SJeff Kirsher 	int i;
538269b4b095SJeff Kirsher 	u32 tx_ctrl, mgmt_sema;
538369b4b095SJeff Kirsher 
538469b4b095SJeff Kirsher 	for (i = 0; i < 10; i++) {
538569b4b095SJeff Kirsher 		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
538669b4b095SJeff Kirsher 		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
538769b4b095SJeff Kirsher 			break;
538869b4b095SJeff Kirsher 		msleep(500);
538969b4b095SJeff Kirsher 	}
539069b4b095SJeff Kirsher 
539169b4b095SJeff Kirsher 	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
539269b4b095SJeff Kirsher 		return 0;
539369b4b095SJeff Kirsher 
539469b4b095SJeff Kirsher 	for (i = 0; i < 2; i++) {
539569b4b095SJeff Kirsher 		tx_ctrl = readl(base + NvRegTransmitterControl);
539669b4b095SJeff Kirsher 		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
539769b4b095SJeff Kirsher 		writel(tx_ctrl, base + NvRegTransmitterControl);
539869b4b095SJeff Kirsher 
539969b4b095SJeff Kirsher 		/* verify that semaphore was acquired */
540069b4b095SJeff Kirsher 		tx_ctrl = readl(base + NvRegTransmitterControl);
540169b4b095SJeff Kirsher 		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
540269b4b095SJeff Kirsher 		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
540369b4b095SJeff Kirsher 			np->mgmt_sema = 1;
540469b4b095SJeff Kirsher 			return 1;
540569b4b095SJeff Kirsher 		} else
540669b4b095SJeff Kirsher 			udelay(50);
540769b4b095SJeff Kirsher 	}
540869b4b095SJeff Kirsher 
540969b4b095SJeff Kirsher 	return 0;
541069b4b095SJeff Kirsher }
541169b4b095SJeff Kirsher 
541269b4b095SJeff Kirsher static void nv_mgmt_release_sema(struct net_device *dev)
541369b4b095SJeff Kirsher {
541469b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
541569b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
541669b4b095SJeff Kirsher 	u32 tx_ctrl;
541769b4b095SJeff Kirsher 
541869b4b095SJeff Kirsher 	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
541969b4b095SJeff Kirsher 		if (np->mgmt_sema) {
542069b4b095SJeff Kirsher 			tx_ctrl = readl(base + NvRegTransmitterControl);
542169b4b095SJeff Kirsher 			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
542269b4b095SJeff Kirsher 			writel(tx_ctrl, base + NvRegTransmitterControl);
542369b4b095SJeff Kirsher 		}
542469b4b095SJeff Kirsher 	}
542569b4b095SJeff Kirsher }
542669b4b095SJeff Kirsher 
542769b4b095SJeff Kirsher 
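/*
 * Query the management unit firmware version: write the request code,
 * toggle the DATA_START bit and poll for up to five seconds for the
 * mgmt unit to flip DATA_READY in response (reverse-engineered
 * handshake). Returns 1 with np->mgmt_version filled in on success,
 * 0 on timeout or error.
 */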
542869b4b095SJeff Kirsher static int nv_mgmt_get_version(struct net_device *dev)
542969b4b095SJeff Kirsher {
543069b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
543169b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
543269b4b095SJeff Kirsher 	u32 data_ready = readl(base + NvRegTransmitterControl);
543369b4b095SJeff Kirsher 	u32 data_ready2 = 0;
543469b4b095SJeff Kirsher 	unsigned long start;
543569b4b095SJeff Kirsher 	int ready = 0;
543669b4b095SJeff Kirsher 
543769b4b095SJeff Kirsher 	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
543869b4b095SJeff Kirsher 	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
543969b4b095SJeff Kirsher 	start = jiffies;
544069b4b095SJeff Kirsher 	while (time_before(jiffies, start + 5*HZ)) {
544169b4b095SJeff Kirsher 		data_ready2 = readl(base + NvRegTransmitterControl);
544269b4b095SJeff Kirsher 		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
544369b4b095SJeff Kirsher 			ready = 1;
544469b4b095SJeff Kirsher 			break;
544569b4b095SJeff Kirsher 		}
544669b4b095SJeff Kirsher 		schedule_timeout_uninterruptible(1);
544769b4b095SJeff Kirsher 	}
544869b4b095SJeff Kirsher 
544969b4b095SJeff Kirsher 	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
545069b4b095SJeff Kirsher 		return 0;
545169b4b095SJeff Kirsher 
545269b4b095SJeff Kirsher 	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
545369b4b095SJeff Kirsher 
545469b4b095SJeff Kirsher 	return 1;
545569b4b095SJeff Kirsher }
545669b4b095SJeff Kirsher 
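/*
 * Bring the interface up: power up the phy, erase any stale hw
 * configuration, set up the descriptor rings, program the MAC and
 * timer/polling registers, request the irq and start the rx/tx
 * engines. Returns 0 on success even if no link is present yet; later
 * link changes are handled via nv_link_irq().
 */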
545769b4b095SJeff Kirsher static int nv_open(struct net_device *dev)
545869b4b095SJeff Kirsher {
545969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
546069b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
546169b4b095SJeff Kirsher 	int ret = 1;
546269b4b095SJeff Kirsher 	int oom, i;
546369b4b095SJeff Kirsher 	u32 low;
546469b4b095SJeff Kirsher 
546569b4b095SJeff Kirsher 	/* power up phy */
546669b4b095SJeff Kirsher 	mii_rw(dev, np->phyaddr, MII_BMCR,
546769b4b095SJeff Kirsher 	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
546869b4b095SJeff Kirsher 
546969b4b095SJeff Kirsher 	nv_txrx_gate(dev, false);
547069b4b095SJeff Kirsher 	/* erase previous misconfiguration */
547169b4b095SJeff Kirsher 	if (np->driver_data & DEV_HAS_POWER_CNTRL)
547269b4b095SJeff Kirsher 		nv_mac_reset(dev);
547369b4b095SJeff Kirsher 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
547469b4b095SJeff Kirsher 	writel(0, base + NvRegMulticastAddrB);
547569b4b095SJeff Kirsher 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
547669b4b095SJeff Kirsher 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
547769b4b095SJeff Kirsher 	writel(0, base + NvRegPacketFilterFlags);
547869b4b095SJeff Kirsher 
547969b4b095SJeff Kirsher 	writel(0, base + NvRegTransmitterControl);
548069b4b095SJeff Kirsher 	writel(0, base + NvRegReceiverControl);
548169b4b095SJeff Kirsher 
548269b4b095SJeff Kirsher 	writel(0, base + NvRegAdapterControl);
548369b4b095SJeff Kirsher 
548469b4b095SJeff Kirsher 	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
548569b4b095SJeff Kirsher 		writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
548669b4b095SJeff Kirsher 
548769b4b095SJeff Kirsher 	/* initialize descriptor rings */
548869b4b095SJeff Kirsher 	set_bufsize(dev);
548969b4b095SJeff Kirsher 	oom = nv_init_ring(dev);
549069b4b095SJeff Kirsher 
549169b4b095SJeff Kirsher 	writel(0, base + NvRegLinkSpeed);
549269b4b095SJeff Kirsher 	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
549369b4b095SJeff Kirsher 	nv_txrx_reset(dev);
549469b4b095SJeff Kirsher 	writel(0, base + NvRegUnknownSetupReg6);
549569b4b095SJeff Kirsher 
549669b4b095SJeff Kirsher 	np->in_shutdown = 0;
549769b4b095SJeff Kirsher 
549869b4b095SJeff Kirsher 	/* give hw rings */
549969b4b095SJeff Kirsher 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
550069b4b095SJeff Kirsher 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
550169b4b095SJeff Kirsher 		base + NvRegRingSizes);
550269b4b095SJeff Kirsher 
550369b4b095SJeff Kirsher 	writel(np->linkspeed, base + NvRegLinkSpeed);
550469b4b095SJeff Kirsher 	if (np->desc_ver == DESC_VER_1)
550569b4b095SJeff Kirsher 		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
550669b4b095SJeff Kirsher 	else
550769b4b095SJeff Kirsher 		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
550869b4b095SJeff Kirsher 	writel(np->txrxctl_bits, base + NvRegTxRxControl);
550969b4b095SJeff Kirsher 	writel(np->vlanctl_bits, base + NvRegVlanControl);
551069b4b095SJeff Kirsher 	pci_push(base);
551169b4b095SJeff Kirsher 	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
551269b4b095SJeff Kirsher 	if (reg_delay(dev, NvRegUnknownSetupReg5,
551369b4b095SJeff Kirsher 		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
551469b4b095SJeff Kirsher 		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
551569b4b095SJeff Kirsher 		netdev_info(dev,
551669b4b095SJeff Kirsher 			    "%s: SetupReg5, Bit 31 remained off\n", __func__);
551769b4b095SJeff Kirsher 
551869b4b095SJeff Kirsher 	writel(0, base + NvRegMIIMask);
551969b4b095SJeff Kirsher 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
552069b4b095SJeff Kirsher 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
552169b4b095SJeff Kirsher 
552269b4b095SJeff Kirsher 	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
552369b4b095SJeff Kirsher 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
552469b4b095SJeff Kirsher 	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
552569b4b095SJeff Kirsher 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
552669b4b095SJeff Kirsher 
552769b4b095SJeff Kirsher 	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
552869b4b095SJeff Kirsher 
552969b4b095SJeff Kirsher 	get_random_bytes(&low, sizeof(low));
553069b4b095SJeff Kirsher 	low &= NVREG_SLOTTIME_MASK;
553169b4b095SJeff Kirsher 	if (np->desc_ver == DESC_VER_1) {
553269b4b095SJeff Kirsher 		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
553369b4b095SJeff Kirsher 	} else {
553469b4b095SJeff Kirsher 		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
553569b4b095SJeff Kirsher 			/* setup legacy backoff */
553669b4b095SJeff Kirsher 			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
553769b4b095SJeff Kirsher 		} else {
553869b4b095SJeff Kirsher 			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
553969b4b095SJeff Kirsher 			nv_gear_backoff_reseed(dev);
554069b4b095SJeff Kirsher 		}
554169b4b095SJeff Kirsher 	}
554269b4b095SJeff Kirsher 	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
554369b4b095SJeff Kirsher 	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
554469b4b095SJeff Kirsher 	if (poll_interval == -1) {
554569b4b095SJeff Kirsher 		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
554669b4b095SJeff Kirsher 			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
554769b4b095SJeff Kirsher 		else
554869b4b095SJeff Kirsher 			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
554969b4b095SJeff Kirsher 	} else
555069b4b095SJeff Kirsher 		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
555169b4b095SJeff Kirsher 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
555269b4b095SJeff Kirsher 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
555369b4b095SJeff Kirsher 			base + NvRegAdapterControl);
555469b4b095SJeff Kirsher 	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
555569b4b095SJeff Kirsher 	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
555669b4b095SJeff Kirsher 	if (np->wolenabled)
555769b4b095SJeff Kirsher 		writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
555869b4b095SJeff Kirsher 
555969b4b095SJeff Kirsher 	i = readl(base + NvRegPowerState);
556069b4b095SJeff Kirsher 	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
556169b4b095SJeff Kirsher 		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
556269b4b095SJeff Kirsher 
556369b4b095SJeff Kirsher 	pci_push(base);
556469b4b095SJeff Kirsher 	udelay(10);
556569b4b095SJeff Kirsher 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
556669b4b095SJeff Kirsher 
556769b4b095SJeff Kirsher 	nv_disable_hw_interrupts(dev, np->irqmask);
556869b4b095SJeff Kirsher 	pci_push(base);
556969b4b095SJeff Kirsher 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
557069b4b095SJeff Kirsher 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
557169b4b095SJeff Kirsher 	pci_push(base);
557269b4b095SJeff Kirsher 
557369b4b095SJeff Kirsher 	if (nv_request_irq(dev, 0))
557469b4b095SJeff Kirsher 		goto out_drain;
557569b4b095SJeff Kirsher 
557669b4b095SJeff Kirsher 	/* ask for interrupts */
557769b4b095SJeff Kirsher 	nv_enable_hw_interrupts(dev, np->irqmask);
557869b4b095SJeff Kirsher 
557969b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
558069b4b095SJeff Kirsher 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
558169b4b095SJeff Kirsher 	writel(0, base + NvRegMulticastAddrB);
558269b4b095SJeff Kirsher 	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
558369b4b095SJeff Kirsher 	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
558469b4b095SJeff Kirsher 	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
558569b4b095SJeff Kirsher 	/* One manual link speed update: Interrupts are enabled, future link
558669b4b095SJeff Kirsher 	 * speed changes cause interrupts and are handled by nv_link_irq().
558769b4b095SJeff Kirsher 	 */
55881da847b9SZhu Yanjun 	readl(base + NvRegMIIStatus);
558969b4b095SJeff Kirsher 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
55901da847b9SZhu Yanjun 
559169b4b095SJeff Kirsher 	/* set linkspeed to an invalid value, thus forcing
559269b4b095SJeff Kirsher 	 * nv_update_linkspeed to init hw */
559369b4b095SJeff Kirsher 	np->linkspeed = 0;
559469b4b095SJeff Kirsher 	ret = nv_update_linkspeed(dev);
559569b4b095SJeff Kirsher 	nv_start_rxtx(dev);
559669b4b095SJeff Kirsher 	netif_start_queue(dev);
559769b4b095SJeff Kirsher 	nv_napi_enable(dev);
559869b4b095SJeff Kirsher 
559969b4b095SJeff Kirsher 	if (ret) {
560069b4b095SJeff Kirsher 		netif_carrier_on(dev);
560169b4b095SJeff Kirsher 	} else {
560269b4b095SJeff Kirsher 		netdev_info(dev, "no link during initialization\n");
560369b4b095SJeff Kirsher 		netif_carrier_off(dev);
560469b4b095SJeff Kirsher 	}
560569b4b095SJeff Kirsher 	if (oom)
560669b4b095SJeff Kirsher 		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
560769b4b095SJeff Kirsher 
560869b4b095SJeff Kirsher 	/* start statistics timer */
560969b4b095SJeff Kirsher 	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
561069b4b095SJeff Kirsher 		mod_timer(&np->stats_poll,
561169b4b095SJeff Kirsher 			round_jiffies(jiffies + STATS_INTERVAL));
561269b4b095SJeff Kirsher 
561369b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
561469b4b095SJeff Kirsher 
5615e19df76aSSanjay Hortikar 	/* If the loopback feature was set while the device was down, make sure
5616e19df76aSSanjay Hortikar 	 * that it's set correctly now.
5617e19df76aSSanjay Hortikar 	 */
5618e19df76aSSanjay Hortikar 	if (dev->features & NETIF_F_LOOPBACK)
5619e19df76aSSanjay Hortikar 		nv_set_loopback(dev, dev->features);
5620e19df76aSSanjay Hortikar 
562169b4b095SJeff Kirsher 	return 0;
562269b4b095SJeff Kirsher out_drain:
562369b4b095SJeff Kirsher 	nv_drain_rxtx(dev);
562469b4b095SJeff Kirsher 	return ret;
562569b4b095SJeff Kirsher }
562669b4b095SJeff Kirsher 
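/*
 * Bring the interface down: stop NAPI, the timers and the rx/tx
 * engines, mask interrupts, release the irq and drain the rings. If
 * WOL is enabled (or the phy must stay powered) rx is restarted so
 * wake-up packets can still be received; otherwise the phy is powered
 * down and the txrx clocks are gated.
 */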
562769b4b095SJeff Kirsher static int nv_close(struct net_device *dev)
562869b4b095SJeff Kirsher {
562969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
563069b4b095SJeff Kirsher 	u8 __iomem *base;
563169b4b095SJeff Kirsher 
563269b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
563369b4b095SJeff Kirsher 	np->in_shutdown = 1;
563469b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
563569b4b095SJeff Kirsher 	nv_napi_disable(dev);
563669b4b095SJeff Kirsher 	synchronize_irq(np->pci_dev->irq);
563769b4b095SJeff Kirsher 
563869b4b095SJeff Kirsher 	del_timer_sync(&np->oom_kick);
563969b4b095SJeff Kirsher 	del_timer_sync(&np->nic_poll);
564069b4b095SJeff Kirsher 	del_timer_sync(&np->stats_poll);
564169b4b095SJeff Kirsher 
564269b4b095SJeff Kirsher 	netif_stop_queue(dev);
564369b4b095SJeff Kirsher 	spin_lock_irq(&np->lock);
56441ff39eb6Sdavid decotigny 	nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
564569b4b095SJeff Kirsher 	nv_stop_rxtx(dev);
564669b4b095SJeff Kirsher 	nv_txrx_reset(dev);
564769b4b095SJeff Kirsher 
564869b4b095SJeff Kirsher 	/* disable interrupts on the nic or we will lock up */
564969b4b095SJeff Kirsher 	base = get_hwbase(dev);
565069b4b095SJeff Kirsher 	nv_disable_hw_interrupts(dev, np->irqmask);
565169b4b095SJeff Kirsher 	pci_push(base);
565269b4b095SJeff Kirsher 
565369b4b095SJeff Kirsher 	spin_unlock_irq(&np->lock);
565469b4b095SJeff Kirsher 
565569b4b095SJeff Kirsher 	nv_free_irq(dev);
565669b4b095SJeff Kirsher 
565769b4b095SJeff Kirsher 	nv_drain_rxtx(dev);
565869b4b095SJeff Kirsher 
565969b4b095SJeff Kirsher 	if (np->wolenabled || !phy_power_down) {
566069b4b095SJeff Kirsher 		nv_txrx_gate(dev, false);
566169b4b095SJeff Kirsher 		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
566269b4b095SJeff Kirsher 		nv_start_rx(dev);
566369b4b095SJeff Kirsher 	} else {
566469b4b095SJeff Kirsher 		/* power down phy */
566569b4b095SJeff Kirsher 		mii_rw(dev, np->phyaddr, MII_BMCR,
566669b4b095SJeff Kirsher 		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
566769b4b095SJeff Kirsher 		nv_txrx_gate(dev, true);
566869b4b095SJeff Kirsher 	}
566969b4b095SJeff Kirsher 
567069b4b095SJeff Kirsher 	/* FIXME: power down nic */
567169b4b095SJeff Kirsher 
567269b4b095SJeff Kirsher 	return 0;
567369b4b095SJeff Kirsher }
567469b4b095SJeff Kirsher 
567569b4b095SJeff Kirsher static const struct net_device_ops nv_netdev_ops = {
567669b4b095SJeff Kirsher 	.ndo_open		= nv_open,
567769b4b095SJeff Kirsher 	.ndo_stop		= nv_close,
5678f5d827aeSdavid decotigny 	.ndo_get_stats64	= nv_get_stats64,
567969b4b095SJeff Kirsher 	.ndo_start_xmit		= nv_start_xmit,
568069b4b095SJeff Kirsher 	.ndo_tx_timeout		= nv_tx_timeout,
568169b4b095SJeff Kirsher 	.ndo_change_mtu		= nv_change_mtu,
568269b4b095SJeff Kirsher 	.ndo_fix_features	= nv_fix_features,
568369b4b095SJeff Kirsher 	.ndo_set_features	= nv_set_features,
568469b4b095SJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
568569b4b095SJeff Kirsher 	.ndo_set_mac_address	= nv_set_mac_address,
5686afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= nv_set_multicast,
568769b4b095SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
568869b4b095SJeff Kirsher 	.ndo_poll_controller	= nv_poll_controller,
568969b4b095SJeff Kirsher #endif
569069b4b095SJeff Kirsher };
569169b4b095SJeff Kirsher 
569269b4b095SJeff Kirsher static const struct net_device_ops nv_netdev_ops_optimized = {
569369b4b095SJeff Kirsher 	.ndo_open		= nv_open,
569469b4b095SJeff Kirsher 	.ndo_stop		= nv_close,
5695f5d827aeSdavid decotigny 	.ndo_get_stats64	= nv_get_stats64,
569669b4b095SJeff Kirsher 	.ndo_start_xmit		= nv_start_xmit_optimized,
569769b4b095SJeff Kirsher 	.ndo_tx_timeout		= nv_tx_timeout,
569869b4b095SJeff Kirsher 	.ndo_change_mtu		= nv_change_mtu,
569969b4b095SJeff Kirsher 	.ndo_fix_features	= nv_fix_features,
570069b4b095SJeff Kirsher 	.ndo_set_features	= nv_set_features,
570169b4b095SJeff Kirsher 	.ndo_validate_addr	= eth_validate_addr,
570269b4b095SJeff Kirsher 	.ndo_set_mac_address	= nv_set_mac_address,
5703afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= nv_set_multicast,
570469b4b095SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
570569b4b095SJeff Kirsher 	.ndo_poll_controller	= nv_poll_controller,
570669b4b095SJeff Kirsher #endif
570769b4b095SJeff Kirsher };
570869b4b095SJeff Kirsher 
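/*
 * PCI probe: map the register window (sized by the feature flags),
 * select the descriptor format, allocate the rings and per-cpu stats,
 * recover the MAC address (fixing the byte order on older boards),
 * locate the phy and register the net device.
 */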
5709d05919a1SBill Pemberton static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
571069b4b095SJeff Kirsher {
571169b4b095SJeff Kirsher 	struct net_device *dev;
571269b4b095SJeff Kirsher 	struct fe_priv *np;
571369b4b095SJeff Kirsher 	unsigned long addr;
571469b4b095SJeff Kirsher 	u8 __iomem *base;
571569b4b095SJeff Kirsher 	int err, i;
571669b4b095SJeff Kirsher 	u32 powerstate, txreg;
571769b4b095SJeff Kirsher 	u32 phystate_orig = 0, phystate;
571869b4b095SJeff Kirsher 	int phyinitialized = 0;
571969b4b095SJeff Kirsher 	static int printed_version;
57202b373670SJakub Kicinski 	u8 mac[ETH_ALEN];
572169b4b095SJeff Kirsher 
572269b4b095SJeff Kirsher 	if (!printed_version++)
572369b4b095SJeff Kirsher 		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
572469b4b095SJeff Kirsher 			FORCEDETH_VERSION);
572569b4b095SJeff Kirsher 
572669b4b095SJeff Kirsher 	dev = alloc_etherdev(sizeof(struct fe_priv));
572769b4b095SJeff Kirsher 	err = -ENOMEM;
572869b4b095SJeff Kirsher 	if (!dev)
572969b4b095SJeff Kirsher 		goto out;
573069b4b095SJeff Kirsher 
573169b4b095SJeff Kirsher 	np = netdev_priv(dev);
573269b4b095SJeff Kirsher 	np->dev = dev;
573369b4b095SJeff Kirsher 	np->pci_dev = pci_dev;
573469b4b095SJeff Kirsher 	spin_lock_init(&np->lock);
5735f5d827aeSdavid decotigny 	spin_lock_init(&np->hwstats_lock);
573669b4b095SJeff Kirsher 	SET_NETDEV_DEV(dev, &pci_dev->dev);
5737827da44cSJohn Stultz 	u64_stats_init(&np->swstats_rx_syncp);
5738827da44cSJohn Stultz 	u64_stats_init(&np->swstats_tx_syncp);
5739f4b633b9SZhu Yanjun 	np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
5740f4b633b9SZhu Yanjun 	if (!np->txrx_stats) {
5741f4b633b9SZhu Yanjun 		pr_err("failed to allocate per-cpu txrx_stats\n");
5742f4b633b9SZhu Yanjun 		err = -ENOMEM;
5743f4b633b9SZhu Yanjun 		goto out_alloc_percpu;
5744f4b633b9SZhu Yanjun 	}
574569b4b095SJeff Kirsher 
5746d9935679SKees Cook 	timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
5747d9935679SKees Cook 	timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
5748d9935679SKees Cook 	timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE);
574969b4b095SJeff Kirsher 
575069b4b095SJeff Kirsher 	err = pci_enable_device(pci_dev);
575169b4b095SJeff Kirsher 	if (err)
575269b4b095SJeff Kirsher 		goto out_free;
575369b4b095SJeff Kirsher 
575469b4b095SJeff Kirsher 	pci_set_master(pci_dev);
575569b4b095SJeff Kirsher 
575669b4b095SJeff Kirsher 	err = pci_request_regions(pci_dev, DRV_NAME);
575769b4b095SJeff Kirsher 	if (err < 0)
575869b4b095SJeff Kirsher 		goto out_disable;
575969b4b095SJeff Kirsher 
576069b4b095SJeff Kirsher 	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
576169b4b095SJeff Kirsher 		np->register_size = NV_PCI_REGSZ_VER3;
576269b4b095SJeff Kirsher 	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
576369b4b095SJeff Kirsher 		np->register_size = NV_PCI_REGSZ_VER2;
576469b4b095SJeff Kirsher 	else
576569b4b095SJeff Kirsher 		np->register_size = NV_PCI_REGSZ_VER1;
576669b4b095SJeff Kirsher 
576769b4b095SJeff Kirsher 	err = -EINVAL;
576869b4b095SJeff Kirsher 	addr = 0;
576969b4b095SJeff Kirsher 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
577069b4b095SJeff Kirsher 		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
577169b4b095SJeff Kirsher 				pci_resource_len(pci_dev, i) >= np->register_size) {
577269b4b095SJeff Kirsher 			addr = pci_resource_start(pci_dev, i);
577369b4b095SJeff Kirsher 			break;
577469b4b095SJeff Kirsher 		}
577569b4b095SJeff Kirsher 	}
577669b4b095SJeff Kirsher 	if (i == DEVICE_COUNT_RESOURCE) {
577769b4b095SJeff Kirsher 		dev_info(&pci_dev->dev, "Couldn't find register window\n");
577869b4b095SJeff Kirsher 		goto out_relreg;
577969b4b095SJeff Kirsher 	}
578069b4b095SJeff Kirsher 
578169b4b095SJeff Kirsher 	/* copy of driver data */
578269b4b095SJeff Kirsher 	np->driver_data = id->driver_data;
578369b4b095SJeff Kirsher 	/* copy of device id */
578469b4b095SJeff Kirsher 	np->device_id = id->device;
578569b4b095SJeff Kirsher 
578669b4b095SJeff Kirsher 	/* handle different descriptor versions */
578769b4b095SJeff Kirsher 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
578869b4b095SJeff Kirsher 		/* packet format 3: supports 40-bit addressing */
578969b4b095SJeff Kirsher 		np->desc_ver = DESC_VER_3;
579069b4b095SJeff Kirsher 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
579169b4b095SJeff Kirsher 		if (dma_64bit) {
5792e5c88bc9SChristophe JAILLET 			if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(39)))
579369b4b095SJeff Kirsher 				dev_info(&pci_dev->dev,
579469b4b095SJeff Kirsher 					 "64-bit DMA failed, using 32-bit addressing\n");
579569b4b095SJeff Kirsher 			else
579669b4b095SJeff Kirsher 				dev->features |= NETIF_F_HIGHDMA;
579769b4b095SJeff Kirsher 		}
579869b4b095SJeff Kirsher 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
579969b4b095SJeff Kirsher 		/* packet format 2: supports jumbo frames */
580069b4b095SJeff Kirsher 		np->desc_ver = DESC_VER_2;
580169b4b095SJeff Kirsher 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
580269b4b095SJeff Kirsher 	} else {
580369b4b095SJeff Kirsher 		/* original packet format */
580469b4b095SJeff Kirsher 		np->desc_ver = DESC_VER_1;
580569b4b095SJeff Kirsher 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
580669b4b095SJeff Kirsher 	}
580769b4b095SJeff Kirsher 
580869b4b095SJeff Kirsher 	np->pkt_limit = NV_PKTLIMIT_1;
580969b4b095SJeff Kirsher 	if (id->driver_data & DEV_HAS_LARGEDESC)
581069b4b095SJeff Kirsher 		np->pkt_limit = NV_PKTLIMIT_2;
581169b4b095SJeff Kirsher 
581269b4b095SJeff Kirsher 	if (id->driver_data & DEV_HAS_CHECKSUM) {
581369b4b095SJeff Kirsher 		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
581469b4b095SJeff Kirsher 		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
581569b4b095SJeff Kirsher 			NETIF_F_TSO | NETIF_F_RXCSUM;
581669b4b095SJeff Kirsher 	}
581769b4b095SJeff Kirsher 
581869b4b095SJeff Kirsher 	np->vlanctl_bits = 0;
581969b4b095SJeff Kirsher 	if (id->driver_data & DEV_HAS_VLAN) {
582069b4b095SJeff Kirsher 		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5821f646968fSPatrick McHardy 		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
5822f646968fSPatrick McHardy 				    NETIF_F_HW_VLAN_CTAG_TX;
582369b4b095SJeff Kirsher 	}
582469b4b095SJeff Kirsher 
582569b4b095SJeff Kirsher 	dev->features |= dev->hw_features;
582669b4b095SJeff Kirsher 
5827e19df76aSSanjay Hortikar 	/* Add loopback capability to the device. */
5828e19df76aSSanjay Hortikar 	dev->hw_features |= NETIF_F_LOOPBACK;
5829e19df76aSSanjay Hortikar 
583044770e11SJarod Wilson 	/* MTU range: 64 - 1500 or 9100 */
583144770e11SJarod Wilson 	dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
583244770e11SJarod Wilson 	dev->max_mtu = np->pkt_limit;
583344770e11SJarod Wilson 
583469b4b095SJeff Kirsher 	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
583569b4b095SJeff Kirsher 	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
583669b4b095SJeff Kirsher 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
583769b4b095SJeff Kirsher 	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
583869b4b095SJeff Kirsher 		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
583969b4b095SJeff Kirsher 	}
584069b4b095SJeff Kirsher 
584169b4b095SJeff Kirsher 	err = -ENOMEM;
584269b4b095SJeff Kirsher 	np->base = ioremap(addr, np->register_size);
584369b4b095SJeff Kirsher 	if (!np->base)
584469b4b095SJeff Kirsher 		goto out_relreg;
584569b4b095SJeff Kirsher 
584669b4b095SJeff Kirsher 	np->rx_ring_size = RX_RING_DEFAULT;
584769b4b095SJeff Kirsher 	np->tx_ring_size = TX_RING_DEFAULT;
584869b4b095SJeff Kirsher 
584969b4b095SJeff Kirsher 	if (!nv_optimized(np)) {
5850e8992e40SZhu Yanjun 		np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev,
5851e8992e40SZhu Yanjun 						      sizeof(struct ring_desc) *
5852e8992e40SZhu Yanjun 						      (np->rx_ring_size +
5853e8992e40SZhu Yanjun 						      np->tx_ring_size),
5854e8992e40SZhu Yanjun 						      &np->ring_addr,
58556ae5cbc4SJia-Ju Bai 						      GFP_KERNEL);
585669b4b095SJeff Kirsher 		if (!np->rx_ring.orig)
585769b4b095SJeff Kirsher 			goto out_unmap;
585869b4b095SJeff Kirsher 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
585969b4b095SJeff Kirsher 	} else {
5860e8992e40SZhu Yanjun 		np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev,
5861e8992e40SZhu Yanjun 						    sizeof(struct ring_desc_ex) *
5862e8992e40SZhu Yanjun 						    (np->rx_ring_size +
5863e8992e40SZhu Yanjun 						    np->tx_ring_size),
58646ae5cbc4SJia-Ju Bai 						    &np->ring_addr, GFP_KERNEL);
586569b4b095SJeff Kirsher 		if (!np->rx_ring.ex)
586669b4b095SJeff Kirsher 			goto out_unmap;
586769b4b095SJeff Kirsher 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
586869b4b095SJeff Kirsher 	}
586969b4b095SJeff Kirsher 	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
587069b4b095SJeff Kirsher 	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
587169b4b095SJeff Kirsher 	if (!np->rx_skb || !np->tx_skb)
587269b4b095SJeff Kirsher 		goto out_freering;
587369b4b095SJeff Kirsher 
587469b4b095SJeff Kirsher 	if (!nv_optimized(np))
587569b4b095SJeff Kirsher 		dev->netdev_ops = &nv_netdev_ops;
587669b4b095SJeff Kirsher 	else
587769b4b095SJeff Kirsher 		dev->netdev_ops = &nv_netdev_ops_optimized;
587869b4b095SJeff Kirsher 
5879b48b89f9SJakub Kicinski 	netif_napi_add(dev, &np->napi, nv_napi_poll);
58807ad24ea4SWilfried Klaebe 	dev->ethtool_ops = &ops;
588169b4b095SJeff Kirsher 	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
588269b4b095SJeff Kirsher 
588369b4b095SJeff Kirsher 	pci_set_drvdata(pci_dev, dev);
588469b4b095SJeff Kirsher 
588569b4b095SJeff Kirsher 	/* read the mac address */
588669b4b095SJeff Kirsher 	base = get_hwbase(dev);
588769b4b095SJeff Kirsher 	np->orig_mac[0] = readl(base + NvRegMacAddrA);
588869b4b095SJeff Kirsher 	np->orig_mac[1] = readl(base + NvRegMacAddrB);
588969b4b095SJeff Kirsher 
589069b4b095SJeff Kirsher 	/* check the workaround bit for correct mac address order */
589169b4b095SJeff Kirsher 	txreg = readl(base + NvRegTransmitPoll);
589269b4b095SJeff Kirsher 	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
589369b4b095SJeff Kirsher 		/* mac address is already in correct order */
58942b373670SJakub Kicinski 		mac[0] = (np->orig_mac[0] >>  0) & 0xff;
58952b373670SJakub Kicinski 		mac[1] = (np->orig_mac[0] >>  8) & 0xff;
58962b373670SJakub Kicinski 		mac[2] = (np->orig_mac[0] >> 16) & 0xff;
58972b373670SJakub Kicinski 		mac[3] = (np->orig_mac[0] >> 24) & 0xff;
58982b373670SJakub Kicinski 		mac[4] = (np->orig_mac[1] >>  0) & 0xff;
58992b373670SJakub Kicinski 		mac[5] = (np->orig_mac[1] >>  8) & 0xff;
590069b4b095SJeff Kirsher 	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
590169b4b095SJeff Kirsher 		/* mac address is already in correct order */
59022b373670SJakub Kicinski 		mac[0] = (np->orig_mac[0] >>  0) & 0xff;
59032b373670SJakub Kicinski 		mac[1] = (np->orig_mac[0] >>  8) & 0xff;
59042b373670SJakub Kicinski 		mac[2] = (np->orig_mac[0] >> 16) & 0xff;
59052b373670SJakub Kicinski 		mac[3] = (np->orig_mac[0] >> 24) & 0xff;
59062b373670SJakub Kicinski 		mac[4] = (np->orig_mac[1] >>  0) & 0xff;
59072b373670SJakub Kicinski 		mac[5] = (np->orig_mac[1] >>  8) & 0xff;
590869b4b095SJeff Kirsher 		/*
590969b4b095SJeff Kirsher 		 * Set orig mac address back to the reversed version.
591069b4b095SJeff Kirsher 		 * This flag will be cleared during low power transition.
591169b4b095SJeff Kirsher 		 * Therefore, we should always put back the reversed address.
591269b4b095SJeff Kirsher 		 */
59132b373670SJakub Kicinski 		np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) +
59142b373670SJakub Kicinski 			(mac[3] << 16) + (mac[2] << 24);
59152b373670SJakub Kicinski 		np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8);
591669b4b095SJeff Kirsher 	} else {
591769b4b095SJeff Kirsher 		/* need to reverse mac address to correct order */
59182b373670SJakub Kicinski 		mac[0] = (np->orig_mac[1] >>  8) & 0xff;
59192b373670SJakub Kicinski 		mac[1] = (np->orig_mac[1] >>  0) & 0xff;
59202b373670SJakub Kicinski 		mac[2] = (np->orig_mac[0] >> 24) & 0xff;
59212b373670SJakub Kicinski 		mac[3] = (np->orig_mac[0] >> 16) & 0xff;
59222b373670SJakub Kicinski 		mac[4] = (np->orig_mac[0] >>  8) & 0xff;
59232b373670SJakub Kicinski 		mac[5] = (np->orig_mac[0] >>  0) & 0xff;
592469b4b095SJeff Kirsher 		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
592569b4b095SJeff Kirsher 		dev_dbg(&pci_dev->dev,
592669b4b095SJeff Kirsher 			"%s: set workaround bit for reversed mac addr\n",
592769b4b095SJeff Kirsher 			__func__);
592869b4b095SJeff Kirsher 	}
592969b4b095SJeff Kirsher 
59302b373670SJakub Kicinski 	if (is_valid_ether_addr(mac)) {
59312b373670SJakub Kicinski 		eth_hw_addr_set(dev, mac);
59322b373670SJakub Kicinski 	} else {
593369b4b095SJeff Kirsher 		/*
593469b4b095SJeff Kirsher 		 * Bad mac address. At least one bios sets the mac address
593569b4b095SJeff Kirsher 		 * to 01:23:45:67:89:ab
593669b4b095SJeff Kirsher 		 */
593769b4b095SJeff Kirsher 		dev_err(&pci_dev->dev,
593869b4b095SJeff Kirsher 			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
59392b373670SJakub Kicinski 			mac);
59407ce5d222SDanny Kukawka 		eth_hw_addr_random(dev);
594169b4b095SJeff Kirsher 		dev_err(&pci_dev->dev,
594269b4b095SJeff Kirsher 			"Using random MAC address: %pM\n", dev->dev_addr);
594369b4b095SJeff Kirsher 	}
594469b4b095SJeff Kirsher 
594569b4b095SJeff Kirsher 	/* set mac address */
594669b4b095SJeff Kirsher 	nv_copy_mac_to_hw(dev);
594769b4b095SJeff Kirsher 
594869b4b095SJeff Kirsher 	/* disable WOL */
594969b4b095SJeff Kirsher 	writel(0, base + NvRegWakeUpFlags);
595069b4b095SJeff Kirsher 	np->wolenabled = 0;
595169b4b095SJeff Kirsher 	device_set_wakeup_enable(&pci_dev->dev, false);
595269b4b095SJeff Kirsher 
595369b4b095SJeff Kirsher 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
595469b4b095SJeff Kirsher 
595569b4b095SJeff Kirsher 		/* take phy and nic out of low power mode */
595669b4b095SJeff Kirsher 		powerstate = readl(base + NvRegPowerState2);
595769b4b095SJeff Kirsher 		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
595869b4b095SJeff Kirsher 		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
595969b4b095SJeff Kirsher 		    pci_dev->revision >= 0xA3)
596069b4b095SJeff Kirsher 			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
596169b4b095SJeff Kirsher 		writel(powerstate, base + NvRegPowerState2);
596269b4b095SJeff Kirsher 	}
596369b4b095SJeff Kirsher 
596469b4b095SJeff Kirsher 	if (np->desc_ver == DESC_VER_1)
596569b4b095SJeff Kirsher 		np->tx_flags = NV_TX_VALID;
596669b4b095SJeff Kirsher 	else
596769b4b095SJeff Kirsher 		np->tx_flags = NV_TX2_VALID;
596869b4b095SJeff Kirsher 
596969b4b095SJeff Kirsher 	np->msi_flags = 0;
597069b4b095SJeff Kirsher 	if ((id->driver_data & DEV_HAS_MSI) && msi)
597169b4b095SJeff Kirsher 		np->msi_flags |= NV_MSI_CAPABLE;
597269b4b095SJeff Kirsher 
597369b4b095SJeff Kirsher 	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
597469b4b095SJeff Kirsher 		/* msi-x has had reported issues when the irqmask is
597569b4b095SJeff Kirsher 		 * modified, as it is by napi, so keep it disabled for now
597669b4b095SJeff Kirsher 		 */
597769b4b095SJeff Kirsher #if 0
597869b4b095SJeff Kirsher 		np->msi_flags |= NV_MSI_X_CAPABLE;
597969b4b095SJeff Kirsher #endif
598069b4b095SJeff Kirsher 	}
598169b4b095SJeff Kirsher 
598269b4b095SJeff Kirsher 	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
598369b4b095SJeff Kirsher 		np->irqmask = NVREG_IRQMASK_CPU;
598469b4b095SJeff Kirsher 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
598569b4b095SJeff Kirsher 			np->msi_flags |= 0x0001;
598669b4b095SJeff Kirsher 	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
598769b4b095SJeff Kirsher 		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
598869b4b095SJeff Kirsher 		/* start off in throughput mode */
598969b4b095SJeff Kirsher 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
599069b4b095SJeff Kirsher 		/* remove support for msix mode */
599169b4b095SJeff Kirsher 		np->msi_flags &= ~NV_MSI_X_CAPABLE;
599269b4b095SJeff Kirsher 	} else {
599369b4b095SJeff Kirsher 		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
599469b4b095SJeff Kirsher 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
599569b4b095SJeff Kirsher 		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
599669b4b095SJeff Kirsher 			np->msi_flags |= 0x0003;
599769b4b095SJeff Kirsher 	}
599869b4b095SJeff Kirsher 
599969b4b095SJeff Kirsher 	if (id->driver_data & DEV_NEED_TIMERIRQ)
600069b4b095SJeff Kirsher 		np->irqmask |= NVREG_IRQ_TIMER;
600169b4b095SJeff Kirsher 	if (id->driver_data & DEV_NEED_LINKTIMER) {
600269b4b095SJeff Kirsher 		np->need_linktimer = 1;
600369b4b095SJeff Kirsher 		np->link_timeout = jiffies + LINK_TIMEOUT;
600469b4b095SJeff Kirsher 	} else {
600569b4b095SJeff Kirsher 		np->need_linktimer = 0;
600669b4b095SJeff Kirsher 	}
600769b4b095SJeff Kirsher 
600869b4b095SJeff Kirsher 	/* Limit the number of outstanding tx packets to work around a hw bug */
600969b4b095SJeff Kirsher 	if (id->driver_data & DEV_NEED_TX_LIMIT) {
601069b4b095SJeff Kirsher 		np->tx_limit = 1;
601169b4b095SJeff Kirsher 		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
601269b4b095SJeff Kirsher 		    pci_dev->revision >= 0xA2)
601369b4b095SJeff Kirsher 			np->tx_limit = 0;
601469b4b095SJeff Kirsher 	}
601569b4b095SJeff Kirsher 
601669b4b095SJeff Kirsher 	/* clear phy state and temporarily halt phy interrupts */
601769b4b095SJeff Kirsher 	writel(0, base + NvRegMIIMask);
601869b4b095SJeff Kirsher 	phystate = readl(base + NvRegAdapterControl);
601969b4b095SJeff Kirsher 	if (phystate & NVREG_ADAPTCTL_RUNNING) {
602069b4b095SJeff Kirsher 		phystate_orig = 1;
602169b4b095SJeff Kirsher 		phystate &= ~NVREG_ADAPTCTL_RUNNING;
602269b4b095SJeff Kirsher 		writel(phystate, base + NvRegAdapterControl);
602369b4b095SJeff Kirsher 	}
602469b4b095SJeff Kirsher 	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
602569b4b095SJeff Kirsher 
602669b4b095SJeff Kirsher 	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
602769b4b095SJeff Kirsher 		/* management unit running on the mac? */
602869b4b095SJeff Kirsher 		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
602969b4b095SJeff Kirsher 		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
603069b4b095SJeff Kirsher 		    nv_mgmt_acquire_sema(dev) &&
603169b4b095SJeff Kirsher 		    nv_mgmt_get_version(dev)) {
603269b4b095SJeff Kirsher 			np->mac_in_use = 1;
603369b4b095SJeff Kirsher 			if (np->mgmt_version > 0)
603469b4b095SJeff Kirsher 				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
603569b4b095SJeff Kirsher 			/* management unit setup the phy already? */
603669b4b095SJeff Kirsher 			if (np->mac_in_use &&
603769b4b095SJeff Kirsher 			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
603869b4b095SJeff Kirsher 			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
603969b4b095SJeff Kirsher 				/* phy is inited by mgmt unit */
604069b4b095SJeff Kirsher 				phyinitialized = 1;
604169b4b095SJeff Kirsher 			} else {
604269b4b095SJeff Kirsher 				/* we need to init the phy */
604369b4b095SJeff Kirsher 			}
604469b4b095SJeff Kirsher 		}
604569b4b095SJeff Kirsher 	}
604669b4b095SJeff Kirsher 
604769b4b095SJeff Kirsher 	/* find a suitable phy */
604869b4b095SJeff Kirsher 	for (i = 1; i <= 32; i++) {
604969b4b095SJeff Kirsher 		int id1, id2;
605069b4b095SJeff Kirsher 		int phyaddr = i & 0x1F;
605169b4b095SJeff Kirsher 
605269b4b095SJeff Kirsher 		spin_lock_irq(&np->lock);
605369b4b095SJeff Kirsher 		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
605469b4b095SJeff Kirsher 		spin_unlock_irq(&np->lock);
605569b4b095SJeff Kirsher 		if (id1 < 0 || id1 == 0xffff)
605669b4b095SJeff Kirsher 			continue;
605769b4b095SJeff Kirsher 		spin_lock_irq(&np->lock);
605869b4b095SJeff Kirsher 		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
605969b4b095SJeff Kirsher 		spin_unlock_irq(&np->lock);
606069b4b095SJeff Kirsher 		if (id2 < 0 || id2 == 0xffff)
606169b4b095SJeff Kirsher 			continue;
606269b4b095SJeff Kirsher 
606369b4b095SJeff Kirsher 		np->phy_model = id2 & PHYID2_MODEL_MASK;
606469b4b095SJeff Kirsher 		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
606569b4b095SJeff Kirsher 		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
606669b4b095SJeff Kirsher 		np->phyaddr = phyaddr;
606769b4b095SJeff Kirsher 		np->phy_oui = id1 | id2;
606869b4b095SJeff Kirsher 
606969b4b095SJeff Kirsher 		/* Realtek hardcoded phy id1 to all zeros on certain phys */
607069b4b095SJeff Kirsher 		if (np->phy_oui == PHY_OUI_REALTEK2)
607169b4b095SJeff Kirsher 			np->phy_oui = PHY_OUI_REALTEK;
607269b4b095SJeff Kirsher 		/* Setup phy revision for Realtek */
607369b4b095SJeff Kirsher 		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
607469b4b095SJeff Kirsher 			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
607569b4b095SJeff Kirsher 
607669b4b095SJeff Kirsher 		break;
607769b4b095SJeff Kirsher 	}
607869b4b095SJeff Kirsher 	if (i == 33) {
607969b4b095SJeff Kirsher 		dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
608069b4b095SJeff Kirsher 		goto out_error;
608169b4b095SJeff Kirsher 	}
608269b4b095SJeff Kirsher 
608369b4b095SJeff Kirsher 	if (!phyinitialized) {
608469b4b095SJeff Kirsher 		/* reset it */
608569b4b095SJeff Kirsher 		phy_init(dev);
608669b4b095SJeff Kirsher 	} else {
608769b4b095SJeff Kirsher 		/* see if it is a gigabit phy */
608869b4b095SJeff Kirsher 		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
608969b4b095SJeff Kirsher 		if (mii_status & PHY_GIGABIT)
609069b4b095SJeff Kirsher 			np->gigabit = PHY_GIGABIT;
609169b4b095SJeff Kirsher 	}
609269b4b095SJeff Kirsher 
609369b4b095SJeff Kirsher 	/* set default link speed settings */
609469b4b095SJeff Kirsher 	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
609569b4b095SJeff Kirsher 	np->duplex = 0;
609669b4b095SJeff Kirsher 	np->autoneg = 1;
609769b4b095SJeff Kirsher 
609869b4b095SJeff Kirsher 	err = register_netdev(dev);
609969b4b095SJeff Kirsher 	if (err) {
610069b4b095SJeff Kirsher 		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
610169b4b095SJeff Kirsher 		goto out_error;
610269b4b095SJeff Kirsher 	}
610369b4b095SJeff Kirsher 
61043f0a1b58Sdavid decotigny 	netif_carrier_off(dev);
61053f0a1b58Sdavid decotigny 
61063f0a1b58Sdavid decotigny 	/* Some NICs freeze when TX pause is enabled while NIC is
61073f0a1b58Sdavid decotigny 	 * down, and this stays across warm reboots. The sequence
61083f0a1b58Sdavid decotigny 	 * below should be enough to recover from that state.
61093f0a1b58Sdavid decotigny 	 */
61103f0a1b58Sdavid decotigny 	nv_update_pause(dev, 0);
61113f0a1b58Sdavid decotigny 	nv_start_tx(dev);
61123f0a1b58Sdavid decotigny 	nv_stop_tx(dev);
61133f0a1b58Sdavid decotigny 
6114823dcd25SDavid S. Miller 	if (id->driver_data & DEV_HAS_VLAN)
611569b4b095SJeff Kirsher 		nv_vlan_mode(dev, dev->features);
611669b4b095SJeff Kirsher 
611769b4b095SJeff Kirsher 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
611869b4b095SJeff Kirsher 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
611969b4b095SJeff Kirsher 
6120e19df76aSSanjay Hortikar 	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
612169b4b095SJeff Kirsher 		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
612269b4b095SJeff Kirsher 		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
612369b4b095SJeff Kirsher 			"csum " : "",
6124f646968fSPatrick McHardy 		 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
6125f646968fSPatrick McHardy 				  NETIF_F_HW_VLAN_CTAG_TX) ?
612669b4b095SJeff Kirsher 			"vlan " : "",
6127e19df76aSSanjay Hortikar 		 dev->features & (NETIF_F_LOOPBACK) ?
6128e19df76aSSanjay Hortikar 			"loopback " : "",
612969b4b095SJeff Kirsher 		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
613069b4b095SJeff Kirsher 		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
613169b4b095SJeff Kirsher 		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
613269b4b095SJeff Kirsher 		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
613369b4b095SJeff Kirsher 		 np->need_linktimer ? "lnktim " : "",
613469b4b095SJeff Kirsher 		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
613569b4b095SJeff Kirsher 		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
613669b4b095SJeff Kirsher 		 np->desc_ver);
613769b4b095SJeff Kirsher 
613869b4b095SJeff Kirsher 	return 0;
613969b4b095SJeff Kirsher 
614069b4b095SJeff Kirsher out_error:
6141*5b17a497SChristophe JAILLET 	nv_mgmt_release_sema(dev);
614269b4b095SJeff Kirsher 	if (phystate_orig)
614369b4b095SJeff Kirsher 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
614469b4b095SJeff Kirsher out_freering:
614569b4b095SJeff Kirsher 	free_rings(dev);
614669b4b095SJeff Kirsher out_unmap:
614769b4b095SJeff Kirsher 	iounmap(get_hwbase(dev));
614869b4b095SJeff Kirsher out_relreg:
614969b4b095SJeff Kirsher 	pci_release_regions(pci_dev);
615069b4b095SJeff Kirsher out_disable:
615169b4b095SJeff Kirsher 	pci_disable_device(pci_dev);
615269b4b095SJeff Kirsher out_free:
6153f4b633b9SZhu Yanjun 	free_percpu(np->txrx_stats);
6154f4b633b9SZhu Yanjun out_alloc_percpu:
615569b4b095SJeff Kirsher 	free_netdev(dev);
615669b4b095SJeff Kirsher out:
615769b4b095SJeff Kirsher 	return err;
615869b4b095SJeff Kirsher }
615969b4b095SJeff Kirsher 
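/* Undo the Realtek 8201 crossover-detection tweak applied during phy_init()
 * when phy_cross is NV_CROSSOVER_DETECTION_DISABLED, then restart
 * autonegotiation so the PHY is left in its default state for whatever
 * driver or firmware touches it next.
 */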
616069b4b095SJeff Kirsher static void nv_restore_phy(struct net_device *dev)
616169b4b095SJeff Kirsher {
616269b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
616369b4b095SJeff Kirsher 	u16 phy_reserved, mii_control;
616469b4b095SJeff Kirsher 
616569b4b095SJeff Kirsher 	if (np->phy_oui == PHY_OUI_REALTEK &&
616669b4b095SJeff Kirsher 	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
616769b4b095SJeff Kirsher 	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
616869b4b095SJeff Kirsher 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
616969b4b095SJeff Kirsher 		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
617069b4b095SJeff Kirsher 		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
617169b4b095SJeff Kirsher 		phy_reserved |= PHY_REALTEK_INIT8;
617269b4b095SJeff Kirsher 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
617369b4b095SJeff Kirsher 		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
617469b4b095SJeff Kirsher 
617569b4b095SJeff Kirsher 		/* restart auto negotiation */
617669b4b095SJeff Kirsher 		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
617769b4b095SJeff Kirsher 		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
617869b4b095SJeff Kirsher 		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
617969b4b095SJeff Kirsher 	}
618069b4b095SJeff Kirsher }
618169b4b095SJeff Kirsher 
618269b4b095SJeff Kirsher static void nv_restore_mac_addr(struct pci_dev *pci_dev)
618369b4b095SJeff Kirsher {
618469b4b095SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pci_dev);
618569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
618669b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
618769b4b095SJeff Kirsher 
618869b4b095SJeff Kirsher 	/* special op: write back the misordered MAC address - otherwise
618969b4b095SJeff Kirsher 	 * the next nv_probe would see a wrong address.
619069b4b095SJeff Kirsher 	 */
619169b4b095SJeff Kirsher 	writel(np->orig_mac[0], base + NvRegMacAddrA);
619269b4b095SJeff Kirsher 	writel(np->orig_mac[1], base + NvRegMacAddrB);
619369b4b095SJeff Kirsher 	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
619469b4b095SJeff Kirsher 	       base + NvRegTransmitPoll);
619569b4b095SJeff Kirsher }
619669b4b095SJeff Kirsher 
6197d05919a1SBill Pemberton static void nv_remove(struct pci_dev *pci_dev)
619869b4b095SJeff Kirsher {
619969b4b095SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pci_dev);
6200f4b633b9SZhu Yanjun 	struct fe_priv *np = netdev_priv(dev);
6201f4b633b9SZhu Yanjun 
6202f4b633b9SZhu Yanjun 	free_percpu(np->txrx_stats);
620369b4b095SJeff Kirsher 
620469b4b095SJeff Kirsher 	unregister_netdev(dev);
620569b4b095SJeff Kirsher 
620669b4b095SJeff Kirsher 	nv_restore_mac_addr(pci_dev);
620769b4b095SJeff Kirsher 
620869b4b095SJeff Kirsher 	/* restore any phy related changes */
620969b4b095SJeff Kirsher 	nv_restore_phy(dev);
621069b4b095SJeff Kirsher 
621169b4b095SJeff Kirsher 	nv_mgmt_release_sema(dev);
621269b4b095SJeff Kirsher 
621369b4b095SJeff Kirsher 	/* free all structures */
621469b4b095SJeff Kirsher 	free_rings(dev);
621569b4b095SJeff Kirsher 	iounmap(get_hwbase(dev));
621669b4b095SJeff Kirsher 	pci_release_regions(pci_dev);
621769b4b095SJeff Kirsher 	pci_disable_device(pci_dev);
621869b4b095SJeff Kirsher 	free_netdev(dev);
621969b4b095SJeff Kirsher }
622069b4b095SJeff Kirsher 
622169b4b095SJeff Kirsher #ifdef CONFIG_PM_SLEEP
622269b4b095SJeff Kirsher static int nv_suspend(struct device *device)
622369b4b095SJeff Kirsher {
6224dfa56f83SChuhong Yuan 	struct net_device *dev = dev_get_drvdata(device);
622569b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
622669b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
622769b4b095SJeff Kirsher 	int i;
622869b4b095SJeff Kirsher 
622969b4b095SJeff Kirsher 	if (netif_running(dev)) {
623069b4b095SJeff Kirsher 		/* Gross, but it quiesces the hardware before suspend. */
623169b4b095SJeff Kirsher 		nv_close(dev);
623269b4b095SJeff Kirsher 	}
623369b4b095SJeff Kirsher 	netif_device_detach(dev);
623469b4b095SJeff Kirsher 
623569b4b095SJeff Kirsher 	/* save non-pci configuration space */
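	/* This snapshots the chip's MMIO registers, not PCI config space
	 * (the PCI core saves that separately); their contents may be lost
	 * while suspended and are written back in nv_resume().
	 */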
623669b4b095SJeff Kirsher 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
623769b4b095SJeff Kirsher 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
623869b4b095SJeff Kirsher 
623969b4b095SJeff Kirsher 	return 0;
624069b4b095SJeff Kirsher }
624169b4b095SJeff Kirsher 
624269b4b095SJeff Kirsher static int nv_resume(struct device *device)
624369b4b095SJeff Kirsher {
624469b4b095SJeff Kirsher 	struct pci_dev *pdev = to_pci_dev(device);
624569b4b095SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
624669b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
624769b4b095SJeff Kirsher 	u8 __iomem *base = get_hwbase(dev);
624869b4b095SJeff Kirsher 	int i, rc = 0;
624969b4b095SJeff Kirsher 
625069b4b095SJeff Kirsher 	/* restore non-pci configuration space */
625169b4b095SJeff Kirsher 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
625269b4b095SJeff Kirsher 		writel(np->saved_config_space[i], base+i*sizeof(u32));
625369b4b095SJeff Kirsher 
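	/* Chips flagged DEV_NEED_MSI_FIX need this private register in PCI
	 * config space rewritten after the restore above; without it, MSI
	 * interrupts may not work on those boards after a resume.
	 */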
625469b4b095SJeff Kirsher 	if (np->driver_data & DEV_NEED_MSI_FIX)
625569b4b095SJeff Kirsher 		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
625669b4b095SJeff Kirsher 
625769b4b095SJeff Kirsher 	/* restore phy state, including autoneg */
625869b4b095SJeff Kirsher 	phy_init(dev);
625969b4b095SJeff Kirsher 
626069b4b095SJeff Kirsher 	netif_device_attach(dev);
626169b4b095SJeff Kirsher 	if (netif_running(dev)) {
626269b4b095SJeff Kirsher 		rc = nv_open(dev);
626369b4b095SJeff Kirsher 		nv_set_multicast(dev);
626469b4b095SJeff Kirsher 	}
626569b4b095SJeff Kirsher 	return rc;
626669b4b095SJeff Kirsher }
626769b4b095SJeff Kirsher 
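/* SIMPLE_DEV_PM_OPS wires nv_suspend/nv_resume up for both suspend/resume
 * and hibernation (freeze/thaw/poweroff/restore) transitions.
 */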
626869b4b095SJeff Kirsher static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
626969b4b095SJeff Kirsher #define NV_PM_OPS (&nv_pm_ops)
627069b4b095SJeff Kirsher 
627169b4b095SJeff Kirsher #else
627269b4b095SJeff Kirsher #define NV_PM_OPS NULL
627369b4b095SJeff Kirsher #endif /* CONFIG_PM_SLEEP */
627469b4b095SJeff Kirsher 
627569b4b095SJeff Kirsher #ifdef CONFIG_PM
627669b4b095SJeff Kirsher static void nv_shutdown(struct pci_dev *pdev)
627769b4b095SJeff Kirsher {
627869b4b095SJeff Kirsher 	struct net_device *dev = pci_get_drvdata(pdev);
627969b4b095SJeff Kirsher 	struct fe_priv *np = netdev_priv(dev);
628069b4b095SJeff Kirsher 
628169b4b095SJeff Kirsher 	if (netif_running(dev))
628269b4b095SJeff Kirsher 		nv_close(dev);
628369b4b095SJeff Kirsher 
628469b4b095SJeff Kirsher 	/*
628569b4b095SJeff Kirsher 	 * Restore the MAC so a kernel started by kexec won't get confused.
628669b4b095SJeff Kirsher 	 * If we really go for poweroff, we must not restore the MAC,
628769b4b095SJeff Kirsher 	 * otherwise the MAC for WOL will be reversed at least on some boards.
628869b4b095SJeff Kirsher 	 */
628969b4b095SJeff Kirsher 	if (system_state != SYSTEM_POWER_OFF)
629069b4b095SJeff Kirsher 		nv_restore_mac_addr(pdev);
629169b4b095SJeff Kirsher 
629269b4b095SJeff Kirsher 	pci_disable_device(pdev);
629369b4b095SJeff Kirsher 	/*
629469b4b095SJeff Kirsher 	 * Apparently it is not possible to reinitialise from D3 hot,
629569b4b095SJeff Kirsher 	 * so only put the device into D3 if we really go for poweroff.
629669b4b095SJeff Kirsher 	 */
629769b4b095SJeff Kirsher 	if (system_state == SYSTEM_POWER_OFF) {
629869b4b095SJeff Kirsher 		pci_wake_from_d3(pdev, np->wolenabled);
629969b4b095SJeff Kirsher 		pci_set_power_state(pdev, PCI_D3hot);
630069b4b095SJeff Kirsher 	}
630169b4b095SJeff Kirsher }
630269b4b095SJeff Kirsher #else
630369b4b095SJeff Kirsher #define nv_shutdown NULL
630469b4b095SJeff Kirsher #endif /* CONFIG_PM */
630569b4b095SJeff Kirsher 
63069baa3c34SBenoit Taine static const struct pci_device_id pci_tbl[] = {
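	/* driver_data carries the DEV_* feature and workaround flags that
	 * nv_probe() uses to tailor the driver to each chipset variant.
	 */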
630769b4b095SJeff Kirsher 	{	/* nForce Ethernet Controller */
630869b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x01C3),
630969b4b095SJeff Kirsher 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
631069b4b095SJeff Kirsher 	},
631169b4b095SJeff Kirsher 	{	/* nForce2 Ethernet Controller */
631269b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0066),
631369b4b095SJeff Kirsher 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
631469b4b095SJeff Kirsher 	},
631569b4b095SJeff Kirsher 	{	/* nForce3 Ethernet Controller */
631669b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x00D6),
631769b4b095SJeff Kirsher 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
631869b4b095SJeff Kirsher 	},
631969b4b095SJeff Kirsher 	{	/* nForce3 Ethernet Controller */
632069b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0086),
632169b4b095SJeff Kirsher 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
632269b4b095SJeff Kirsher 	},
632369b4b095SJeff Kirsher 	{	/* nForce3 Ethernet Controller */
632469b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x008C),
632569b4b095SJeff Kirsher 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
632669b4b095SJeff Kirsher 	},
632769b4b095SJeff Kirsher 	{	/* nForce3 Ethernet Controller */
632869b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x00E6),
632969b4b095SJeff Kirsher 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
633069b4b095SJeff Kirsher 	},
633169b4b095SJeff Kirsher 	{	/* nForce3 Ethernet Controller */
633269b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x00DF),
633369b4b095SJeff Kirsher 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
633469b4b095SJeff Kirsher 	},
633569b4b095SJeff Kirsher 	{	/* CK804 Ethernet Controller */
633669b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0056),
633769b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
633869b4b095SJeff Kirsher 	},
633969b4b095SJeff Kirsher 	{	/* CK804 Ethernet Controller */
634069b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0057),
634169b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
634269b4b095SJeff Kirsher 	},
634369b4b095SJeff Kirsher 	{	/* MCP04 Ethernet Controller */
634469b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0037),
634569b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
634669b4b095SJeff Kirsher 	},
634769b4b095SJeff Kirsher 	{	/* MCP04 Ethernet Controller */
634869b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0038),
634969b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
635069b4b095SJeff Kirsher 	},
635169b4b095SJeff Kirsher 	{	/* MCP51 Ethernet Controller */
635269b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0268),
635369b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
635469b4b095SJeff Kirsher 	},
635569b4b095SJeff Kirsher 	{	/* MCP51 Ethernet Controller */
635669b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0269),
635769b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
635869b4b095SJeff Kirsher 	},
635969b4b095SJeff Kirsher 	{	/* MCP55 Ethernet Controller */
636069b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0372),
636169b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
636269b4b095SJeff Kirsher 	},
636369b4b095SJeff Kirsher 	{	/* MCP55 Ethernet Controller */
636469b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0373),
636569b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
636669b4b095SJeff Kirsher 	},
636769b4b095SJeff Kirsher 	{	/* MCP61 Ethernet Controller */
636869b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x03E5),
636969b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
637069b4b095SJeff Kirsher 	},
637169b4b095SJeff Kirsher 	{	/* MCP61 Ethernet Controller */
637269b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x03E6),
637369b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
637469b4b095SJeff Kirsher 	},
637569b4b095SJeff Kirsher 	{	/* MCP61 Ethernet Controller */
637669b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x03EE),
637769b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
637869b4b095SJeff Kirsher 	},
637969b4b095SJeff Kirsher 	{	/* MCP61 Ethernet Controller */
638069b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x03EF),
638169b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
638269b4b095SJeff Kirsher 	},
638369b4b095SJeff Kirsher 	{	/* MCP65 Ethernet Controller */
638469b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0450),
638569b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
638669b4b095SJeff Kirsher 	},
638769b4b095SJeff Kirsher 	{	/* MCP65 Ethernet Controller */
638869b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0451),
638969b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
639069b4b095SJeff Kirsher 	},
639169b4b095SJeff Kirsher 	{	/* MCP65 Ethernet Controller */
639269b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0452),
639369b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
639469b4b095SJeff Kirsher 	},
639569b4b095SJeff Kirsher 	{	/* MCP65 Ethernet Controller */
639669b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0453),
639769b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
639869b4b095SJeff Kirsher 	},
639969b4b095SJeff Kirsher 	{	/* MCP67 Ethernet Controller */
640069b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x054C),
640169b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
640269b4b095SJeff Kirsher 	},
640369b4b095SJeff Kirsher 	{	/* MCP67 Ethernet Controller */
640469b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x054D),
640569b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
640669b4b095SJeff Kirsher 	},
640769b4b095SJeff Kirsher 	{	/* MCP67 Ethernet Controller */
640869b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x054E),
640969b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
641069b4b095SJeff Kirsher 	},
641169b4b095SJeff Kirsher 	{	/* MCP67 Ethernet Controller */
641269b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x054F),
641369b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
641469b4b095SJeff Kirsher 	},
641569b4b095SJeff Kirsher 	{	/* MCP73 Ethernet Controller */
641669b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x07DC),
641769b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
641869b4b095SJeff Kirsher 	},
641969b4b095SJeff Kirsher 	{	/* MCP73 Ethernet Controller */
642069b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x07DD),
642169b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
642269b4b095SJeff Kirsher 	},
642369b4b095SJeff Kirsher 	{	/* MCP73 Ethernet Controller */
642469b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x07DE),
642569b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
642669b4b095SJeff Kirsher 	},
642769b4b095SJeff Kirsher 	{	/* MCP73 Ethernet Controller */
642869b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x07DF),
642969b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
643069b4b095SJeff Kirsher 	},
643169b4b095SJeff Kirsher 	{	/* MCP77 Ethernet Controller */
643269b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0760),
643369b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
643469b4b095SJeff Kirsher 	},
643569b4b095SJeff Kirsher 	{	/* MCP77 Ethernet Controller */
643669b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0761),
643769b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
643869b4b095SJeff Kirsher 	},
643969b4b095SJeff Kirsher 	{	/* MCP77 Ethernet Controller */
644069b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0762),
644169b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
644269b4b095SJeff Kirsher 	},
644369b4b095SJeff Kirsher 	{	/* MCP77 Ethernet Controller */
644469b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0763),
644569b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
644669b4b095SJeff Kirsher 	},
644769b4b095SJeff Kirsher 	{	/* MCP79 Ethernet Controller */
644869b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0AB0),
644969b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
645069b4b095SJeff Kirsher 	},
645169b4b095SJeff Kirsher 	{	/* MCP79 Ethernet Controller */
645269b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0AB1),
645369b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
645469b4b095SJeff Kirsher 	},
645569b4b095SJeff Kirsher 	{	/* MCP79 Ethernet Controller */
645669b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0AB2),
645769b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
645869b4b095SJeff Kirsher 	},
645969b4b095SJeff Kirsher 	{	/* MCP79 Ethernet Controller */
646069b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0AB3),
646169b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
646269b4b095SJeff Kirsher 	},
646369b4b095SJeff Kirsher 	{	/* MCP89 Ethernet Controller */
646469b4b095SJeff Kirsher 		PCI_DEVICE(0x10DE, 0x0D7D),
646569b4b095SJeff Kirsher 		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
646669b4b095SJeff Kirsher 	},
646769b4b095SJeff Kirsher 	{0,},
646869b4b095SJeff Kirsher };
646969b4b095SJeff Kirsher 
64704f45c40fSPeter Hüwe static struct pci_driver forcedeth_pci_driver = {
647169b4b095SJeff Kirsher 	.name		= DRV_NAME,
647269b4b095SJeff Kirsher 	.id_table	= pci_tbl,
647369b4b095SJeff Kirsher 	.probe		= nv_probe,
6474d05919a1SBill Pemberton 	.remove		= nv_remove,
647569b4b095SJeff Kirsher 	.shutdown	= nv_shutdown,
647669b4b095SJeff Kirsher 	.driver.pm	= NV_PM_OPS,
647769b4b095SJeff Kirsher };
647869b4b095SJeff Kirsher 
647969b4b095SJeff Kirsher module_param(max_interrupt_work, int, 0);
648069b4b095SJeff Kirsher MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
648169b4b095SJeff Kirsher module_param(optimization_mode, int, 0);
648269b4b095SJeff Kirsher MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
648369b4b095SJeff Kirsher module_param(poll_interval, int, 0);
648469b4b095SJeff Kirsher MODULE_PARM_DESC(poll_interval, "Interval that determines how frequently the timer interrupt is generated, computed as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
648569b4b095SJeff Kirsher module_param(msi, int, 0);
648669b4b095SJeff Kirsher MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
648769b4b095SJeff Kirsher module_param(msix, int, 0);
648869b4b095SJeff Kirsher MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
648969b4b095SJeff Kirsher module_param(dma_64bit, int, 0);
649069b4b095SJeff Kirsher MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
649169b4b095SJeff Kirsher module_param(phy_cross, int, 0);
649269b4b095SJeff Kirsher MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
649369b4b095SJeff Kirsher module_param(phy_power_down, int, 0);
649469b4b095SJeff Kirsher MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
64951ec4f2d3SSameer Nanda module_param(debug_tx_timeout, bool, 0);
64961ec4f2d3SSameer Nanda MODULE_PARM_DESC(debug_tx_timeout,
64971ec4f2d3SSameer Nanda 		 "Dump tx related registers and ring when tx_timeout happens");
649869b4b095SJeff Kirsher 
64994f45c40fSPeter Hüwe module_pci_driver(forcedeth_pci_driver);
650069b4b095SJeff Kirsher MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
650169b4b095SJeff Kirsher MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
650269b4b095SJeff Kirsher MODULE_LICENSE("GPL");
650369b4b095SJeff Kirsher MODULE_DEVICE_TABLE(pci, pci_tbl);