156cb4e50SThomas Gleixner // SPDX-License-Identifier: GPL-2.0+
2e689cf4aSJeff Kirsher /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
3e689cf4aSJeff Kirsher *
4e689cf4aSJeff Kirsher * Copyright (C) 2004 Sun Microsystems Inc.
5e689cf4aSJeff Kirsher * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
6e689cf4aSJeff Kirsher *
7e689cf4aSJeff Kirsher * This driver uses the sungem driver (c) David Miller
8e689cf4aSJeff Kirsher * (davem@redhat.com) as its basis.
9e689cf4aSJeff Kirsher *
10e689cf4aSJeff Kirsher * The cassini chip has a number of features that distinguish it from
11e689cf4aSJeff Kirsher * the gem chip:
12e689cf4aSJeff Kirsher * 4 transmit descriptor rings that are used for either QoS (VLAN) or
13e689cf4aSJeff Kirsher * load balancing (non-VLAN mode)
14e689cf4aSJeff Kirsher * batching of multiple packets
15e689cf4aSJeff Kirsher * multiple CPU dispatching
16e689cf4aSJeff Kirsher * page-based RX descriptor engine with separate completion rings
17e689cf4aSJeff Kirsher * Gigabit support (GMII and PCS interface)
18e689cf4aSJeff Kirsher * MIF link up/down detection works
19e689cf4aSJeff Kirsher *
20e689cf4aSJeff Kirsher * RX is handled by page sized buffers that are attached as fragments to
21e689cf4aSJeff Kirsher * the skb. here's what's done:
22e689cf4aSJeff Kirsher * -- driver allocates pages at a time and keeps reference counts
23e689cf4aSJeff Kirsher * on them.
24e689cf4aSJeff Kirsher * -- the upper protocol layers assume that the header is in the skb
25e689cf4aSJeff Kirsher * itself. as a result, cassini will copy a small amount (64 bytes)
26e689cf4aSJeff Kirsher * to make them happy.
27e689cf4aSJeff Kirsher * -- driver appends the rest of the data pages as frags to skbuffs
28e689cf4aSJeff Kirsher * and increments the reference count
29e689cf4aSJeff Kirsher * -- on page reclamation, the driver swaps the page with a spare page.
30e689cf4aSJeff Kirsher * if that page is still in use, it frees its reference to that page,
31e689cf4aSJeff Kirsher * and allocates a new page for use. otherwise, it just recycles the
32c3178883SJilin Yuan * page.
33e689cf4aSJeff Kirsher *
34e689cf4aSJeff Kirsher * NOTE: cassini can parse the header. however, it's not worth it
35e689cf4aSJeff Kirsher * as long as the network stack requires a header copy.
36e689cf4aSJeff Kirsher *
37e689cf4aSJeff Kirsher * TX has 4 queues. currently these queues are used in a round-robin
38e689cf4aSJeff Kirsher * fashion for load balancing. They can also be used for QoS. for that
39e689cf4aSJeff Kirsher * to work, however, QoS information needs to be exposed down to the driver
40e689cf4aSJeff Kirsher * level so that subqueues get targeted to particular transmit rings.
41e689cf4aSJeff Kirsher * alternatively, the queues can be configured via use of the all-purpose
42e689cf4aSJeff Kirsher * ioctl.
43e689cf4aSJeff Kirsher *
44e689cf4aSJeff Kirsher * RX DATA: the rx completion ring has all the info, but the rx desc
45e689cf4aSJeff Kirsher * ring has all of the data. RX can conceivably come in under multiple
46e689cf4aSJeff Kirsher * interrupts, but the INT# assignment needs to be set up properly by
47e689cf4aSJeff Kirsher * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
48e689cf4aSJeff Kirsher * that. also, the two descriptor rings are designed to distinguish between
49e689cf4aSJeff Kirsher * encrypted and non-encrypted packets, but we use them for buffering
50e689cf4aSJeff Kirsher * instead.
51e689cf4aSJeff Kirsher *
52e689cf4aSJeff Kirsher * by default, the selective clear mask is set up to process rx packets.
53e689cf4aSJeff Kirsher */
54e689cf4aSJeff Kirsher
55e689cf4aSJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56e689cf4aSJeff Kirsher
57e689cf4aSJeff Kirsher #include <linux/module.h>
58e689cf4aSJeff Kirsher #include <linux/kernel.h>
59e689cf4aSJeff Kirsher #include <linux/types.h>
60e689cf4aSJeff Kirsher #include <linux/compiler.h>
61e689cf4aSJeff Kirsher #include <linux/slab.h>
62e689cf4aSJeff Kirsher #include <linux/delay.h>
63e689cf4aSJeff Kirsher #include <linux/init.h>
64e689cf4aSJeff Kirsher #include <linux/interrupt.h>
65e689cf4aSJeff Kirsher #include <linux/vmalloc.h>
66e689cf4aSJeff Kirsher #include <linux/ioport.h>
67e689cf4aSJeff Kirsher #include <linux/pci.h>
68e689cf4aSJeff Kirsher #include <linux/mm.h>
69e689cf4aSJeff Kirsher #include <linux/highmem.h>
70e689cf4aSJeff Kirsher #include <linux/list.h>
71e689cf4aSJeff Kirsher #include <linux/dma-mapping.h>
72e689cf4aSJeff Kirsher
73e689cf4aSJeff Kirsher #include <linux/netdevice.h>
74e689cf4aSJeff Kirsher #include <linux/etherdevice.h>
75e689cf4aSJeff Kirsher #include <linux/skbuff.h>
76e689cf4aSJeff Kirsher #include <linux/ethtool.h>
77e689cf4aSJeff Kirsher #include <linux/crc32.h>
78e689cf4aSJeff Kirsher #include <linux/random.h>
79e689cf4aSJeff Kirsher #include <linux/mii.h>
80e689cf4aSJeff Kirsher #include <linux/ip.h>
81e689cf4aSJeff Kirsher #include <linux/tcp.h>
82e689cf4aSJeff Kirsher #include <linux/mutex.h>
83e689cf4aSJeff Kirsher #include <linux/firmware.h>
84e689cf4aSJeff Kirsher
85e689cf4aSJeff Kirsher #include <net/checksum.h>
86e689cf4aSJeff Kirsher
87e689cf4aSJeff Kirsher #include <linux/atomic.h>
88e689cf4aSJeff Kirsher #include <asm/io.h>
89e689cf4aSJeff Kirsher #include <asm/byteorder.h>
907c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
91e0e8028cSWang Qing #include <linux/jiffies.h>
92e689cf4aSJeff Kirsher
93e689cf4aSJeff Kirsher #define CAS_NCPUS num_online_cpus()
94e689cf4aSJeff Kirsher
95e689cf4aSJeff Kirsher #define cas_skb_release(x) netif_rx(x)
96e689cf4aSJeff Kirsher
97e689cf4aSJeff Kirsher /* select which firmware to use */
98e689cf4aSJeff Kirsher #define USE_HP_WORKAROUND
99e689cf4aSJeff Kirsher #define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
100e689cf4aSJeff Kirsher #define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
101e689cf4aSJeff Kirsher
102e689cf4aSJeff Kirsher #include "cassini.h"
103e689cf4aSJeff Kirsher
104e689cf4aSJeff Kirsher #define USE_TX_COMPWB /* use completion writeback registers */
105e689cf4aSJeff Kirsher #define USE_CSMA_CD_PROTO /* standard CSMA/CD */
106e689cf4aSJeff Kirsher #define USE_RX_BLANK /* hw interrupt mitigation */
107e689cf4aSJeff Kirsher #undef USE_ENTROPY_DEV /* don't test for entropy device */
108e689cf4aSJeff Kirsher
109e689cf4aSJeff Kirsher /* NOTE: these aren't useable unless PCI interrupts can be assigned.
110e689cf4aSJeff Kirsher * also, we need to make cp->lock finer-grained.
111e689cf4aSJeff Kirsher */
112e689cf4aSJeff Kirsher #undef USE_PCI_INTB
113e689cf4aSJeff Kirsher #undef USE_PCI_INTC
114e689cf4aSJeff Kirsher #undef USE_PCI_INTD
115e689cf4aSJeff Kirsher #undef USE_QOS
116e689cf4aSJeff Kirsher
117e689cf4aSJeff Kirsher #undef USE_VPD_DEBUG /* debug vpd information if defined */
118e689cf4aSJeff Kirsher
119e689cf4aSJeff Kirsher /* rx processing options */
120e689cf4aSJeff Kirsher #define USE_PAGE_ORDER /* specify to allocate large rx pages */
121e689cf4aSJeff Kirsher #define RX_DONT_BATCH 0 /* if 1, don't batch flows */
122e689cf4aSJeff Kirsher #define RX_COPY_ALWAYS 0 /* if 0, use frags */
123e689cf4aSJeff Kirsher #define RX_COPY_MIN 64 /* copy a little to make upper layers happy */
124e689cf4aSJeff Kirsher #undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
125e689cf4aSJeff Kirsher
126e689cf4aSJeff Kirsher #define DRV_MODULE_NAME "cassini"
127e689cf4aSJeff Kirsher #define DRV_MODULE_VERSION "1.6"
128e689cf4aSJeff Kirsher #define DRV_MODULE_RELDATE "21 May 2008"
129e689cf4aSJeff Kirsher
130e689cf4aSJeff Kirsher #define CAS_DEF_MSG_ENABLE \
131e689cf4aSJeff Kirsher (NETIF_MSG_DRV | \
132e689cf4aSJeff Kirsher NETIF_MSG_PROBE | \
133e689cf4aSJeff Kirsher NETIF_MSG_LINK | \
134e689cf4aSJeff Kirsher NETIF_MSG_TIMER | \
135e689cf4aSJeff Kirsher NETIF_MSG_IFDOWN | \
136e689cf4aSJeff Kirsher NETIF_MSG_IFUP | \
137e689cf4aSJeff Kirsher NETIF_MSG_RX_ERR | \
138e689cf4aSJeff Kirsher NETIF_MSG_TX_ERR)
139e689cf4aSJeff Kirsher
140e689cf4aSJeff Kirsher /* length of time before we decide the hardware is borked,
141e689cf4aSJeff Kirsher * and dev->tx_timeout() should be called to fix the problem
142e689cf4aSJeff Kirsher */
143e689cf4aSJeff Kirsher #define CAS_TX_TIMEOUT (HZ)
144e689cf4aSJeff Kirsher #define CAS_LINK_TIMEOUT (22*HZ/10)
145e689cf4aSJeff Kirsher #define CAS_LINK_FAST_TIMEOUT (1)
146e689cf4aSJeff Kirsher
147e689cf4aSJeff Kirsher /* timeout values for state changing. these specify the number
148e689cf4aSJeff Kirsher * of 10us delays to be used before giving up.
149e689cf4aSJeff Kirsher */
150e689cf4aSJeff Kirsher #define STOP_TRIES_PHY 1000
151e689cf4aSJeff Kirsher #define STOP_TRIES 5000
152e689cf4aSJeff Kirsher
153e689cf4aSJeff Kirsher /* specify a minimum frame size to deal with some fifo issues
154e689cf4aSJeff Kirsher * max mtu == 2 * page size - ethernet header - 64 - swivel =
155e689cf4aSJeff Kirsher * 2 * page_size - 0x50
156e689cf4aSJeff Kirsher */
157e689cf4aSJeff Kirsher #define CAS_MIN_FRAME 97
158e689cf4aSJeff Kirsher #define CAS_1000MB_MIN_FRAME 255
159e689cf4aSJeff Kirsher #define CAS_MIN_MTU 60
160e689cf4aSJeff Kirsher #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
161e689cf4aSJeff Kirsher
162e689cf4aSJeff Kirsher #if 1
163e689cf4aSJeff Kirsher /*
164e689cf4aSJeff Kirsher * Eliminate these and use separate atomic counters for each, to
165e689cf4aSJeff Kirsher * avoid a race condition.
166e689cf4aSJeff Kirsher */
167e689cf4aSJeff Kirsher #else
168e689cf4aSJeff Kirsher #define CAS_RESET_MTU 1
169e689cf4aSJeff Kirsher #define CAS_RESET_ALL 2
170e689cf4aSJeff Kirsher #define CAS_RESET_SPARE 3
171e689cf4aSJeff Kirsher #endif
172e689cf4aSJeff Kirsher
173f73d12bdSBill Pemberton static char version[] =
174e689cf4aSJeff Kirsher DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
175e689cf4aSJeff Kirsher
176e689cf4aSJeff Kirsher static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
177e689cf4aSJeff Kirsher static int link_mode;
178e689cf4aSJeff Kirsher
179e689cf4aSJeff Kirsher MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
180e689cf4aSJeff Kirsher MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
181e689cf4aSJeff Kirsher MODULE_LICENSE("GPL");
182e689cf4aSJeff Kirsher MODULE_FIRMWARE("sun/cassini.bin");
183e689cf4aSJeff Kirsher module_param(cassini_debug, int, 0);
184e689cf4aSJeff Kirsher MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
185e689cf4aSJeff Kirsher module_param(link_mode, int, 0);
186e689cf4aSJeff Kirsher MODULE_PARM_DESC(link_mode, "default link mode");
187e689cf4aSJeff Kirsher
188e689cf4aSJeff Kirsher /*
189e689cf4aSJeff Kirsher * Work around for a PCS bug in which the link goes down due to the chip
190e689cf4aSJeff Kirsher * being confused and never showing a link status of "up."
191e689cf4aSJeff Kirsher */
192e689cf4aSJeff Kirsher #define DEFAULT_LINKDOWN_TIMEOUT 5
193e689cf4aSJeff Kirsher /*
194e689cf4aSJeff Kirsher * Value in seconds, for user input.
195e689cf4aSJeff Kirsher */
196e689cf4aSJeff Kirsher static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
197e689cf4aSJeff Kirsher module_param(linkdown_timeout, int, 0);
198e689cf4aSJeff Kirsher MODULE_PARM_DESC(linkdown_timeout,
199e689cf4aSJeff Kirsher "min reset interval in sec. for PCS linkdown issue; disabled if not positive");
200e689cf4aSJeff Kirsher
/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
206e689cf4aSJeff Kirsher static int link_transition_timeout;
207e689cf4aSJeff Kirsher
208e689cf4aSJeff Kirsher
209e689cf4aSJeff Kirsher
210f73d12bdSBill Pemberton static u16 link_modes[] = {
211e689cf4aSJeff Kirsher BMCR_ANENABLE, /* 0 : autoneg */
212e689cf4aSJeff Kirsher 0, /* 1 : 10bt half duplex */
213e689cf4aSJeff Kirsher BMCR_SPEED100, /* 2 : 100bt half duplex */
214e689cf4aSJeff Kirsher BMCR_FULLDPLX, /* 3 : 10bt full duplex */
215e689cf4aSJeff Kirsher BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */
216e689cf4aSJeff Kirsher CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
217e689cf4aSJeff Kirsher };
218e689cf4aSJeff Kirsher
2199baa3c34SBenoit Taine static const struct pci_device_id cas_pci_tbl[] = {
220e689cf4aSJeff Kirsher { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
221e689cf4aSJeff Kirsher PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222e689cf4aSJeff Kirsher { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
223e689cf4aSJeff Kirsher PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224e689cf4aSJeff Kirsher { 0, }
225e689cf4aSJeff Kirsher };
226e689cf4aSJeff Kirsher
227e689cf4aSJeff Kirsher MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
228e689cf4aSJeff Kirsher
229e689cf4aSJeff Kirsher static void cas_set_link_modes(struct cas *cp);
230e689cf4aSJeff Kirsher
cas_lock_tx(struct cas * cp)231e689cf4aSJeff Kirsher static inline void cas_lock_tx(struct cas *cp)
232e689cf4aSJeff Kirsher {
233e689cf4aSJeff Kirsher int i;
234e689cf4aSJeff Kirsher
235e689cf4aSJeff Kirsher for (i = 0; i < N_TX_RINGS; i++)
236a9de0500SEmil Goode spin_lock_nested(&cp->tx_lock[i], i);
237e689cf4aSJeff Kirsher }
238e689cf4aSJeff Kirsher
239e689cf4aSJeff Kirsher /* WTZ: QA was finding deadlock problems with the previous
240e689cf4aSJeff Kirsher * versions after long test runs with multiple cards per machine.
241e689cf4aSJeff Kirsher * See if replacing cas_lock_all with safer versions helps. The
242e689cf4aSJeff Kirsher * symptoms QA is reporting match those we'd expect if interrupts
243e689cf4aSJeff Kirsher * aren't being properly restored, and we fixed a previous deadlock
244e689cf4aSJeff Kirsher * with similar symptoms by using save/restore versions in other
245e689cf4aSJeff Kirsher * places.
246e689cf4aSJeff Kirsher */
247e689cf4aSJeff Kirsher #define cas_lock_all_save(cp, flags) \
248e689cf4aSJeff Kirsher do { \
249e689cf4aSJeff Kirsher struct cas *xxxcp = (cp); \
250e689cf4aSJeff Kirsher spin_lock_irqsave(&xxxcp->lock, flags); \
251e689cf4aSJeff Kirsher cas_lock_tx(xxxcp); \
252e689cf4aSJeff Kirsher } while (0)
253e689cf4aSJeff Kirsher
cas_unlock_tx(struct cas * cp)254e689cf4aSJeff Kirsher static inline void cas_unlock_tx(struct cas *cp)
255e689cf4aSJeff Kirsher {
256e689cf4aSJeff Kirsher int i;
257e689cf4aSJeff Kirsher
258e689cf4aSJeff Kirsher for (i = N_TX_RINGS; i > 0; i--)
259e689cf4aSJeff Kirsher spin_unlock(&cp->tx_lock[i - 1]);
260e689cf4aSJeff Kirsher }
261e689cf4aSJeff Kirsher
262e689cf4aSJeff Kirsher #define cas_unlock_all_restore(cp, flags) \
263e689cf4aSJeff Kirsher do { \
264e689cf4aSJeff Kirsher struct cas *xxxcp = (cp); \
265e689cf4aSJeff Kirsher cas_unlock_tx(xxxcp); \
266e689cf4aSJeff Kirsher spin_unlock_irqrestore(&xxxcp->lock, flags); \
267e689cf4aSJeff Kirsher } while (0)
268e689cf4aSJeff Kirsher
/* Disable interrupts for one RX completion ring.
 *
 * Ring 0 shares the chip's primary interrupt, so it is silenced by
 * writing all-ones to the global interrupt mask.  Rings 1-3 have
 * per-ring mask registers that only exist on REG_PLUS (Cassini+)
 * parts, hence the cas_flags check.
 */
static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* Make sure we won't get any more interrupts */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* disable completion interrupts and selectively mask */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			/* NOTE(review): rings compiled in as separate PCI
			 * interrupts (INTB/C/D) additionally set
			 * INTRN_MASK_RX_EN here — presumably to keep RX
			 * masked as well; confirm against the register spec.
			 */
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}
301e689cf4aSJeff Kirsher
cas_mask_intr(struct cas * cp)302e689cf4aSJeff Kirsher static inline void cas_mask_intr(struct cas *cp)
303e689cf4aSJeff Kirsher {
304e689cf4aSJeff Kirsher int i;
305e689cf4aSJeff Kirsher
306e689cf4aSJeff Kirsher for (i = 0; i < N_RX_COMP_RINGS; i++)
307e689cf4aSJeff Kirsher cas_disable_irq(cp, i);
308e689cf4aSJeff Kirsher }
309e689cf4aSJeff Kirsher
/* Enable interrupts for one RX completion ring.
 *
 * Ring 0 unmasks everything except TX_DONE in the global mask
 * (TX completions are presumably handled via another path — see
 * USE_TX_COMPWB above; confirm).  Rings 1-3 are only touched on
 * REG_PLUS parts, and only when the matching PCI INTB/C/D support is
 * compiled in; otherwise they stay masked.
 */
static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) { /* all but TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			/* ring has no dedicated PCI interrupt configured */
			break;
		}
	}
}
338e689cf4aSJeff Kirsher
cas_unmask_intr(struct cas * cp)339e689cf4aSJeff Kirsher static inline void cas_unmask_intr(struct cas *cp)
340e689cf4aSJeff Kirsher {
341e689cf4aSJeff Kirsher int i;
342e689cf4aSJeff Kirsher
343e689cf4aSJeff Kirsher for (i = 0; i < N_RX_COMP_RINGS; i++)
344e689cf4aSJeff Kirsher cas_enable_irq(cp, i);
345e689cf4aSJeff Kirsher }
346e689cf4aSJeff Kirsher
/* Harvest randomness from the chip's entropy device.
 * Compiled out in this configuration: USE_ENTROPY_DEV is #undef'd at
 * the top of the file, so this is a no-op.
 */
static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	/* only boards flagged as having an entropy device participate */
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}
358e689cf4aSJeff Kirsher
/* Reset / probe the on-chip entropy device.
 * Compiled out in this configuration (USE_ENTROPY_DEV is #undef'd).
 * When enabled: writes a probe value to the RNG register and clears
 * CAS_FLAG_ENTROPY_DEV if it reads back as 0, i.e. no device present.
 */
static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}
375e689cf4aSJeff Kirsher
376e689cf4aSJeff Kirsher /* access to the phy. the following assumes that we've initialized the MIF to
377e689cf4aSJeff Kirsher * be in frame rather than bit-bang mode
378e689cf4aSJeff Kirsher */
cas_phy_read(struct cas * cp,int reg)379e689cf4aSJeff Kirsher static u16 cas_phy_read(struct cas *cp, int reg)
380e689cf4aSJeff Kirsher {
381e689cf4aSJeff Kirsher u32 cmd;
382e689cf4aSJeff Kirsher int limit = STOP_TRIES_PHY;
383e689cf4aSJeff Kirsher
384e689cf4aSJeff Kirsher cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
385e689cf4aSJeff Kirsher cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
386e689cf4aSJeff Kirsher cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
387e689cf4aSJeff Kirsher cmd |= MIF_FRAME_TURN_AROUND_MSB;
388e689cf4aSJeff Kirsher writel(cmd, cp->regs + REG_MIF_FRAME);
389e689cf4aSJeff Kirsher
390e689cf4aSJeff Kirsher /* poll for completion */
391e689cf4aSJeff Kirsher while (limit-- > 0) {
392e689cf4aSJeff Kirsher udelay(10);
393e689cf4aSJeff Kirsher cmd = readl(cp->regs + REG_MIF_FRAME);
394e689cf4aSJeff Kirsher if (cmd & MIF_FRAME_TURN_AROUND_LSB)
395e689cf4aSJeff Kirsher return cmd & MIF_FRAME_DATA_MASK;
396e689cf4aSJeff Kirsher }
397e689cf4aSJeff Kirsher return 0xFFFF; /* -1 */
398e689cf4aSJeff Kirsher }
399e689cf4aSJeff Kirsher
/* Write a PHY register through the MIF (frame mode assumed).  Polls up
 * to STOP_TRIES_PHY times at 10us intervals for the turn-around bit.
 * Returns 0 on success, -1 on timeout.
 */
static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	u32 frame;
	int tries;

	frame = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	frame |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	frame |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	frame |= MIF_FRAME_TURN_AROUND_MSB;
	frame |= val & MIF_FRAME_DATA_MASK;
	writel(frame, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	for (tries = 0; tries < STOP_TRIES_PHY; tries++) {
		udelay(10);
		frame = readl(cp->regs + REG_MIF_FRAME);
		if (frame & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}
421e689cf4aSJeff Kirsher
cas_phy_powerup(struct cas * cp)422e689cf4aSJeff Kirsher static void cas_phy_powerup(struct cas *cp)
423e689cf4aSJeff Kirsher {
424e689cf4aSJeff Kirsher u16 ctl = cas_phy_read(cp, MII_BMCR);
425e689cf4aSJeff Kirsher
426e689cf4aSJeff Kirsher if ((ctl & BMCR_PDOWN) == 0)
427e689cf4aSJeff Kirsher return;
428e689cf4aSJeff Kirsher ctl &= ~BMCR_PDOWN;
429e689cf4aSJeff Kirsher cas_phy_write(cp, MII_BMCR, ctl);
430e689cf4aSJeff Kirsher }
431e689cf4aSJeff Kirsher
cas_phy_powerdown(struct cas * cp)432e689cf4aSJeff Kirsher static void cas_phy_powerdown(struct cas *cp)
433e689cf4aSJeff Kirsher {
434e689cf4aSJeff Kirsher u16 ctl = cas_phy_read(cp, MII_BMCR);
435e689cf4aSJeff Kirsher
436e689cf4aSJeff Kirsher if (ctl & BMCR_PDOWN)
437e689cf4aSJeff Kirsher return;
438e689cf4aSJeff Kirsher ctl |= BMCR_PDOWN;
439e689cf4aSJeff Kirsher cas_phy_write(cp, MII_BMCR, ctl);
440e689cf4aSJeff Kirsher }
441e689cf4aSJeff Kirsher
442e689cf4aSJeff Kirsher /* cp->lock held. note: the last put_page will free the buffer */
/* Unmap one rx page's DMA mapping and release the page and its
 * cas_page_t wrapper.  Always returns 0.
 */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
		       DMA_FROM_DEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}
451e689cf4aSJeff Kirsher
452e689cf4aSJeff Kirsher #ifdef RX_COUNT_BUFFERS
453e689cf4aSJeff Kirsher #define RX_USED_ADD(x, y) ((x)->used += (y))
454e689cf4aSJeff Kirsher #define RX_USED_SET(x, y) ((x)->used = (y))
455e689cf4aSJeff Kirsher #else
456d0ea5cbdSJesse Brandeburg #define RX_USED_ADD(x, y) do { } while(0)
457d0ea5cbdSJesse Brandeburg #define RX_USED_SET(x, y) do { } while(0)
458e689cf4aSJeff Kirsher #endif
459e689cf4aSJeff Kirsher
460e689cf4aSJeff Kirsher /* local page allocation routines for the receive buffers. jumbo pages
461e689cf4aSJeff Kirsher * require at least 8K contiguous and 8K aligned buffers.
462e689cf4aSJeff Kirsher */
cas_page_alloc(struct cas * cp,const gfp_t flags)463e689cf4aSJeff Kirsher static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
464e689cf4aSJeff Kirsher {
465e689cf4aSJeff Kirsher cas_page_t *page;
466e689cf4aSJeff Kirsher
467e689cf4aSJeff Kirsher page = kmalloc(sizeof(cas_page_t), flags);
468e689cf4aSJeff Kirsher if (!page)
469e689cf4aSJeff Kirsher return NULL;
470e689cf4aSJeff Kirsher
471e689cf4aSJeff Kirsher INIT_LIST_HEAD(&page->list);
472e689cf4aSJeff Kirsher RX_USED_SET(page, 0);
473e689cf4aSJeff Kirsher page->buffer = alloc_pages(flags, cp->page_order);
474e689cf4aSJeff Kirsher if (!page->buffer)
475e689cf4aSJeff Kirsher goto page_err;
476dcc82bb0SChristophe JAILLET page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
477dcc82bb0SChristophe JAILLET cp->page_size, DMA_FROM_DEVICE);
478e689cf4aSJeff Kirsher return page;
479e689cf4aSJeff Kirsher
480e689cf4aSJeff Kirsher page_err:
481e689cf4aSJeff Kirsher kfree(page);
482e689cf4aSJeff Kirsher return NULL;
483e689cf4aSJeff Kirsher }
484e689cf4aSJeff Kirsher
485e689cf4aSJeff Kirsher /* initialize spare pool of rx buffers, but allocate during the open */
/* Reset the spare and in-use rx page lists to empty and record how
 * many spare pages are wanted (RX_SPARE_COUNT).  The pages themselves
 * are allocated later, during open (see the comment above).
 */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}
497e689cf4aSJeff Kirsher
498e689cf4aSJeff Kirsher /* used on close. free all the spare buffers. */
/* Free every page on both the spare and the in-use rx lists.
 * Each list is spliced onto a private local list under its lock, then
 * the pages are freed outside of the lock.
 */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	/* now drain the in-use list the same way */
	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}
530e689cf4aSJeff Kirsher
531e689cf4aSJeff Kirsher /* replenish spares if needed */
/* Replenish the spare rx page pool.
 *
 * Walks the in-use list and reclaims any page whose refcount is back
 * to 1 (only our reference remains): reclaimed pages move to the spare
 * list while spares are still needed, otherwise they are freed.  If
 * spares are still needed afterwards, new pages are allocated with the
 * given gfp flags.
 */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/*
		 * With the lockless pagecache, cassini buffering scheme gets
		 * slightly less accurate: we might find that a page has an
		 * elevated reference count here, due to a speculative ref,
		 * and skip it as in-use. Ideally we would be able to reclaim
		 * it. However this would be such a rare case, it doesn't
		 * matter too much as we should pick it up the next time round.
		 *
		 * Importantly, if we find that the page has a refcount of 1
		 * here (our refcount), then we know it is definitely not inuse
		 * so we can reuse it.
		 */
		if (page_count(page->buffer) > 1)
			continue;

		/* page is reclaimable: requeue as a spare or free it */
		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	/* snapshot the remaining deficit outside the loop */
	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}
606e689cf4aSJeff Kirsher
607e689cf4aSJeff Kirsher /* pull a page from the list. */
/* Pull one page off the spare list for use as an rx buffer.
 *
 * If the spare list is empty, a quick inline recovery is attempted
 * first; returns NULL if still no page is available.  Every time the
 * running deficit crosses a multiple of RX_SPARE_RECOVER_VAL (checked
 * with a power-of-two mask), the reset task is scheduled to refill the
 * pool in the background.
 */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}
645e689cf4aSJeff Kirsher
646e689cf4aSJeff Kirsher
cas_mif_poll(struct cas * cp,const int enable)647e689cf4aSJeff Kirsher static void cas_mif_poll(struct cas *cp, const int enable)
648e689cf4aSJeff Kirsher {
649e689cf4aSJeff Kirsher u32 cfg;
650e689cf4aSJeff Kirsher
651e689cf4aSJeff Kirsher cfg = readl(cp->regs + REG_MIF_CFG);
652e689cf4aSJeff Kirsher cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
653e689cf4aSJeff Kirsher
654e689cf4aSJeff Kirsher if (cp->phy_type & CAS_PHY_MII_MDIO1)
655e689cf4aSJeff Kirsher cfg |= MIF_CFG_PHY_SELECT;
656e689cf4aSJeff Kirsher
657e689cf4aSJeff Kirsher /* poll and interrupt on link status change. */
658e689cf4aSJeff Kirsher if (enable) {
659e689cf4aSJeff Kirsher cfg |= MIF_CFG_POLL_EN;
660e689cf4aSJeff Kirsher cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
661e689cf4aSJeff Kirsher cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
662e689cf4aSJeff Kirsher }
663e689cf4aSJeff Kirsher writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
664e689cf4aSJeff Kirsher cp->regs + REG_MIF_MASK);
665e689cf4aSJeff Kirsher writel(cfg, cp->regs + REG_MIF_CFG);
666e689cf4aSJeff Kirsher }
667e689cf4aSJeff Kirsher
668e689cf4aSJeff Kirsher /* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp,
				       const struct ethtool_link_ksettings *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters. ep == NULL means "restart with the
	 * currently-configured link_cntl" (e.g. from the reset path).
	 */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->base.autoneg == AUTONEG_ENABLE) {
		cp->link_cntl = BMCR_ANENABLE;
	} else {
		/* forced mode: translate ethtool speed/duplex to BMCR bits */
		u32 speed = ep->base.speed;
		cp->link_cntl = 0;
		if (speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->base.duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	/* from here on we treat the link as down until polling/interrupts
	 * report otherwise
	 */
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state..
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		/* serdes/fiber: program the PCS directly */
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		/* copper: rewrite BMCR through the MIF, with polling
		 * suspended around the register update
		 */
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
767e689cf4aSJeff Kirsher
768e689cf4aSJeff Kirsher /* Must be invoked under cp->lock. */
cas_reset_mii_phy(struct cas * cp)769e689cf4aSJeff Kirsher static int cas_reset_mii_phy(struct cas *cp)
770e689cf4aSJeff Kirsher {
771e689cf4aSJeff Kirsher int limit = STOP_TRIES_PHY;
772e689cf4aSJeff Kirsher u16 val;
773e689cf4aSJeff Kirsher
774e689cf4aSJeff Kirsher cas_phy_write(cp, MII_BMCR, BMCR_RESET);
775e689cf4aSJeff Kirsher udelay(100);
776e689cf4aSJeff Kirsher while (--limit) {
777e689cf4aSJeff Kirsher val = cas_phy_read(cp, MII_BMCR);
778e689cf4aSJeff Kirsher if ((val & BMCR_RESET) == 0)
779e689cf4aSJeff Kirsher break;
780e689cf4aSJeff Kirsher udelay(10);
781e689cf4aSJeff Kirsher }
782e689cf4aSJeff Kirsher return limit <= 0;
783e689cf4aSJeff Kirsher }
784e689cf4aSJeff Kirsher
cas_saturn_firmware_init(struct cas * cp)78515627e84SBen Hutchings static void cas_saturn_firmware_init(struct cas *cp)
786e689cf4aSJeff Kirsher {
787e689cf4aSJeff Kirsher const struct firmware *fw;
788e689cf4aSJeff Kirsher const char fw_name[] = "sun/cassini.bin";
789e689cf4aSJeff Kirsher int err;
790e689cf4aSJeff Kirsher
791e689cf4aSJeff Kirsher if (PHY_NS_DP83065 != cp->phy_id)
79215627e84SBen Hutchings return;
793e689cf4aSJeff Kirsher
794e689cf4aSJeff Kirsher err = request_firmware(&fw, fw_name, &cp->pdev->dev);
795e689cf4aSJeff Kirsher if (err) {
796e689cf4aSJeff Kirsher pr_err("Failed to load firmware \"%s\"\n",
797e689cf4aSJeff Kirsher fw_name);
79815627e84SBen Hutchings return;
799e689cf4aSJeff Kirsher }
800e689cf4aSJeff Kirsher if (fw->size < 2) {
801e689cf4aSJeff Kirsher pr_err("bogus length %zu in \"%s\"\n",
802e689cf4aSJeff Kirsher fw->size, fw_name);
803e689cf4aSJeff Kirsher goto out;
804e689cf4aSJeff Kirsher }
805e689cf4aSJeff Kirsher cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
806e689cf4aSJeff Kirsher cp->fw_size = fw->size - 2;
807e689cf4aSJeff Kirsher cp->fw_data = vmalloc(cp->fw_size);
80815627e84SBen Hutchings if (!cp->fw_data)
809e689cf4aSJeff Kirsher goto out;
810e689cf4aSJeff Kirsher memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
811e689cf4aSJeff Kirsher out:
812e689cf4aSJeff Kirsher release_firmware(fw);
813e689cf4aSJeff Kirsher }
814e689cf4aSJeff Kirsher
/* Download the firmware image stashed by cas_saturn_firmware_init() into
 * the DP83065 PHY's expanded memory, then enable it. The register write
 * sequence follows the vendor-prescribed order; do not reorder.
 */
static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	/* nothing to do if init failed to (or didn't need to) fetch an image */
	if (!cp->fw_data)
		return;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware: REGE holds the address, successive REGD
	 * writes stream in the bytes
	 */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
847e689cf4aSJeff Kirsher
848e689cf4aSJeff Kirsher
849e689cf4aSJeff Kirsher /* phy initialization */
/* Bring up the PHY layer: MII/GMII copper path (with per-vendor PHY
 * workarounds) or the serdes/PCS fiber path, then program the
 * advertisement registers.
 */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy: vendor-magic
			 * register/value pairs, order matters
			 */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			/* double read is deliberate (latched register) */
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities: disable autoneg first so the
		 * advertisement update doesn't race a running negotiation
		 */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		/* poll for the self-clearing reset bit */
		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}
977e689cf4aSJeff Kirsher
978e689cf4aSJeff Kirsher
/* Evaluate PCS link state and update cp->lstate / carrier accordingly.
 * Returns nonzero when the caller should schedule a chip reset as a
 * workaround for the link-failure problem described below.
 */
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	/* work around link detection issue by querying the PCS state
	 * machine directly; its answer overrides the latched status bit.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			/* only announce link-up once the device is open */
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.) Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 * to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		/* link was already down and is still down */
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem. May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}
1082e689cf4aSJeff Kirsher
/* PCS interrupt handler: only link-change events are of interest; hand
 * those to cas_pcs_link_check() and report its reset request upward.
 */
static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if (stat & PCS_INTR_STATUS_LINK_CHANGE)
		return cas_pcs_link_check(cp);

	return 0;
}
1092e689cf4aSJeff Kirsher
/* TX MAC interrupt handler: account for FIFO underruns, oversize-packet
 * errors, and the 16-bit collision counters rolling over. Always returns
 * 0 (no reset needed).
 */
static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (stat == 0)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if (stat == MAC_TX_DEFER_TIMER)
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	/* The remaining bits flag a 16-bit hardware counter wrapping, so
	 * each event represents 0x10000 occurrences.
	 */
	if (stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}
1144e689cf4aSJeff Kirsher
/* Program the header-parser instruction RAM from a NULL/note-terminated
 * table of cas_hp_inst_t entries. Each instruction is split across the
 * HI/MID/LOW data registers at the address written to RAM_ADDR.
 */
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 word;
	int idx;

	for (inst = firmware, idx = 0; inst && inst->note; inst++, idx++) {
		writel(idx, cp->regs + REG_HP_INSTR_RAM_ADDR);

		word = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val) |
		       CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(word, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		word = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10) |
		       CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop) |
		       CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext) |
		       CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff) |
		       CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext) |
		       CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff) |
		       CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(word, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		word = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask) |
		       CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift) |
		       CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab) |
		       CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(word, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
	}
}
1177e689cf4aSJeff Kirsher
cas_init_rx_dma(struct cas * cp)1178e689cf4aSJeff Kirsher static void cas_init_rx_dma(struct cas *cp)
1179e689cf4aSJeff Kirsher {
1180e689cf4aSJeff Kirsher u64 desc_dma = cp->block_dvma;
1181e689cf4aSJeff Kirsher u32 val;
1182e689cf4aSJeff Kirsher int i, size;
1183e689cf4aSJeff Kirsher
1184e689cf4aSJeff Kirsher /* rx free descriptors */
1185e689cf4aSJeff Kirsher val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1186e689cf4aSJeff Kirsher val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1187e689cf4aSJeff Kirsher val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1188e689cf4aSJeff Kirsher if ((N_RX_DESC_RINGS > 1) &&
1189e689cf4aSJeff Kirsher (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
1190e689cf4aSJeff Kirsher val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1191e689cf4aSJeff Kirsher writel(val, cp->regs + REG_RX_CFG);
1192e689cf4aSJeff Kirsher
1193e689cf4aSJeff Kirsher val = (unsigned long) cp->init_rxds[0] -
1194e689cf4aSJeff Kirsher (unsigned long) cp->init_block;
1195e689cf4aSJeff Kirsher writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1196e689cf4aSJeff Kirsher writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1197e689cf4aSJeff Kirsher writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1198e689cf4aSJeff Kirsher
1199e689cf4aSJeff Kirsher if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1200e689cf4aSJeff Kirsher /* rx desc 2 is for IPSEC packets. however,
1201e689cf4aSJeff Kirsher * we don't it that for that purpose.
1202e689cf4aSJeff Kirsher */
1203e689cf4aSJeff Kirsher val = (unsigned long) cp->init_rxds[1] -
1204e689cf4aSJeff Kirsher (unsigned long) cp->init_block;
1205e689cf4aSJeff Kirsher writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1206e689cf4aSJeff Kirsher writel((desc_dma + val) & 0xffffffff, cp->regs +
1207e689cf4aSJeff Kirsher REG_PLUS_RX_DB1_LOW);
1208e689cf4aSJeff Kirsher writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1209e689cf4aSJeff Kirsher REG_PLUS_RX_KICK1);
1210e689cf4aSJeff Kirsher }
1211e689cf4aSJeff Kirsher
1212e689cf4aSJeff Kirsher /* rx completion registers */
1213e689cf4aSJeff Kirsher val = (unsigned long) cp->init_rxcs[0] -
1214e689cf4aSJeff Kirsher (unsigned long) cp->init_block;
1215e689cf4aSJeff Kirsher writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1216e689cf4aSJeff Kirsher writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1217e689cf4aSJeff Kirsher
1218e689cf4aSJeff Kirsher if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1219e689cf4aSJeff Kirsher /* rx comp 2-4 */
1220e689cf4aSJeff Kirsher for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1221e689cf4aSJeff Kirsher val = (unsigned long) cp->init_rxcs[i] -
1222e689cf4aSJeff Kirsher (unsigned long) cp->init_block;
1223e689cf4aSJeff Kirsher writel((desc_dma + val) >> 32, cp->regs +
1224e689cf4aSJeff Kirsher REG_PLUS_RX_CBN_HI(i));
1225e689cf4aSJeff Kirsher writel((desc_dma + val) & 0xffffffff, cp->regs +
1226e689cf4aSJeff Kirsher REG_PLUS_RX_CBN_LOW(i));
1227e689cf4aSJeff Kirsher }
1228e689cf4aSJeff Kirsher }
1229e689cf4aSJeff Kirsher
1230e689cf4aSJeff Kirsher /* read selective clear regs to prevent spurious interrupts
1231e689cf4aSJeff Kirsher * on reset because complete == kick.
1232e689cf4aSJeff Kirsher * selective clear set up to prevent interrupts on resets
1233e689cf4aSJeff Kirsher */
1234e689cf4aSJeff Kirsher readl(cp->regs + REG_INTR_STATUS_ALIAS);
1235e689cf4aSJeff Kirsher writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1236e689cf4aSJeff Kirsher
1237e689cf4aSJeff Kirsher /* set up pause thresholds */
1238e689cf4aSJeff Kirsher val = CAS_BASE(RX_PAUSE_THRESH_OFF,
1239e689cf4aSJeff Kirsher cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1240e689cf4aSJeff Kirsher val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1241e689cf4aSJeff Kirsher cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1242e689cf4aSJeff Kirsher writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1243e689cf4aSJeff Kirsher
1244e689cf4aSJeff Kirsher /* zero out dma reassembly buffers */
1245e689cf4aSJeff Kirsher for (i = 0; i < 64; i++) {
1246e689cf4aSJeff Kirsher writel(i, cp->regs + REG_RX_TABLE_ADDR);
1247e689cf4aSJeff Kirsher writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1248e689cf4aSJeff Kirsher writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1249e689cf4aSJeff Kirsher writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1250e689cf4aSJeff Kirsher }
1251e689cf4aSJeff Kirsher
1252e689cf4aSJeff Kirsher /* make sure address register is 0 for normal operation */
1253e689cf4aSJeff Kirsher writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1254e689cf4aSJeff Kirsher writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1255e689cf4aSJeff Kirsher
1256e689cf4aSJeff Kirsher /* interrupt mitigation */
1257e689cf4aSJeff Kirsher #ifdef USE_RX_BLANK
1258e689cf4aSJeff Kirsher val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1259e689cf4aSJeff Kirsher val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1260e689cf4aSJeff Kirsher writel(val, cp->regs + REG_RX_BLANK);
1261e689cf4aSJeff Kirsher #else
1262e689cf4aSJeff Kirsher writel(0x0, cp->regs + REG_RX_BLANK);
1263e689cf4aSJeff Kirsher #endif
1264e689cf4aSJeff Kirsher
1265e689cf4aSJeff Kirsher /* interrupt generation as a function of low water marks for
1266e689cf4aSJeff Kirsher * free desc and completion entries. these are used to trigger
1267e689cf4aSJeff Kirsher * housekeeping for rx descs. we don't use the free interrupt
1268e689cf4aSJeff Kirsher * as it's not very useful
1269e689cf4aSJeff Kirsher */
1270e689cf4aSJeff Kirsher /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
1271e689cf4aSJeff Kirsher val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1272e689cf4aSJeff Kirsher writel(val, cp->regs + REG_RX_AE_THRESH);
1273e689cf4aSJeff Kirsher if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1274e689cf4aSJeff Kirsher val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1275e689cf4aSJeff Kirsher writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1276e689cf4aSJeff Kirsher }
1277e689cf4aSJeff Kirsher
1278e689cf4aSJeff Kirsher /* Random early detect registers. useful for congestion avoidance.
1279e689cf4aSJeff Kirsher * this should be tunable.
1280e689cf4aSJeff Kirsher */
1281e689cf4aSJeff Kirsher writel(0x0, cp->regs + REG_RX_RED);
1282e689cf4aSJeff Kirsher
1283e689cf4aSJeff Kirsher /* receive page sizes. default == 2K (0x800) */
1284e689cf4aSJeff Kirsher val = 0;
1285e689cf4aSJeff Kirsher if (cp->page_size == 0x1000)
1286e689cf4aSJeff Kirsher val = 0x1;
1287e689cf4aSJeff Kirsher else if (cp->page_size == 0x2000)
1288e689cf4aSJeff Kirsher val = 0x2;
1289e689cf4aSJeff Kirsher else if (cp->page_size == 0x4000)
1290e689cf4aSJeff Kirsher val = 0x3;
1291e689cf4aSJeff Kirsher
1292e689cf4aSJeff Kirsher /* round mtu + offset. constrain to page size. */
1293e689cf4aSJeff Kirsher size = cp->dev->mtu + 64;
1294e689cf4aSJeff Kirsher if (size > cp->page_size)
1295e689cf4aSJeff Kirsher size = cp->page_size;
1296e689cf4aSJeff Kirsher
1297e689cf4aSJeff Kirsher if (size <= 0x400)
1298e689cf4aSJeff Kirsher i = 0x0;
1299e689cf4aSJeff Kirsher else if (size <= 0x800)
1300e689cf4aSJeff Kirsher i = 0x1;
1301e689cf4aSJeff Kirsher else if (size <= 0x1000)
1302e689cf4aSJeff Kirsher i = 0x2;
1303e689cf4aSJeff Kirsher else
1304e689cf4aSJeff Kirsher i = 0x3;
1305e689cf4aSJeff Kirsher
1306e689cf4aSJeff Kirsher cp->mtu_stride = 1 << (i + 10);
1307e689cf4aSJeff Kirsher val = CAS_BASE(RX_PAGE_SIZE, val);
1308e689cf4aSJeff Kirsher val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1309e689cf4aSJeff Kirsher val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1310e689cf4aSJeff Kirsher val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1311e689cf4aSJeff Kirsher writel(val, cp->regs + REG_RX_PAGE_SIZE);
1312e689cf4aSJeff Kirsher
1313e689cf4aSJeff Kirsher /* enable the header parser if desired */
131432329216SMartin Liška if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0])
1315e689cf4aSJeff Kirsher return;
1316e689cf4aSJeff Kirsher
1317e689cf4aSJeff Kirsher val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1318e689cf4aSJeff Kirsher val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1319e689cf4aSJeff Kirsher val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1320e689cf4aSJeff Kirsher writel(val, cp->regs + REG_HP_CFG);
1321e689cf4aSJeff Kirsher }
1322e689cf4aSJeff Kirsher
/* Reset one RX completion-ring entry: zero every word, then stamp
 * word4 with the RX_COMP4_ZERO pattern.
 */
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
1328e689cf4aSJeff Kirsher
1329e689cf4aSJeff Kirsher /* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1330e689cf4aSJeff Kirsher * flipping is protected by the fact that the chip will not
1331e689cf4aSJeff Kirsher * hand back the same page index while it's being processed.
1332e689cf4aSJeff Kirsher */
cas_page_spare(struct cas * cp,const int index)1333e689cf4aSJeff Kirsher static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1334e689cf4aSJeff Kirsher {
1335e689cf4aSJeff Kirsher cas_page_t *page = cp->rx_pages[1][index];
1336e689cf4aSJeff Kirsher cas_page_t *new;
1337e689cf4aSJeff Kirsher
1338e689cf4aSJeff Kirsher if (page_count(page->buffer) == 1)
1339e689cf4aSJeff Kirsher return page;
1340e689cf4aSJeff Kirsher
1341e689cf4aSJeff Kirsher new = cas_page_dequeue(cp);
1342e689cf4aSJeff Kirsher if (new) {
1343e689cf4aSJeff Kirsher spin_lock(&cp->rx_inuse_lock);
1344e689cf4aSJeff Kirsher list_add(&page->list, &cp->rx_inuse_list);
1345e689cf4aSJeff Kirsher spin_unlock(&cp->rx_inuse_lock);
1346e689cf4aSJeff Kirsher }
1347e689cf4aSJeff Kirsher return new;
1348e689cf4aSJeff Kirsher }
1349e689cf4aSJeff Kirsher
1350e689cf4aSJeff Kirsher /* this needs to be changed if we actually use the ENC RX DESC ring */
cas_page_swap(struct cas * cp,const int ring,const int index)1351e689cf4aSJeff Kirsher static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1352e689cf4aSJeff Kirsher const int index)
1353e689cf4aSJeff Kirsher {
1354e689cf4aSJeff Kirsher cas_page_t **page0 = cp->rx_pages[0];
1355e689cf4aSJeff Kirsher cas_page_t **page1 = cp->rx_pages[1];
1356e689cf4aSJeff Kirsher
1357e689cf4aSJeff Kirsher /* swap if buffer is in use */
1358e689cf4aSJeff Kirsher if (page_count(page0[index]->buffer) > 1) {
1359e689cf4aSJeff Kirsher cas_page_t *new = cas_page_spare(cp, index);
1360e689cf4aSJeff Kirsher if (new) {
1361e689cf4aSJeff Kirsher page1[index] = page0[index];
1362e689cf4aSJeff Kirsher page0[index] = new;
1363e689cf4aSJeff Kirsher }
1364e689cf4aSJeff Kirsher }
1365e689cf4aSJeff Kirsher RX_USED_SET(page0[index], 0);
1366e689cf4aSJeff Kirsher return page0[index];
1367e689cf4aSJeff Kirsher }
1368e689cf4aSJeff Kirsher
cas_clean_rxds(struct cas * cp)1369e689cf4aSJeff Kirsher static void cas_clean_rxds(struct cas *cp)
1370e689cf4aSJeff Kirsher {
1371e689cf4aSJeff Kirsher /* only clean ring 0 as ring 1 is used for spare buffers */
1372e689cf4aSJeff Kirsher struct cas_rx_desc *rxd = cp->init_rxds[0];
1373e689cf4aSJeff Kirsher int i, size;
1374e689cf4aSJeff Kirsher
1375e689cf4aSJeff Kirsher /* release all rx flows */
1376e689cf4aSJeff Kirsher for (i = 0; i < N_RX_FLOWS; i++) {
1377e689cf4aSJeff Kirsher struct sk_buff *skb;
1378e689cf4aSJeff Kirsher while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1379e689cf4aSJeff Kirsher cas_skb_release(skb);
1380e689cf4aSJeff Kirsher }
1381e689cf4aSJeff Kirsher }
1382e689cf4aSJeff Kirsher
1383e689cf4aSJeff Kirsher /* initialize descriptors */
1384e689cf4aSJeff Kirsher size = RX_DESC_RINGN_SIZE(0);
1385e689cf4aSJeff Kirsher for (i = 0; i < size; i++) {
1386e689cf4aSJeff Kirsher cas_page_t *page = cas_page_swap(cp, 0, i);
1387e689cf4aSJeff Kirsher rxd[i].buffer = cpu_to_le64(page->dma_addr);
1388e689cf4aSJeff Kirsher rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1389e689cf4aSJeff Kirsher CAS_BASE(RX_INDEX_RING, 0));
1390e689cf4aSJeff Kirsher }
1391e689cf4aSJeff Kirsher
1392e689cf4aSJeff Kirsher cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1393e689cf4aSJeff Kirsher cp->rx_last[0] = 0;
1394e689cf4aSJeff Kirsher cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1395e689cf4aSJeff Kirsher }
1396e689cf4aSJeff Kirsher
cas_clean_rxcs(struct cas * cp)1397e689cf4aSJeff Kirsher static void cas_clean_rxcs(struct cas *cp)
1398e689cf4aSJeff Kirsher {
1399e689cf4aSJeff Kirsher int i, j;
1400e689cf4aSJeff Kirsher
1401e689cf4aSJeff Kirsher /* take ownership of rx comp descriptors */
1402e689cf4aSJeff Kirsher memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1403e689cf4aSJeff Kirsher memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1404e689cf4aSJeff Kirsher for (i = 0; i < N_RX_COMP_RINGS; i++) {
1405e689cf4aSJeff Kirsher struct cas_rx_comp *rxc = cp->init_rxcs[i];
1406e689cf4aSJeff Kirsher for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1407e689cf4aSJeff Kirsher cas_rxc_init(rxc + j);
1408e689cf4aSJeff Kirsher }
1409e689cf4aSJeff Kirsher }
1410e689cf4aSJeff Kirsher }
1411e689cf4aSJeff Kirsher
#if 0
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* First, reset MAC RX. */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	/* poll (up to STOP_TRIES * 10us) for the enable bit to clear */
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, cp->regs + REG_RX_CFG);
	/* poll for the DMA enable bit to clear */
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	/* NOTE(review): presumably lets in-flight DMA drain before the
	 * RX reset command below -- confirm against chip documentation.
	 */
	mdelay(5);

	/* Execute RX reset command. */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	/* poll for the reset bit to self-clear */
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* reset driver rx state */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* Now, reprogram the rest of RX unit. */
	cas_init_rx_dma(cp);

	/* re-enable RX DMA and the RX MAC */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
#endif
1479e689cf4aSJeff Kirsher
/* Service an RX MAC interrupt: account the 16-bit hardware counter
 * rollovers into the ring-0 stats.  Always returns 0 (no reset needed).
 */
static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
			       u32 status)
{
	u32 rxmac_stat = readl(cp->regs + REG_MAC_RX_STATUS);

	if (!rxmac_stat)
		return 0;

	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n",
		  rxmac_stat);

	/* each flag marks a counter rollover: credit a full 0x10000 wrap */
	spin_lock(&cp->stat_lock[0]);
	if (rxmac_stat & MAC_RX_ALIGN_ERR)
		cp->net_stats[0].rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RX_CRC_ERR)
		cp->net_stats[0].rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RX_LEN_ERR)
		cp->net_stats[0].rx_length_errors += 0x10000;

	if (rxmac_stat & MAC_RX_OVERFLOW) {
		cp->net_stats[0].rx_over_errors++;
		cp->net_stats[0].rx_fifo_errors++;
	}

	/* MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR events are deliberately
	 * not tracked.
	 */
	spin_unlock(&cp->stat_lock[0]);
	return 0;
}
1512e689cf4aSJeff Kirsher
/* Service a MAC control interrupt.  Only pause-frame bookkeeping is
 * done here; useful for diagnostics and debug, and normally masked.
 * Always returns 0 (no reset needed).
 */
static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 ctrl_stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

	if (ctrl_stat == 0)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "mac interrupt, stat: 0x%x\n", ctrl_stat);

	if (ctrl_stat & MAC_CTRL_PAUSE_STATE)
		cp->pause_entered++;

	/* upper 16 bits of the status register carry the pause time */
	if (ctrl_stat & MAC_CTRL_PAUSE_RECEIVED)
		cp->pause_last_time_recvd = (ctrl_stat >> 16);

	return 0;
}
1536e689cf4aSJeff Kirsher
1537e689cf4aSJeff Kirsher
1538e689cf4aSJeff Kirsher /* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
	u16 val;

	switch (cp->lstate) {
	case link_force_ret:
		/* the autoneg retry (started in cas_mii_link_check) did not
		 * bring the link up: restore the forced BMCR value saved in
		 * cp->link_fcntl and stay in forced mode.
		 */
		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
		cp->timer_ticks = 5;
		cp->lstate = link_force_ok;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_aneg:
		val = cas_phy_read(cp, MII_BMCR);

		/* Try forced modes. we try things in the following order:
		 * 1000 full -> 100 full/half -> 10 half
		 */
		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
		val |= BMCR_FULLDPLX;
		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
		cas_phy_write(cp, MII_BMCR, val);
		cp->timer_ticks = 5;
		cp->lstate = link_force_try;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_force_try:
		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
		val = cas_phy_read(cp, MII_BMCR);
		cp->timer_ticks = 5;
		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
			val &= ~CAS_BMCR_SPEED1000;
			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}

		if (val & BMCR_SPEED100) {
			if (val & BMCR_FULLDPLX) /* fd failed */
				val &= ~BMCR_FULLDPLX;
			else { /* 100Mbps failed */
				val &= ~BMCR_SPEED100;
			}
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}
		/* already at 10 Mbps half duplex: nothing left to try */
		break;
	default:
		break;
	}
	return 0;
}
1594e689cf4aSJeff Kirsher
1595e689cf4aSJeff Kirsher
1596e689cf4aSJeff Kirsher /* must be invoked with cp->lock held */
/* Evaluate the polled BMSR value and drive the link state machine.
 * Returns non-zero when the caller should restart link bring-up.
 */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
	int restart;

	if (bmsr & BMSR_LSTATUS) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if ((cp->lstate == link_force_try) &&
		    (cp->link_cntl & BMCR_ANENABLE)) {
			cp->lstate = link_force_ret;
			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
			/* disable MIF polling while we touch the PHY */
			cas_mif_poll(cp, 0);
			/* save the forced BMCR so link_force_ret can restore
			 * it if this autoneg pass fails too
			 */
			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
			cp->timer_ticks = 5;
			if (cp->opened)
				netif_info(cp, link, cp->dev,
					   "Got link after fallback, retrying autoneg once...\n");
			cas_phy_write(cp, MII_BMCR,
				      cp->link_fcntl | BMCR_ANENABLE |
				      BMCR_ANRESTART);
			cas_mif_poll(cp, 1);

		} else if (cp->lstate != link_up) {
			/* fresh link-up: apply negotiated modes and tell
			 * the stack (only if the device is open)
			 */
			cp->lstate = link_up;
			cp->link_transition = LINK_TRANSITION_LINK_UP;

			if (cp->opened) {
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
		return 0;
	}

	/* link not up. if the link was previously up, we restart the
	 * whole process
	 */
	restart = 0;
	if (cp->lstate == link_up) {
		cp->lstate = link_down;
		cp->link_transition = LINK_TRANSITION_LINK_DOWN;

		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "Link down\n");
		restart = 1;

	} else if (++cp->timer_ticks > 10)
		/* link has been down a while: step the forced-mode
		 * fallback state machine
		 */
		cas_mdio_link_not_up(cp);

	return restart;
}
1652e689cf4aSJeff Kirsher
/* MIF interrupt: if the poll logic latched a link change, feed the
 * polled BMSR value into the link state machine.
 */
static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 mif_status = readl(cp->regs + REG_MIF_STATUS);

	/* no latched poll status -> no link change to process */
	if (!CAS_VAL(MIF_STATUS_POLL_STATUS, mif_status))
		return 0;

	/* the polled register data rides along in the status word */
	return cas_mii_link_check(cp,
				  CAS_VAL(MIF_STATUS_POLL_DATA, mif_status));
}
1666e689cf4aSJeff Kirsher
/* Report a PCI error interrupt.  Always returns 1: every PCI error is
 * grounds for a full chip reset.
 */
static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 err_stat = readl(cp->regs + REG_PCI_ERR_STATUS);

	if (!err_stat)
		return 0;

	netdev_err(dev, "PCI error [%04x:%04x]",
		   err_stat, readl(cp->regs + REG_BIM_DIAG));

	/* cassini+ has this reserved */
	if ((err_stat & PCI_ERR_BADACK) &&
	    !(cp->cas_flags & CAS_FLAG_REG_PLUS))
		pr_cont(" <No ACK64# during ABS64 cycle>");

	if (err_stat & PCI_ERR_DTRTO)
		pr_cont(" <Delayed transaction timeout>");
	if (err_stat & PCI_ERR_OTHER)
		pr_cont(" <other>");
	if (err_stat & PCI_ERR_BIM_DMA_WRITE)
		pr_cont(" <BIM DMA 0 write req>");
	if (err_stat & PCI_ERR_BIM_DMA_READ)
		pr_cont(" <BIM DMA 0 read req>");
	pr_cont("\n");

	if (err_stat & PCI_ERR_OTHER) {
		int cfg_errs;

		/* the generic bit tells us nothing: interrogate PCI config
		 * space for the true cause.
		 */
		cfg_errs = pci_status_get_and_clear_errors(cp->pdev);

		netdev_err(dev, "PCI status errors[%04x]\n", cfg_errs);
		if (cfg_errs & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (cfg_errs & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (cfg_errs & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (cfg_errs & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (cfg_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (cfg_errs & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}
1719e689cf4aSJeff Kirsher
1720e689cf4aSJeff Kirsher /* All non-normal interrupt conditions get serviced here.
1721e689cf4aSJeff Kirsher * Returns non-zero if we should just exit the interrupt
1722e689cf4aSJeff Kirsher * handler right now (ie. if we reset the card which invalidates
1723e689cf4aSJeff Kirsher * all of the other original irq status bits).
1724e689cf4aSJeff Kirsher */
cas_abnormal_irq(struct net_device * dev,struct cas * cp,u32 status)1725e689cf4aSJeff Kirsher static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1726e689cf4aSJeff Kirsher u32 status)
1727e689cf4aSJeff Kirsher {
1728e689cf4aSJeff Kirsher if (status & INTR_RX_TAG_ERROR) {
1729e689cf4aSJeff Kirsher /* corrupt RX tag framing */
1730e689cf4aSJeff Kirsher netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1731e689cf4aSJeff Kirsher "corrupt rx tag framing\n");
1732e689cf4aSJeff Kirsher spin_lock(&cp->stat_lock[0]);
1733e689cf4aSJeff Kirsher cp->net_stats[0].rx_errors++;
1734e689cf4aSJeff Kirsher spin_unlock(&cp->stat_lock[0]);
1735e689cf4aSJeff Kirsher goto do_reset;
1736e689cf4aSJeff Kirsher }
1737e689cf4aSJeff Kirsher
1738e689cf4aSJeff Kirsher if (status & INTR_RX_LEN_MISMATCH) {
1739e689cf4aSJeff Kirsher /* length mismatch. */
1740e689cf4aSJeff Kirsher netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1741e689cf4aSJeff Kirsher "length mismatch for rx frame\n");
1742e689cf4aSJeff Kirsher spin_lock(&cp->stat_lock[0]);
1743e689cf4aSJeff Kirsher cp->net_stats[0].rx_errors++;
1744e689cf4aSJeff Kirsher spin_unlock(&cp->stat_lock[0]);
1745e689cf4aSJeff Kirsher goto do_reset;
1746e689cf4aSJeff Kirsher }
1747e689cf4aSJeff Kirsher
1748e689cf4aSJeff Kirsher if (status & INTR_PCS_STATUS) {
1749e689cf4aSJeff Kirsher if (cas_pcs_interrupt(dev, cp, status))
1750e689cf4aSJeff Kirsher goto do_reset;
1751e689cf4aSJeff Kirsher }
1752e689cf4aSJeff Kirsher
1753e689cf4aSJeff Kirsher if (status & INTR_TX_MAC_STATUS) {
1754e689cf4aSJeff Kirsher if (cas_txmac_interrupt(dev, cp, status))
1755e689cf4aSJeff Kirsher goto do_reset;
1756e689cf4aSJeff Kirsher }
1757e689cf4aSJeff Kirsher
1758e689cf4aSJeff Kirsher if (status & INTR_RX_MAC_STATUS) {
1759e689cf4aSJeff Kirsher if (cas_rxmac_interrupt(dev, cp, status))
1760e689cf4aSJeff Kirsher goto do_reset;
1761e689cf4aSJeff Kirsher }
1762e689cf4aSJeff Kirsher
1763e689cf4aSJeff Kirsher if (status & INTR_MAC_CTRL_STATUS) {
1764e689cf4aSJeff Kirsher if (cas_mac_interrupt(dev, cp, status))
1765e689cf4aSJeff Kirsher goto do_reset;
1766e689cf4aSJeff Kirsher }
1767e689cf4aSJeff Kirsher
1768e689cf4aSJeff Kirsher if (status & INTR_MIF_STATUS) {
1769e689cf4aSJeff Kirsher if (cas_mif_interrupt(dev, cp, status))
1770e689cf4aSJeff Kirsher goto do_reset;
1771e689cf4aSJeff Kirsher }
1772e689cf4aSJeff Kirsher
1773e689cf4aSJeff Kirsher if (status & INTR_PCI_ERROR_STATUS) {
1774e689cf4aSJeff Kirsher if (cas_pci_interrupt(dev, cp, status))
1775e689cf4aSJeff Kirsher goto do_reset;
1776e689cf4aSJeff Kirsher }
1777e689cf4aSJeff Kirsher return 0;
1778e689cf4aSJeff Kirsher
1779e689cf4aSJeff Kirsher do_reset:
1780e689cf4aSJeff Kirsher #if 1
1781e689cf4aSJeff Kirsher atomic_inc(&cp->reset_task_pending);
1782e689cf4aSJeff Kirsher atomic_inc(&cp->reset_task_pending_all);
1783e689cf4aSJeff Kirsher netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1784e689cf4aSJeff Kirsher schedule_work(&cp->reset_task);
1785e689cf4aSJeff Kirsher #else
1786e689cf4aSJeff Kirsher atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1787e689cf4aSJeff Kirsher netdev_err(dev, "reset called in cas_abnormal_irq\n");
1788e689cf4aSJeff Kirsher schedule_work(&cp->reset_task);
1789e689cf4aSJeff Kirsher #endif
1790e689cf4aSJeff Kirsher return 1;
1791e689cf4aSJeff Kirsher }
1792e689cf4aSJeff Kirsher
1793e689cf4aSJeff Kirsher /* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1794e689cf4aSJeff Kirsher * determining whether to do a netif_stop/wakeup
1795e689cf4aSJeff Kirsher */
1796e689cf4aSJeff Kirsher #define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1797e689cf4aSJeff Kirsher #define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
cas_calc_tabort(struct cas * cp,const unsigned long addr,const int len)1798e689cf4aSJeff Kirsher static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1799e689cf4aSJeff Kirsher const int len)
1800e689cf4aSJeff Kirsher {
1801e689cf4aSJeff Kirsher unsigned long off = addr + len;
1802e689cf4aSJeff Kirsher
1803e689cf4aSJeff Kirsher if (CAS_TABORT(cp) == 1)
1804e689cf4aSJeff Kirsher return 0;
1805e689cf4aSJeff Kirsher if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1806e689cf4aSJeff Kirsher return 0;
1807e689cf4aSJeff Kirsher return TX_TARGET_ABORT_LEN;
1808e689cf4aSJeff Kirsher }
1809e689cf4aSJeff Kirsher
/* Reclaim completed TX descriptors on @ring up to hardware index @limit.
 *
 * Walks the ring from cp->tx_old[ring], DMA-unmaps each fragment
 * (including any interleaved tiny-buffer slots), frees the skbs, updates
 * per-ring byte/packet statistics and, when enough descriptors become
 * free, wakes the transmit queue.  Runs in IRQ context under
 * cp->tx_lock[ring].
 */
cas_tx_ringN(struct cas * cp,int ring,int limit)1810e689cf4aSJeff Kirsher static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1811e689cf4aSJeff Kirsher {
1812e689cf4aSJeff Kirsher 	struct cas_tx_desc *txds;
1813e689cf4aSJeff Kirsher 	struct sk_buff **skbs;
1814e689cf4aSJeff Kirsher 	struct net_device *dev = cp->dev;
1815e689cf4aSJeff Kirsher 	int entry, count;
1816e689cf4aSJeff Kirsher 
1817e689cf4aSJeff Kirsher 	spin_lock(&cp->tx_lock[ring]);
1818e689cf4aSJeff Kirsher 	txds = cp->init_txds[ring];
1819e689cf4aSJeff Kirsher 	skbs = cp->tx_skbs[ring];
1820e689cf4aSJeff Kirsher 	entry = cp->tx_old[ring];
1821e689cf4aSJeff Kirsher 
	/* number of descriptors the hardware reports as completed */
1822e689cf4aSJeff Kirsher 	count = TX_BUFF_COUNT(ring, entry, limit);
1823e689cf4aSJeff Kirsher 	while (entry != limit) {
1824e689cf4aSJeff Kirsher 		struct sk_buff *skb = skbs[entry];
1825e689cf4aSJeff Kirsher 		dma_addr_t daddr;
1826e689cf4aSJeff Kirsher 		u32 dlen;
1827e689cf4aSJeff Kirsher 		int frag;
1828e689cf4aSJeff Kirsher 
1829e689cf4aSJeff Kirsher 		if (!skb) {
1830e689cf4aSJeff Kirsher 			/* this should never occur */
1831e689cf4aSJeff Kirsher 			entry = TX_DESC_NEXT(ring, entry);
1832e689cf4aSJeff Kirsher 			continue;
1833e689cf4aSJeff Kirsher 		}
1834e689cf4aSJeff Kirsher 
1835e689cf4aSJeff Kirsher 		/* however, we might get only a partial skb release. */
		/* each skb occupies 1 + nr_frags descriptors plus any tiny
		 * bounce buffers; if the hardware hasn't finished the whole
		 * skb yet, stop here and retry on the next interrupt.
		 */
1836e689cf4aSJeff Kirsher 		count -= skb_shinfo(skb)->nr_frags +
1837e689cf4aSJeff Kirsher 			+ cp->tx_tiny_use[ring][entry].nbufs + 1;
1838e689cf4aSJeff Kirsher 		if (count < 0)
1839e689cf4aSJeff Kirsher 			break;
1840e689cf4aSJeff Kirsher 
1841e689cf4aSJeff Kirsher 		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1842e689cf4aSJeff Kirsher 			     "tx[%d] done, slot %d\n", ring, entry);
1843e689cf4aSJeff Kirsher 
1844e689cf4aSJeff Kirsher 		skbs[entry] = NULL;
1845e689cf4aSJeff Kirsher 		cp->tx_tiny_use[ring][entry].nbufs = 0;
1846e689cf4aSJeff Kirsher 
		/* unmap the head buffer and every fragment; the DMA address
		 * and length are read back from the descriptor itself.
		 */
1847e689cf4aSJeff Kirsher 		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1848e689cf4aSJeff Kirsher 			struct cas_tx_desc *txd = txds + entry;
1849e689cf4aSJeff Kirsher 
1850e689cf4aSJeff Kirsher 			daddr = le64_to_cpu(txd->buffer);
1851e689cf4aSJeff Kirsher 			dlen = CAS_VAL(TX_DESC_BUFLEN,
1852e689cf4aSJeff Kirsher 				       le64_to_cpu(txd->control));
1853dcc82bb0SChristophe JAILLET 			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
1854dcc82bb0SChristophe JAILLET 				       DMA_TO_DEVICE);
1855e689cf4aSJeff Kirsher 			entry = TX_DESC_NEXT(ring, entry);
1856e689cf4aSJeff Kirsher 
1857e689cf4aSJeff Kirsher 			/* tiny buffer may follow */
1858e689cf4aSJeff Kirsher 			if (cp->tx_tiny_use[ring][entry].used) {
1859e689cf4aSJeff Kirsher 				cp->tx_tiny_use[ring][entry].used = 0;
1860e689cf4aSJeff Kirsher 				entry = TX_DESC_NEXT(ring, entry);
1861e689cf4aSJeff Kirsher 			}
1862e689cf4aSJeff Kirsher 		}
1863e689cf4aSJeff Kirsher 
1864e689cf4aSJeff Kirsher 		spin_lock(&cp->stat_lock[ring]);
1865e689cf4aSJeff Kirsher 		cp->net_stats[ring].tx_packets++;
1866e689cf4aSJeff Kirsher 		cp->net_stats[ring].tx_bytes += skb->len;
1867e689cf4aSJeff Kirsher 		spin_unlock(&cp->stat_lock[ring]);
1868 98fcd70bSYang Wei 		dev_consume_skb_irq(skb);
1869e689cf4aSJeff Kirsher 	}
1870e689cf4aSJeff Kirsher 	cp->tx_old[ring] = entry;
1871e689cf4aSJeff Kirsher 
1872e689cf4aSJeff Kirsher 	/* this is wrong for multiple tx rings. the net device needs
1873e689cf4aSJeff Kirsher 	 * multiple queues for this to do the right thing.  we wait
1874e689cf4aSJeff Kirsher 	 * for 2*packets to be available when using tiny buffers
1875e689cf4aSJeff Kirsher 	 */
1876e689cf4aSJeff Kirsher 	if (netif_queue_stopped(dev) &&
1877e689cf4aSJeff Kirsher 	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1878e689cf4aSJeff Kirsher 		netif_wake_queue(dev);
1879e689cf4aSJeff Kirsher 	spin_unlock(&cp->tx_lock[ring]);
1880e689cf4aSJeff Kirsher }
1881e689cf4aSJeff Kirsher
/* TX-completion interrupt handler: reclaim finished descriptors on all
 * transmit rings.
 *
 * The completion index for each ring comes either from the in-memory
 * completion write-back block (USE_TX_COMPWB, avoids a register read) or
 * from the per-ring TX completion register.
 */
cas_tx(struct net_device * dev,struct cas * cp,u32 status)1882e689cf4aSJeff Kirsher static void cas_tx(struct net_device *dev, struct cas *cp,
1883e689cf4aSJeff Kirsher 		   u32 status)
1884e689cf4aSJeff Kirsher {
1885e689cf4aSJeff Kirsher 	int limit, ring;
1886e689cf4aSJeff Kirsher #ifdef USE_TX_COMPWB
1887e689cf4aSJeff Kirsher 	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1888e689cf4aSJeff Kirsher #endif
1889e689cf4aSJeff Kirsher 	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1890e689cf4aSJeff Kirsher 		     "tx interrupt, status: 0x%x, %llx\n",
1891e689cf4aSJeff Kirsher 		     status, (unsigned long long)compwb);
1892e689cf4aSJeff Kirsher 	/* process all the rings */
1893e689cf4aSJeff Kirsher 	for (ring = 0; ring < N_TX_RINGS; ring++) {
1894e689cf4aSJeff Kirsher #ifdef USE_TX_COMPWB
1895e689cf4aSJeff Kirsher 		/* use the completion writeback registers */
		/* the write-back word packs the completion index as
		 * MSB:LSB; advance compwb to the next ring's field.
		 */
1896e689cf4aSJeff Kirsher 		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1897e689cf4aSJeff Kirsher 			CAS_VAL(TX_COMPWB_LSB, compwb);
1898e689cf4aSJeff Kirsher 		compwb = TX_COMPWB_NEXT(compwb);
1899e689cf4aSJeff Kirsher #else
1900e689cf4aSJeff Kirsher 		limit = readl(cp->regs + REG_TX_COMPN(ring));
1901e689cf4aSJeff Kirsher #endif
		/* only walk the ring if the hardware has advanced */
1902e689cf4aSJeff Kirsher 		if (cp->tx_old[ring] != limit)
1903e689cf4aSJeff Kirsher 			cas_tx_ringN(cp, ring, limit);
1904e689cf4aSJeff Kirsher 	}
1905e689cf4aSJeff Kirsher }
1906e689cf4aSJeff Kirsher
1907e689cf4aSJeff Kirsher
/* Build an skb for one received packet described by completion entry
 * @rxc / its pre-extracted descriptor @words.
 *
 * Three layouts are handled (see the comment above cas_rx_ringN):
 *   - small packets: header + data copied into the skb linear area;
 *   - large/split packets: a small header portion is copied, the rest of
 *     the page-based data is attached as page fragments with elevated
 *     page refcounts;
 *   - data may continue into a second page (RX_COMP1_SPLIT_PKT).
 * Each touched region is bracketed by dma_sync_single_for_cpu()/
 * _for_device() since the pages stay mapped DMA_FROM_DEVICE.
 *
 * On success stores the new skb in *skbref, folds the trailing FCS out
 * of the hardware checksum when cp->crc_size is set, and returns the
 * packet length.  Returns -1 on allocation failure or a malformed
 * (page-overflowing) completion; the caller drops the packet.
 */
cas_rx_process_pkt(struct cas * cp,struct cas_rx_comp * rxc,int entry,const u64 * words,struct sk_buff ** skbref)1908e689cf4aSJeff Kirsher static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1909e689cf4aSJeff Kirsher 			      int entry, const u64 *words,
1910e689cf4aSJeff Kirsher 			      struct sk_buff **skbref)
1911e689cf4aSJeff Kirsher {
1912e689cf4aSJeff Kirsher 	int dlen, hlen, len, i, alloclen;
1913e689cf4aSJeff Kirsher 	int off, swivel = RX_SWIVEL_OFF_VAL;
1914e689cf4aSJeff Kirsher 	struct cas_page *page;
1915e689cf4aSJeff Kirsher 	struct sk_buff *skb;
1916c1914458SAnirudh Venkataramanan 	void *crcaddr;
1917e689cf4aSJeff Kirsher 	__sum16 csum;
1918e689cf4aSJeff Kirsher 	char *p;
1919e689cf4aSJeff Kirsher 
1920e689cf4aSJeff Kirsher 	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1921e689cf4aSJeff Kirsher 	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1922e689cf4aSJeff Kirsher 	len = hlen + dlen;
1923e689cf4aSJeff Kirsher 
	/* small packets are copied entirely; otherwise only copy enough
	 * for the protocol headers and attach the rest as frags.
	 */
1924e689cf4aSJeff Kirsher 	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1925e689cf4aSJeff Kirsher 		alloclen = len;
1926e689cf4aSJeff Kirsher 	else
1927e689cf4aSJeff Kirsher 		alloclen = max(hlen, RX_COPY_MIN);
1928e689cf4aSJeff Kirsher 
1929dae2e9f4SPradeep A. Dalvi 	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1930e689cf4aSJeff Kirsher 	if (skb == NULL)
1931e689cf4aSJeff Kirsher 		return -1;
1932e689cf4aSJeff Kirsher 
1933e689cf4aSJeff Kirsher 	*skbref = skb;
	/* "swivel" reserve keeps the IP header aligned after the
	 * Ethernet header is pulled.
	 */
1934e689cf4aSJeff Kirsher 	skb_reserve(skb, swivel);
1935e689cf4aSJeff Kirsher 
1936e689cf4aSJeff Kirsher 	p = skb->data;
1937c1914458SAnirudh Venkataramanan 	crcaddr = NULL;
1938e689cf4aSJeff Kirsher 	if (hlen) { /* always copy header pages */
1939e689cf4aSJeff Kirsher 		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1940e689cf4aSJeff Kirsher 		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1941e689cf4aSJeff Kirsher 		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1942e689cf4aSJeff Kirsher 			swivel;
1943e689cf4aSJeff Kirsher 
1944e689cf4aSJeff Kirsher 		i = hlen;
1945e689cf4aSJeff Kirsher 		if (!dlen) /* attach FCS */
1946e689cf4aSJeff Kirsher 			i += cp->crc_size;
1947dcc82bb0SChristophe JAILLET 		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1948dcc82bb0SChristophe JAILLET 					i, DMA_FROM_DEVICE);
1949c1914458SAnirudh Venkataramanan 		memcpy(p, page_address(page->buffer) + off, i);
1950dcc82bb0SChristophe JAILLET 		dma_sync_single_for_device(&cp->pdev->dev,
1951dcc82bb0SChristophe JAILLET 					   page->dma_addr + off, i,
1952dcc82bb0SChristophe JAILLET 					   DMA_FROM_DEVICE);
		/* header slots are consumed in 0x100-byte strides */
1953e689cf4aSJeff Kirsher 		RX_USED_ADD(page, 0x100);
1954e689cf4aSJeff Kirsher 		p += hlen;
1955e689cf4aSJeff Kirsher 		swivel = 0;
1956e689cf4aSJeff Kirsher 	}
1957e689cf4aSJeff Kirsher 
1958e689cf4aSJeff Kirsher 
1959e689cf4aSJeff Kirsher 	if (alloclen < (hlen + dlen)) {
1960e689cf4aSJeff Kirsher 		skb_frag_t *frag = skb_shinfo(skb)->frags;
1961e689cf4aSJeff Kirsher 
1962e689cf4aSJeff Kirsher 		/* normal or jumbo packets. we use frags */
1963e689cf4aSJeff Kirsher 		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
1964e689cf4aSJeff Kirsher 		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1965e689cf4aSJeff Kirsher 		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
1966e689cf4aSJeff Kirsher 
		/* hlen is reused here: bytes of data in the first page */
1967e689cf4aSJeff Kirsher 		hlen = min(cp->page_size - off, dlen);
1968e689cf4aSJeff Kirsher 		if (hlen < 0) {
1969e689cf4aSJeff Kirsher 			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1970e689cf4aSJeff Kirsher 				     "rx page overflow: %d\n", hlen);
1971e689cf4aSJeff Kirsher 			dev_kfree_skb_irq(skb);
1972e689cf4aSJeff Kirsher 			return -1;
1973e689cf4aSJeff Kirsher 		}
1974e689cf4aSJeff Kirsher 		i = hlen;
1975e689cf4aSJeff Kirsher 		if (i == dlen)  /* attach FCS */
1976e689cf4aSJeff Kirsher 			i += cp->crc_size;
1977dcc82bb0SChristophe JAILLET 		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1978dcc82bb0SChristophe JAILLET 					i, DMA_FROM_DEVICE);
1979e689cf4aSJeff Kirsher 
1980e689cf4aSJeff Kirsher 		/* make sure we always copy a header */
1981e689cf4aSJeff Kirsher 		swivel = 0;
1982e689cf4aSJeff Kirsher 		if (p == (char *) skb->data) { /* not split */
			/* no separate header buffer was supplied, so copy
			 * RX_COPY_MIN bytes into the linear area and let
			 * the frag start after them.
			 */
1983c1914458SAnirudh Venkataramanan 			memcpy(p, page_address(page->buffer) + off,
1984c1914458SAnirudh Venkataramanan 			       RX_COPY_MIN);
1985dcc82bb0SChristophe JAILLET 			dma_sync_single_for_device(&cp->pdev->dev,
1986dcc82bb0SChristophe JAILLET 						   page->dma_addr + off, i,
1987dcc82bb0SChristophe JAILLET 						   DMA_FROM_DEVICE);
1988e689cf4aSJeff Kirsher 			off += RX_COPY_MIN;
1989e689cf4aSJeff Kirsher 			swivel = RX_COPY_MIN;
1990e689cf4aSJeff Kirsher 			RX_USED_ADD(page, cp->mtu_stride);
1991e689cf4aSJeff Kirsher 		} else {
1992e689cf4aSJeff Kirsher 			RX_USED_ADD(page, hlen);
1993e689cf4aSJeff Kirsher 		}
1994e689cf4aSJeff Kirsher 		skb_put(skb, alloclen);
1995e689cf4aSJeff Kirsher 
		/* attach the (remaining) page data as frag 0; the page
		 * refcount is raised so reclamation won't recycle it while
		 * the skb is alive.
		 */
1996e689cf4aSJeff Kirsher 		skb_shinfo(skb)->nr_frags++;
1997e689cf4aSJeff Kirsher 		skb->data_len += hlen - swivel;
1998e689cf4aSJeff Kirsher 		skb->truesize += hlen - swivel;
1999e689cf4aSJeff Kirsher 		skb->len      += hlen - swivel;
2000e689cf4aSJeff Kirsher 
200118324d69SIan Campbell 		skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel);
200218324d69SIan Campbell 		__skb_frag_ref(frag);
2003b54c9d5bSJonathan Lemon 
20049e903e08SEric Dumazet 		/* any more data? */
2005e689cf4aSJeff Kirsher 		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2006e689cf4aSJeff Kirsher 			hlen = dlen;
2007e689cf4aSJeff Kirsher 			off = 0;
2008e689cf4aSJeff Kirsher 
2009e689cf4aSJeff Kirsher 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2010e689cf4aSJeff Kirsher 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2011e689cf4aSJeff Kirsher 			dma_sync_single_for_cpu(&cp->pdev->dev,
2012e689cf4aSJeff Kirsher 						page->dma_addr,
2013dcc82bb0SChristophe JAILLET 						hlen + cp->crc_size,
2014dcc82bb0SChristophe JAILLET 						DMA_FROM_DEVICE);
2015e689cf4aSJeff Kirsher 			dma_sync_single_for_device(&cp->pdev->dev,
2016dcc82bb0SChristophe JAILLET 						   page->dma_addr,
2017dcc82bb0SChristophe JAILLET 						   hlen + cp->crc_size,
2018dcc82bb0SChristophe JAILLET 						   DMA_FROM_DEVICE);
2019e689cf4aSJeff Kirsher 
			/* second page fragment for the split packet */
2020dcc82bb0SChristophe JAILLET 			skb_shinfo(skb)->nr_frags++;
2021e689cf4aSJeff Kirsher 			skb->data_len += hlen;
2022e689cf4aSJeff Kirsher 			skb->len      += hlen;
2023e689cf4aSJeff Kirsher 			frag++;
2024e689cf4aSJeff Kirsher 
2025e689cf4aSJeff Kirsher 			skb_frag_fill_page_desc(frag, page->buffer, 0, hlen);
2026e689cf4aSJeff Kirsher 			__skb_frag_ref(frag);
202718324d69SIan Campbell 			RX_USED_ADD(page, hlen + cp->crc_size);
202818324d69SIan Campbell 		}
2029b54c9d5bSJonathan Lemon 
		/* the FCS (if kept) sits in page memory right after the data */
20309e903e08SEric Dumazet 		if (cp->crc_size)
2031e689cf4aSJeff Kirsher 			crcaddr = page_address(page->buffer) + off + hlen;
2032e689cf4aSJeff Kirsher 
2033e689cf4aSJeff Kirsher 	} else {
2034c1914458SAnirudh Venkataramanan 		/* copying packet */
2035c1914458SAnirudh Venkataramanan 		if (!dlen)
2036e689cf4aSJeff Kirsher 			goto end_copy_pkt;
2037e689cf4aSJeff Kirsher 
2038e689cf4aSJeff Kirsher 		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2039e689cf4aSJeff Kirsher 		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2040e689cf4aSJeff Kirsher 		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2041e689cf4aSJeff Kirsher 		hlen = min(cp->page_size - off, dlen);
2042e689cf4aSJeff Kirsher 		if (hlen < 0) {
2043e689cf4aSJeff Kirsher 			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2044e689cf4aSJeff Kirsher 				     "rx page overflow: %d\n", hlen);
2045e689cf4aSJeff Kirsher 			dev_kfree_skb_irq(skb);
2046e689cf4aSJeff Kirsher 			return -1;
2047e689cf4aSJeff Kirsher 		}
2048e689cf4aSJeff Kirsher 		i = hlen;
2049e689cf4aSJeff Kirsher 		if (i == dlen) /* attach FCS */
2050e689cf4aSJeff Kirsher 			i += cp->crc_size;
2051e689cf4aSJeff Kirsher 		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
2052e689cf4aSJeff Kirsher 					i, DMA_FROM_DEVICE);
2053e689cf4aSJeff Kirsher 		memcpy(p, page_address(page->buffer) + off, i);
2054e689cf4aSJeff Kirsher 		dma_sync_single_for_device(&cp->pdev->dev,
2055dcc82bb0SChristophe JAILLET 					   page->dma_addr + off, i,
2056dcc82bb0SChristophe JAILLET 					   DMA_FROM_DEVICE);
2057c1914458SAnirudh Venkataramanan 		if (p == (char *) skb->data) /* not split */
2058dcc82bb0SChristophe JAILLET 			RX_USED_ADD(page, cp->mtu_stride);
2059dcc82bb0SChristophe JAILLET 		else
2060dcc82bb0SChristophe JAILLET 			RX_USED_ADD(page, i);
2061e689cf4aSJeff Kirsher 
2062e689cf4aSJeff Kirsher 		/* any more data? */
2063e689cf4aSJeff Kirsher 		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2064e689cf4aSJeff Kirsher 			p += hlen;
2065e689cf4aSJeff Kirsher 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2066e689cf4aSJeff Kirsher 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2067e689cf4aSJeff Kirsher 			dma_sync_single_for_cpu(&cp->pdev->dev,
2068e689cf4aSJeff Kirsher 						page->dma_addr,
2069e689cf4aSJeff Kirsher 						dlen + cp->crc_size,
2070e689cf4aSJeff Kirsher 						DMA_FROM_DEVICE);
2071dcc82bb0SChristophe JAILLET 			memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
2072dcc82bb0SChristophe JAILLET 			dma_sync_single_for_device(&cp->pdev->dev,
2073e689cf4aSJeff Kirsher 						   page->dma_addr,
2074dcc82bb0SChristophe JAILLET 						   dlen + cp->crc_size,
2075c1914458SAnirudh Venkataramanan 						   DMA_FROM_DEVICE);
2076dcc82bb0SChristophe JAILLET 			RX_USED_ADD(page, dlen + cp->crc_size);
2077dcc82bb0SChristophe JAILLET 		}
2078e689cf4aSJeff Kirsher 	end_copy_pkt:
		/* in the fully-copied case the FCS was copied into the skb
		 * linear area just past the payload.
		 */
2079dcc82bb0SChristophe JAILLET 		if (cp->crc_size)
2080e689cf4aSJeff Kirsher 			crcaddr = skb->data + alloclen;
2081e689cf4aSJeff Kirsher 
2082e689cf4aSJeff Kirsher 		skb_put(skb, alloclen);
2083c1914458SAnirudh Venkataramanan 	}
2084e689cf4aSJeff Kirsher 
2085c1914458SAnirudh Venkataramanan 	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2086e689cf4aSJeff Kirsher 	if (cp->crc_size) {
2087e689cf4aSJeff Kirsher 		/* checksum includes FCS. strip it out. */
2088e689cf4aSJeff Kirsher 		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2089e689cf4aSJeff Kirsher 					      csum_unfold(csum)));
2090e689cf4aSJeff Kirsher 	}
2091e689cf4aSJeff Kirsher 	skb->protocol = eth_type_trans(skb, cp->dev);
2092e689cf4aSJeff Kirsher 	if (skb->protocol == htons(ETH_P_IP)) {
2093e689cf4aSJeff Kirsher 		skb->csum = csum_unfold(~csum);
2094e689cf4aSJeff Kirsher 		skb->ip_summed = CHECKSUM_COMPLETE;
2095e689cf4aSJeff Kirsher 	} else
2096e689cf4aSJeff Kirsher 		skb_checksum_none_assert(skb);
2097e689cf4aSJeff Kirsher 	return len;
2098e689cf4aSJeff Kirsher }
2099e689cf4aSJeff Kirsher
2100e689cf4aSJeff Kirsher
2101e689cf4aSJeff Kirsher /* we can handle up to 64 rx flows at a time. we do the same thing
2102e689cf4aSJeff Kirsher * as nonreassm except that we batch up the buffers.
2103e689cf4aSJeff Kirsher * NOTE: we currently just treat each flow as a bunch of packets that
2104e689cf4aSJeff Kirsher * we pass up. a better way would be to coalesce the packets
2105e689cf4aSJeff Kirsher * into a jumbo packet. to do that, we need to do the following:
2106e689cf4aSJeff Kirsher * 1) the first packet will have a clean split between header and
2107e689cf4aSJeff Kirsher * data. save both.
2108e689cf4aSJeff Kirsher * 2) each time the next flow packet comes in, extend the
2109e689cf4aSJeff Kirsher * data length and merge the checksums.
2110e689cf4aSJeff Kirsher * 3) on flow release, fix up the header.
2111e689cf4aSJeff Kirsher * 4) make sure the higher layer doesn't care.
2112e689cf4aSJeff Kirsher * because packets get coalesced, we shouldn't run into fragment count
2113e689cf4aSJeff Kirsher * issues.
2114e689cf4aSJeff Kirsher */
/* Queue @skb on its RX flow's pending list; when the hardware signals
 * RX_COMP1_RELEASE_FLOW, flush the whole flow up the stack in order.
 * The flow id comes from completion word 2 and is masked to the
 * N_RX_FLOWS flow queues the driver keeps.
 */
cas_rx_flow_pkt(struct cas * cp,const u64 * words,struct sk_buff * skb)2115e689cf4aSJeff Kirsher static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2116e689cf4aSJeff Kirsher 				   struct sk_buff *skb)
2117e689cf4aSJeff Kirsher {
2118e689cf4aSJeff Kirsher 	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2119e689cf4aSJeff Kirsher 	struct sk_buff_head *flow = &cp->rx_flows[flowid];
2120e689cf4aSJeff Kirsher 
2121e689cf4aSJeff Kirsher 	/* this is protected at a higher layer, so no need to
2122e689cf4aSJeff Kirsher 	 * do any additional locking here. stick the buffer
2123e689cf4aSJeff Kirsher 	 * at the end.
2124e689cf4aSJeff Kirsher 	 */
2125e689cf4aSJeff Kirsher 	__skb_queue_tail(flow, skb);
2126e689cf4aSJeff Kirsher 	if (words[0] & RX_COMP1_RELEASE_FLOW) {
2127e689cf4aSJeff Kirsher 		while ((skb = __skb_dequeue(flow))) {
2128e689cf4aSJeff Kirsher 			cas_skb_release(skb);
2129e689cf4aSJeff Kirsher 		}
2130e689cf4aSJeff Kirsher 	}
2131e689cf4aSJeff Kirsher }
2132e689cf4aSJeff Kirsher
2133e689cf4aSJeff Kirsher /* put rx descriptor back on ring. if a buffer is in use by a higher
2134e689cf4aSJeff Kirsher * layer, this will need to put in a replacement.
2135e689cf4aSJeff Kirsher */
/* Return RX descriptor slot cp->rx_old[ring] to the hardware, backing it
 * with the page currently at rx_pages[ring][index] (swapped for a spare
 * if it is still referenced).  The RX kick register is only written once
 * every 4 entries, matching the hardware's 4-descriptor posting
 * granularity.
 */
cas_post_page(struct cas * cp,const int ring,const int index)2136e689cf4aSJeff Kirsher static void cas_post_page(struct cas *cp, const int ring, const int index)
2137e689cf4aSJeff Kirsher {
2138e689cf4aSJeff Kirsher 	cas_page_t *new;
2139e689cf4aSJeff Kirsher 	int entry;
2140e689cf4aSJeff Kirsher 
2141e689cf4aSJeff Kirsher 	entry = cp->rx_old[ring];
2142e689cf4aSJeff Kirsher 
	/* swap in a spare page if the old one is still held by an skb */
2143e689cf4aSJeff Kirsher 	new = cas_page_swap(cp, ring, index);
2144e689cf4aSJeff Kirsher 	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2145e689cf4aSJeff Kirsher 	cp->init_rxds[ring][entry].index  =
2146e689cf4aSJeff Kirsher 		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2147e689cf4aSJeff Kirsher 			    CAS_BASE(RX_INDEX_RING, ring));
2148e689cf4aSJeff Kirsher 
2149e689cf4aSJeff Kirsher 	entry = RX_DESC_ENTRY(ring, entry + 1);
2150e689cf4aSJeff Kirsher 	cp->rx_old[ring] = entry;
2151e689cf4aSJeff Kirsher 
	/* kick the hardware only on 4-entry boundaries */
2152e689cf4aSJeff Kirsher 	if (entry % 4)
2153e689cf4aSJeff Kirsher 		return;
2154e689cf4aSJeff Kirsher 
2155e689cf4aSJeff Kirsher 	if (ring == 0)
2156e689cf4aSJeff Kirsher 		writel(entry, cp->regs + REG_RX_KICK);
2157e689cf4aSJeff Kirsher 	else if ((N_RX_DESC_RINGS > 1) &&
2158e689cf4aSJeff Kirsher 		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2159e689cf4aSJeff Kirsher 		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2160e689cf4aSJeff Kirsher }
2161e689cf4aSJeff Kirsher
2162e689cf4aSJeff Kirsher
2163e689cf4aSJeff Kirsher /* only when things are bad */
/* Repost up to @num RX descriptors on @ring after a buffer-exhaustion
 * event ("only when things are bad").  Pages still referenced by upper
 * layers are parked on rx_inuse_list and replaced from the spare pool.
 *
 * Returns 0 on success.  Returns -ENOMEM when no spare page is
 * available; in that case progress state is saved in rx_old/rx_last and
 * CAS_FLAG_RXD_POST is set so the link timer retries the post later.
 */
cas_post_rxds_ringN(struct cas * cp,int ring,int num)2164e689cf4aSJeff Kirsher static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2165e689cf4aSJeff Kirsher {
2166e689cf4aSJeff Kirsher 	unsigned int entry, last, count, released;
2167e689cf4aSJeff Kirsher 	int cluster;
2168e689cf4aSJeff Kirsher 	cas_page_t **page = cp->rx_pages[ring];
2169e689cf4aSJeff Kirsher 
2170e689cf4aSJeff Kirsher 	entry = cp->rx_old[ring];
2171e689cf4aSJeff Kirsher 
2172e689cf4aSJeff Kirsher 	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2173e689cf4aSJeff Kirsher 		     "rxd[%d] interrupt, done: %d\n", ring, entry);
2174e689cf4aSJeff Kirsher 
	/* walk stops 4 entries short of a full/num-limited wrap; "cluster"
	 * remembers the last 4-aligned entry to write to the kick register.
	 */
2175e689cf4aSJeff Kirsher 	cluster = -1;
2176e689cf4aSJeff Kirsher 	count = entry & 0x3;
2177e689cf4aSJeff Kirsher 	last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2178e689cf4aSJeff Kirsher 	released = 0;
2179e689cf4aSJeff Kirsher 	while (entry != last) {
2180e689cf4aSJeff Kirsher 		/* make a new buffer if it's still in use */
2181e689cf4aSJeff Kirsher 		if (page_count(page[entry]->buffer) > 1) {
2182e689cf4aSJeff Kirsher 			cas_page_t *new = cas_page_dequeue(cp);
2183e689cf4aSJeff Kirsher 			if (!new) {
2184e689cf4aSJeff Kirsher 				/* let the timer know that we need to
2185e689cf4aSJeff Kirsher 				 * do this again
2186e689cf4aSJeff Kirsher 				 */
2187e689cf4aSJeff Kirsher 				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2188e689cf4aSJeff Kirsher 				if (!timer_pending(&cp->link_timer))
2189e689cf4aSJeff Kirsher 					mod_timer(&cp->link_timer, jiffies +
2190e689cf4aSJeff Kirsher 						  CAS_LINK_FAST_TIMEOUT);
2191e689cf4aSJeff Kirsher 				cp->rx_old[ring]  = entry;
2192e689cf4aSJeff Kirsher 				cp->rx_last[ring] = num ? num - released : 0;
2193e689cf4aSJeff Kirsher 				return -ENOMEM;
2194e689cf4aSJeff Kirsher 			}
			/* old page is still owned elsewhere; park it on the
			 * in-use list until its refcount drops.
			 */
2195e689cf4aSJeff Kirsher 			spin_lock(&cp->rx_inuse_lock);
2196e689cf4aSJeff Kirsher 			list_add(&page[entry]->list, &cp->rx_inuse_list);
2197e689cf4aSJeff Kirsher 			spin_unlock(&cp->rx_inuse_lock);
2198e689cf4aSJeff Kirsher 			cp->init_rxds[ring][entry].buffer =
2199e689cf4aSJeff Kirsher 				cpu_to_le64(new->dma_addr);
2200e689cf4aSJeff Kirsher 			page[entry] = new;
2201e689cf4aSJeff Kirsher 
2202e689cf4aSJeff Kirsher 		}
2203e689cf4aSJeff Kirsher 
2204e689cf4aSJeff Kirsher 		if (++count == 4) {
2205e689cf4aSJeff Kirsher 			cluster = entry;
2206e689cf4aSJeff Kirsher 			count = 0;
2207e689cf4aSJeff Kirsher 		}
2208e689cf4aSJeff Kirsher 		released++;
2209e689cf4aSJeff Kirsher 		entry = RX_DESC_ENTRY(ring, entry + 1);
2210e689cf4aSJeff Kirsher 	}
2211e689cf4aSJeff Kirsher 	cp->rx_old[ring] = entry;
2212e689cf4aSJeff Kirsher 
2213e689cf4aSJeff Kirsher 	if (cluster < 0)
2214e689cf4aSJeff Kirsher 		return 0;
2215e689cf4aSJeff Kirsher 
2216e689cf4aSJeff Kirsher 	if (ring == 0)
2217e689cf4aSJeff Kirsher 		writel(cluster, cp->regs + REG_RX_KICK);
2218e689cf4aSJeff Kirsher 	else if ((N_RX_DESC_RINGS > 1) &&
2219e689cf4aSJeff Kirsher 		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2220e689cf4aSJeff Kirsher 		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2221e689cf4aSJeff Kirsher 	return 0;
2222e689cf4aSJeff Kirsher }
2223e689cf4aSJeff Kirsher
2224e689cf4aSJeff Kirsher
2225e689cf4aSJeff Kirsher /* process a completion ring. packets are set up in three basic ways:
2226e689cf4aSJeff Kirsher * small packets: should be copied header + data in single buffer.
2227e689cf4aSJeff Kirsher * large packets: header and data in a single buffer.
2228e689cf4aSJeff Kirsher * split packets: header in a separate buffer from data.
2229e689cf4aSJeff Kirsher * data may be in multiple pages. data may be > 256
2230e689cf4aSJeff Kirsher * bytes but in a single page.
2231e689cf4aSJeff Kirsher *
2232e689cf4aSJeff Kirsher * NOTE: RX page posting is done in this routine as well. while there's
2233e689cf4aSJeff Kirsher * the capability of using multiple RX completion rings, it isn't
2234e689cf4aSJeff Kirsher * really worthwhile due to the fact that the page posting will
2235e689cf4aSJeff Kirsher * force serialization on the single descriptor ring.
2236e689cf4aSJeff Kirsher */
/* Drain RX completion ring @ring, building and delivering an skb per
 * completed packet, up to @budget packets under NAPI (budget == 0 means
 * no limit in the non-NAPI path).
 *
 * For each completion entry: snapshot the four descriptor words, stop at
 * entries still owned by hardware, count error/drop statistics, hand
 * good packets either straight up (cas_skb_release) or to the flow
 * batcher (cas_rx_flow_pkt), then repost any header/data/next pages the
 * entry releases.  Returns the number of completion entries processed.
 */
cas_rx_ringN(struct cas * cp,int ring,int budget)2237e689cf4aSJeff Kirsher static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2238e689cf4aSJeff Kirsher {
2239e689cf4aSJeff Kirsher 	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2240e689cf4aSJeff Kirsher 	int entry, drops;
2241e689cf4aSJeff Kirsher 	int npackets = 0;
2242e689cf4aSJeff Kirsher 
2243e689cf4aSJeff Kirsher 	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2244e689cf4aSJeff Kirsher 		     "rx[%d] interrupt, done: %d/%d\n",
2245e689cf4aSJeff Kirsher 		     ring,
2246e689cf4aSJeff Kirsher 		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2247e689cf4aSJeff Kirsher 
2248e689cf4aSJeff Kirsher 	entry = cp->rx_new[ring];
2249e689cf4aSJeff Kirsher 	drops = 0;
2250e689cf4aSJeff Kirsher 	while (1) {
2251e689cf4aSJeff Kirsher 		struct cas_rx_comp *rxc = rxcs + entry;
2252e689cf4aSJeff Kirsher 		struct sk_buff *skb;
2253e689cf4aSJeff Kirsher 		int type, len;
2254e689cf4aSJeff Kirsher 		u64 words[4];
2255e689cf4aSJeff Kirsher 		int i, dring;
22563f649ab7SKees Cook 
		/* snapshot the completion words once; the hardware may keep
		 * writing the live descriptor.
		 */
2257e689cf4aSJeff Kirsher 		words[0] = le64_to_cpu(rxc->word1);
2258e689cf4aSJeff Kirsher 		words[1] = le64_to_cpu(rxc->word2);
2259e689cf4aSJeff Kirsher 		words[2] = le64_to_cpu(rxc->word3);
2260e689cf4aSJeff Kirsher 		words[3] = le64_to_cpu(rxc->word4);
2261e689cf4aSJeff Kirsher 
2262e689cf4aSJeff Kirsher 		/* don't touch if still owned by hw */
2263e689cf4aSJeff Kirsher 		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2264e689cf4aSJeff Kirsher 		if (type == 0)
2265e689cf4aSJeff Kirsher 			break;
2266e689cf4aSJeff Kirsher 
2267e689cf4aSJeff Kirsher 		/* hw hasn't cleared the zero bit yet */
2268e689cf4aSJeff Kirsher 		if (words[3] & RX_COMP4_ZERO) {
2269e689cf4aSJeff Kirsher 			break;
2270e689cf4aSJeff Kirsher 		}
2271e689cf4aSJeff Kirsher 
2272e689cf4aSJeff Kirsher 		/* get info on the packet */
2273e689cf4aSJeff Kirsher 		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2274e689cf4aSJeff Kirsher 			spin_lock(&cp->stat_lock[ring]);
2275e689cf4aSJeff Kirsher 			cp->net_stats[ring].rx_errors++;
2276e689cf4aSJeff Kirsher 			if (words[3] & RX_COMP4_LEN_MISMATCH)
2277e689cf4aSJeff Kirsher 				cp->net_stats[ring].rx_length_errors++;
2278e689cf4aSJeff Kirsher 			if (words[3] & RX_COMP4_BAD)
2279e689cf4aSJeff Kirsher 				cp->net_stats[ring].rx_crc_errors++;
2280e689cf4aSJeff Kirsher 			spin_unlock(&cp->stat_lock[ring]);
2281e689cf4aSJeff Kirsher 
2282e689cf4aSJeff Kirsher 			/* We'll just return it to Cassini. */
2283e689cf4aSJeff Kirsher  drop_it:
2284e689cf4aSJeff Kirsher 			spin_lock(&cp->stat_lock[ring]);
2285e689cf4aSJeff Kirsher 			++cp->net_stats[ring].rx_dropped;
2286e689cf4aSJeff Kirsher 			spin_unlock(&cp->stat_lock[ring]);
2287e689cf4aSJeff Kirsher 			goto next;
2288e689cf4aSJeff Kirsher 		}
2289e689cf4aSJeff Kirsher 
2290e689cf4aSJeff Kirsher 		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2291e689cf4aSJeff Kirsher 		if (len < 0) {
2292e689cf4aSJeff Kirsher 			++drops;
2293e689cf4aSJeff Kirsher 			goto drop_it;
2294e689cf4aSJeff Kirsher 		}
2295e689cf4aSJeff Kirsher 
2296e689cf4aSJeff Kirsher 		/* see if it's a flow re-assembly or not. the driver
2297e689cf4aSJeff Kirsher 		 * itself handles release back up.
2298e689cf4aSJeff Kirsher 		 */
2299e689cf4aSJeff Kirsher 		if (RX_DONT_BATCH || (type == 0x2)) {
2300e689cf4aSJeff Kirsher 			/* non-reassm: these always get released */
2301e689cf4aSJeff Kirsher 			cas_skb_release(skb);
2302e689cf4aSJeff Kirsher 		} else {
2303e689cf4aSJeff Kirsher 			cas_rx_flow_pkt(cp, words, skb);
2304e689cf4aSJeff Kirsher 		}
2305e689cf4aSJeff Kirsher 
2306e689cf4aSJeff Kirsher 		spin_lock(&cp->stat_lock[ring]);
2307e689cf4aSJeff Kirsher 		cp->net_stats[ring].rx_packets++;
2308e689cf4aSJeff Kirsher 		cp->net_stats[ring].rx_bytes += len;
2309e689cf4aSJeff Kirsher 		spin_unlock(&cp->stat_lock[ring]);
2310e689cf4aSJeff Kirsher 
2311e689cf4aSJeff Kirsher 	next:
2312e689cf4aSJeff Kirsher 		npackets++;
2313e689cf4aSJeff Kirsher 
2314e689cf4aSJeff Kirsher 		/* should it be released? */
		/* repost each descriptor-ring page the completion entry
		 * marks as released (header, data, and continuation pages).
		 */
2315e689cf4aSJeff Kirsher 		if (words[0] & RX_COMP1_RELEASE_HDR) {
2316e689cf4aSJeff Kirsher 			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2317e689cf4aSJeff Kirsher 			dring = CAS_VAL(RX_INDEX_RING, i);
2318e689cf4aSJeff Kirsher 			i = CAS_VAL(RX_INDEX_NUM, i);
2319e689cf4aSJeff Kirsher 			cas_post_page(cp, dring, i);
2320e689cf4aSJeff Kirsher 		}
2321e689cf4aSJeff Kirsher 
2322e689cf4aSJeff Kirsher 		if (words[0] & RX_COMP1_RELEASE_DATA) {
2323e689cf4aSJeff Kirsher 			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2324e689cf4aSJeff Kirsher 			dring = CAS_VAL(RX_INDEX_RING, i);
2325e689cf4aSJeff Kirsher 			i = CAS_VAL(RX_INDEX_NUM, i);
2326e689cf4aSJeff Kirsher 			cas_post_page(cp, dring, i);
2327e689cf4aSJeff Kirsher 		}
2328e689cf4aSJeff Kirsher 
2329e689cf4aSJeff Kirsher 		if (words[0] & RX_COMP1_RELEASE_NEXT) {
2330e689cf4aSJeff Kirsher 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2331e689cf4aSJeff Kirsher 			dring = CAS_VAL(RX_INDEX_RING, i);
2332e689cf4aSJeff Kirsher 			i = CAS_VAL(RX_INDEX_NUM, i);
2333e689cf4aSJeff Kirsher 			cas_post_page(cp, dring, i);
2334e689cf4aSJeff Kirsher 		}
2335e689cf4aSJeff Kirsher 
2336e689cf4aSJeff Kirsher 		/* skip to the next entry */
2337e689cf4aSJeff Kirsher 		entry = RX_COMP_ENTRY(ring, entry + 1 +
2338e689cf4aSJeff Kirsher 				      CAS_VAL(RX_COMP1_SKIP, words[0]));
2339e689cf4aSJeff Kirsher #ifdef USE_NAPI
2340e689cf4aSJeff Kirsher 		if (budget && (npackets >= budget))
2341e689cf4aSJeff Kirsher 			break;
2342e689cf4aSJeff Kirsher #endif
2343e689cf4aSJeff Kirsher 	}
2344e689cf4aSJeff Kirsher 	cp->rx_new[ring] = entry;
2345e689cf4aSJeff Kirsher 
2346e689cf4aSJeff Kirsher 	if (drops)
2347e689cf4aSJeff Kirsher 		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2348e689cf4aSJeff Kirsher 	return npackets;
2349e689cf4aSJeff Kirsher }
2350e689cf4aSJeff Kirsher
2351e689cf4aSJeff Kirsher
2352e689cf4aSJeff Kirsher /* put completion entries back on the ring */
/* Return processed completion entries on @ring to the hardware:
 * re-initialize every entry from rx_cur (last returned) up to rx_new
 * (last processed), then advance the completion tail register so the
 * chip may reuse them.
 */
cas_post_rxcs_ringN(struct net_device * dev,struct cas * cp,int ring)2353e689cf4aSJeff Kirsher static void cas_post_rxcs_ringN(struct net_device *dev,
2354e689cf4aSJeff Kirsher 				struct cas *cp, int ring)
2355e689cf4aSJeff Kirsher {
2356e689cf4aSJeff Kirsher 	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2357e689cf4aSJeff Kirsher 	int last, entry;
2358e689cf4aSJeff Kirsher 
2359e689cf4aSJeff Kirsher 	last = cp->rx_cur[ring];
2360e689cf4aSJeff Kirsher 	entry = cp->rx_new[ring];
2361e689cf4aSJeff Kirsher 	netif_printk(cp, intr, KERN_DEBUG, dev,
2362e689cf4aSJeff Kirsher 		     "rxc[%d] interrupt, done: %d/%d\n",
2363e689cf4aSJeff Kirsher 		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2364e689cf4aSJeff Kirsher 
2365e689cf4aSJeff Kirsher 	/* zero and re-mark descriptors */
2366e689cf4aSJeff Kirsher 	while (last != entry) {
2367e689cf4aSJeff Kirsher 		cas_rxc_init(rxc + last);
2368e689cf4aSJeff Kirsher 		last = RX_COMP_ENTRY(ring, last + 1);
2369e689cf4aSJeff Kirsher 	}
2370e689cf4aSJeff Kirsher 	cp->rx_cur[ring] = last;
2371e689cf4aSJeff Kirsher 
	/* ring 0 uses the base tail register; other rings only exist on
	 * REG_PLUS (Cassini+) hardware.
	 */
2372e689cf4aSJeff Kirsher 	if (ring == 0)
2373e689cf4aSJeff Kirsher 		writel(last, cp->regs + REG_RX_COMP_TAIL);
2374e689cf4aSJeff Kirsher 	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2375e689cf4aSJeff Kirsher 		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2376e689cf4aSJeff Kirsher }
2377e689cf4aSJeff Kirsher
2378e689cf4aSJeff Kirsher
2379e689cf4aSJeff Kirsher
2380e689cf4aSJeff Kirsher /* cassini can use all four PCI interrupts for the completion ring.
2381e689cf4aSJeff Kirsher * rings 3 and 4 are identical
2382e689cf4aSJeff Kirsher */
2383e689cf4aSJeff Kirsher #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
/* Non-RX-packet work for the auxiliary interrupts (rings 2/3): only the
 * "completion ring full / almost full" conditions, which are cured by
 * returning completion entries to the hardware.
 */
cas_handle_irqN(struct net_device * dev,struct cas * cp,const u32 status,const int ring)2384e689cf4aSJeff Kirsher static inline void cas_handle_irqN(struct net_device *dev,
2385e689cf4aSJeff Kirsher 				   struct cas *cp, const u32 status,
2386e689cf4aSJeff Kirsher 				   const int ring)
2387e689cf4aSJeff Kirsher {
2388e689cf4aSJeff Kirsher 	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2389e689cf4aSJeff Kirsher 		cas_post_rxcs_ringN(dev, cp, ring);
2390e689cf4aSJeff Kirsher }
2391e689cf4aSJeff Kirsher
/* Interrupt handler for completion rings 2 and 3 (PCI INTC/INTD).  The
 * ring is identified by which IRQ fired.  RX-done work is deferred to
 * NAPI when enabled, otherwise processed inline; remaining status bits
 * go to cas_handle_irqN().  Returns IRQ_NONE for a shared-IRQ spurious
 * call (status == 0).
 */
cas_interruptN(int irq,void * dev_id)2392e689cf4aSJeff Kirsher static irqreturn_t cas_interruptN(int irq, void *dev_id)
2393e689cf4aSJeff Kirsher {
2394e689cf4aSJeff Kirsher 	struct net_device *dev = dev_id;
2395e689cf4aSJeff Kirsher 	struct cas *cp = netdev_priv(dev);
2396e689cf4aSJeff Kirsher 	unsigned long flags;
2397e689cf4aSJeff Kirsher 	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2398e689cf4aSJeff Kirsher 	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2399e689cf4aSJeff Kirsher 
2400e689cf4aSJeff Kirsher 	/* check for shared irq */
24018decf868SDavid S. Miller 	if (status == 0)
2402e689cf4aSJeff Kirsher 		return IRQ_NONE;
2403e689cf4aSJeff Kirsher 
2404e689cf4aSJeff Kirsher 	spin_lock_irqsave(&cp->lock, flags);
2405e689cf4aSJeff Kirsher 	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2406e689cf4aSJeff Kirsher #ifdef USE_NAPI
		/* mask until the NAPI poll re-enables interrupts */
2407e689cf4aSJeff Kirsher 		cas_mask_intr(cp);
2408e689cf4aSJeff Kirsher 		napi_schedule(&cp->napi);
2409e689cf4aSJeff Kirsher #else
2410e689cf4aSJeff Kirsher 		cas_rx_ringN(cp, ring, 0);
2411e689cf4aSJeff Kirsher #endif
2412e689cf4aSJeff Kirsher 		status &= ~INTR_RX_DONE_ALT;
2413e689cf4aSJeff Kirsher 	}
2414e689cf4aSJeff Kirsher 
2415e689cf4aSJeff Kirsher 	if (status)
2416e689cf4aSJeff Kirsher 		cas_handle_irqN(dev, cp, status, ring);
2417e689cf4aSJeff Kirsher 	spin_unlock_irqrestore(&cp->lock, flags);
2418e689cf4aSJeff Kirsher 	return IRQ_HANDLED;
2419e689cf4aSJeff Kirsher }
2420e689cf4aSJeff Kirsher #endif
2421e689cf4aSJeff Kirsher
2422e689cf4aSJeff Kirsher #ifdef USE_PCI_INTB
2423e689cf4aSJeff Kirsher /* everything but rx packets */
/* Non-RX-packet work for interrupt ring 1: buffer-unavailable and
 * almost-empty conditions trigger descriptor reposting, completion
 * full/almost-full triggers completion-entry return.
 */
cas_handle_irq1(struct cas * cp,const u32 status)2424e689cf4aSJeff Kirsher static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2425e689cf4aSJeff Kirsher {
2426e689cf4aSJeff Kirsher 	if (status & INTR_RX_BUF_UNAVAIL_1) {
2427e689cf4aSJeff Kirsher 		/* Frame arrived, no free RX buffers available.
2428e689cf4aSJeff Kirsher 		 * NOTE: we can get this on a link transition. */
2429e689cf4aSJeff Kirsher 		cas_post_rxds_ringN(cp, 1, 0);
2430e689cf4aSJeff Kirsher 		spin_lock(&cp->stat_lock[1]);
2431e689cf4aSJeff Kirsher 		cp->net_stats[1].rx_dropped++;
2432e689cf4aSJeff Kirsher 		spin_unlock(&cp->stat_lock[1]);
2433e689cf4aSJeff Kirsher 	}
2434e689cf4aSJeff Kirsher 
	/* almost-empty: top the ring back up to its almost-empty water mark */
2435e689cf4aSJeff Kirsher 	if (status & INTR_RX_BUF_AE_1)
2436e689cf4aSJeff Kirsher 		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2437e689cf4aSJeff Kirsher 				    RX_AE_FREEN_VAL(1));
2438e689cf4aSJeff Kirsher 
2439e689cf4aSJeff Kirsher 	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2440e689cf4aSJeff Kirsher 		cas_post_rxcs_ringN(cp, 1);
2441e689cf4aSJeff Kirsher }
2442e689cf4aSJeff Kirsher
/* ring 2 handles a few more events than 3 and 4 */
/* Interrupt handler for the INTB line, servicing RX completion ring 1.
 * Same shape as cas_interruptN() but hard-wired to ring 1 and routed to
 * cas_handle_irq1() for housekeeping.
 */
static irqreturn_t cas_interrupt1(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));

	/* check for shared interrupt */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		/* mask chip interrupts; NAPI poll does the RX work */
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 1, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}
	/* remaining bits are housekeeping events for ring 1 */
	if (status)
		cas_handle_irq1(cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif
2471e689cf4aSJeff Kirsher
/* Housekeeping for the primary interrupt (ring 0): fatal/abnormal
 * conditions, RX descriptor refill on exhaustion or almost-empty, and
 * completion-ring kicks.  Called under cp->lock.
 */
static inline void cas_handle_irq(struct net_device *dev,
				  struct cas *cp, const u32 status)
{
	/* housekeeping interrupts */
	if (status & INTR_ERROR_MASK)
		cas_abnormal_irq(dev, cp, status);

	if (status & INTR_RX_BUF_UNAVAIL) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition.
		 */
		cas_post_rxds_ringN(cp, 0, 0);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_dropped++;
		spin_unlock(&cp->stat_lock[0]);
	} else if (status & INTR_RX_BUF_AE) {
		/* almost-empty: refill up to the AE watermark */
		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
				    RX_AE_FREEN_VAL(0));
	}

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(dev, cp, 0);
}
2495e689cf4aSJeff Kirsher
/* Primary (INTA) interrupt handler: TX completion, RX ring 0, and
 * housekeeping, in that order.  TX and housekeeping run under cp->lock;
 * with NAPI enabled, RX is deferred to cas_poll().
 */
static irqreturn_t cas_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_INTR_STATUS);

	/* zero status => shared IRQ fired for another device */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
		/* reap completed TX descriptors */
		cas_tx(dev, cp, status);
		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
	}

	if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 0, 0);
#endif
		status &= ~INTR_RX_DONE;
	}

	/* whatever is left over is housekeeping */
	if (status)
		cas_handle_irq(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
2527e689cf4aSJeff Kirsher
2528e689cf4aSJeff Kirsher
#ifdef USE_NAPI
/* NAPI poll callback: reap TX completions, spread the RX budget fairly
 * across all completion rings, then service any pending housekeeping
 * status before completing NAPI and unmasking interrupts.
 *
 * Returns the number of RX credits consumed (<= budget unless the last
 * ring pass overshot, in which case polling continues).
 */
static int cas_poll(struct napi_struct *napi, int budget)
{
	struct cas *cp = container_of(napi, struct cas, napi);
	struct net_device *dev = cp->dev;
	int i, enable_intr, credits;
	u32 status = readl(cp->regs + REG_INTR_STATUS);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cas_tx(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);

	/* NAPI rx packets. we spread the credits across all of the
	 * rxc rings
	 *
	 * to make sure we're fair with the work we loop through each
	 * ring N_RX_COMP_RING times with a request of
	 * budget / N_RX_COMP_RINGS
	 */
	enable_intr = 1;
	credits = 0;
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		int j;
		for (j = 0; j < N_RX_COMP_RINGS; j++) {
			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
			if (credits >= budget) {
				/* budget exhausted: stay in polled mode */
				enable_intr = 0;
				goto rx_comp;
			}
		}
	}

rx_comp:
	/* final rx completion */
	spin_lock_irqsave(&cp->lock, flags);
	if (status)
		cas_handle_irq(dev, cp, status);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
		if (status)
			/* cas_handle_irq1() takes (cp, status) — the
			 * original erroneously passed dev as well, which
			 * does not match the definition above.
			 */
			cas_handle_irq1(cp, status);
	}
#endif

#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
		if (status)
			cas_handle_irqN(dev, cp, status, 2);
	}
#endif

#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
		if (status)
			cas_handle_irqN(dev, cp, status, 3);
	}
#endif
	spin_unlock_irqrestore(&cp->lock, flags);
	if (enable_intr) {
		/* all rings drained: leave polling, re-arm interrupts */
		napi_complete(napi);
		cas_unmask_intr(cp);
	}
	return credits;
}
#endif
2599e689cf4aSJeff Kirsher
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll/netconsole hook: service the primary (INTA) interrupt path
 * with its IRQ disabled.  The extra completion-ring interrupt lines are
 * intentionally not serviced here (see the commented-out calls).
 */
static void cas_netpoll(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	cas_disable_irq(cp, 0);
	cas_interrupt(cp->pdev->irq, dev);
	cas_enable_irq(cp, 0);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		/* cas_interrupt1(); */
	}
#endif
#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		/* cas_interruptN(); */
	}
#endif
#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		/* cas_interruptN(); */
	}
#endif
}
#endif
2626e689cf4aSJeff Kirsher
/* ndo_tx_timeout handler: dump MIF/MAC/TX/RX/header-parser state for
 * diagnosis, then schedule a full chip reset via the reset workqueue.
 */
static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct cas *cp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");
	if (!cp->hw_running) {
		/* nothing to reset if the hardware was never brought up */
		netdev_err(dev, "hrm.. hw not running!\n");
		return;
	}

	netdev_err(dev, "MIF_STATE[%08x]\n",
		   readl(cp->regs + REG_MIF_STATE_MACHINE));

	netdev_err(dev, "MAC_STATE[%08x]\n",
		   readl(cp->regs + REG_MAC_STATE_MACHINE));

	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
		   readl(cp->regs + REG_TX_CFG),
		   readl(cp->regs + REG_MAC_TX_STATUS),
		   readl(cp->regs + REG_MAC_TX_CFG),
		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
		   readl(cp->regs + REG_TX_SM_1),
		   readl(cp->regs + REG_TX_SM_2));

	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
		   readl(cp->regs + REG_RX_CFG),
		   readl(cp->regs + REG_MAC_RX_STATUS),
		   readl(cp->regs + REG_MAC_RX_CFG));

	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
		   readl(cp->regs + REG_HP_STATE_MACHINE),
		   readl(cp->regs + REG_HP_STATUS0),
		   readl(cp->regs + REG_HP_STATUS1),
		   readl(cp->regs + REG_HP_STATUS2));

#if 1
	/* request an "all" reset; the work item decodes these counters */
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	schedule_work(&cp->reset_task);
#endif
}
2673e689cf4aSJeff Kirsher
/* Decide whether TX descriptor @entry on @ring should request an
 * interrupt on completion.  Policy: raise an IRQ once every half ring,
 * i.e. whenever the entry index lands on a multiple of
 * TX_DESC_RINGN_SIZE(ring) / 2.
 */
static inline int cas_intme(int ring, int entry)
{
	int half_ring_mask = (TX_DESC_RINGN_SIZE(ring) >> 1) - 1;

	return (entry & half_ring_mask) == 0;
}
2681e689cf4aSJeff Kirsher
2682e689cf4aSJeff Kirsher
/* Fill in TX descriptor @entry on @ring.  The buffer length is folded
 * into the control word, an interrupt-on-completion bit is added per
 * the cas_intme() half-ring policy, and EOF is flagged when @last is
 * set (final descriptor of a packet).
 */
static void cas_write_txd(struct cas *cp, int ring, int entry,
			  dma_addr_t mapping, int len, u64 ctrl, int last)
{
	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;

	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
	if (cas_intme(ring, entry))
		ctrl |= TX_DESC_INTME;
	if (last)
		ctrl |= TX_DESC_EOF;
	/* descriptor words are little-endian as seen by the chip */
	txd->control = cpu_to_le64(ctrl);
	txd->buffer = cpu_to_le64(mapping);
}
2696e689cf4aSJeff Kirsher
tx_tiny_buf(struct cas * cp,const int ring,const int entry)2697e689cf4aSJeff Kirsher static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2698e689cf4aSJeff Kirsher const int entry)
2699e689cf4aSJeff Kirsher {
2700e689cf4aSJeff Kirsher return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2701e689cf4aSJeff Kirsher }
2702e689cf4aSJeff Kirsher
/* Reserve the tiny buffer at @entry and return its DMA (bus) address.
 * Bookkeeping is split across two slots: the count of tiny buffers used
 * by this packet is kept at @tentry (the packet's first descriptor),
 * while the per-slot "used" flag is set on @entry itself.
 */
static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
				     const int entry, const int tentry)
{
	cp->tx_tiny_use[ring][tentry].nbufs++;
	cp->tx_tiny_use[ring][entry].used = 1;
	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
}
2710e689cf4aSJeff Kirsher
/* Queue @skb on TX ring @ring: map the linear head and each page
 * fragment for DMA, write one descriptor per mapped chunk, and kick the
 * ring.  Chunks whose tail would trigger the chip's target-abort
 * erratum (cas_calc_tabort() != 0) are split, with the tail copied into
 * a pre-mapped "tiny" bounce buffer.
 *
 * Returns 0 on success, 1 if the ring was unexpectedly full (callers
 * translate this to NETDEV_TX_BUSY).
 */
static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
				    struct sk_buff *skb)
{
	struct net_device *dev = cp->dev;
	int entry, nr_frags, frag, tabort, tentry;
	dma_addr_t mapping;
	unsigned long flags;
	u64 ctrl;
	u32 len;

	spin_lock_irqsave(&cp->tx_lock[ring], flags);

	/* This is a hard error, log it. */
	/* worst case each chunk may need CAS_TABORT() descriptors */
	if (TX_BUFFS_AVAIL(cp, ring) <=
	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		return 1;
	}

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* hardware checksum offload: tell the chip where to start
		 * summing and where to stuff the result
		 */
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = TX_DESC_CSUM_EN |
			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
	}

	entry = cp->tx_new[ring];
	cp->tx_skbs[ring][entry] = skb;

	nr_frags = skb_shinfo(skb)->nr_frags;
	len = skb_headlen(skb);
	mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
			       offset_in_page(skb->data), len, DMA_TO_DEVICE);

	/* tentry remembers the packet's first slot for tiny-buf counting */
	tentry = entry;
	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
	if (unlikely(tabort)) {
		/* NOTE: len is always > tabort */
		/* split: DMA the head directly, bounce the tail */
		cas_write_txd(cp, ring, entry, mapping, len - tabort,
			      ctrl | TX_DESC_SOF, 0);
		entry = TX_DESC_NEXT(ring, entry);

		skb_copy_from_linear_data_offset(skb, len - tabort,
			      tx_tiny_buf(cp, ring, entry), tabort);
		mapping = tx_tiny_map(cp, ring, entry, tentry);
		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
			      (nr_frags == 0));
	} else {
		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
			      TX_DESC_SOF, (nr_frags == 0));
	}
	entry = TX_DESC_NEXT(ring, entry);

	for (frag = 0; frag < nr_frags; frag++) {
		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		len = skb_frag_size(fragp);
		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
					   DMA_TO_DEVICE);

		tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
		if (unlikely(tabort)) {
			/* NOTE: len is always > tabort */
			/* same split-and-bounce dance for a fragment tail */
			cas_write_txd(cp, ring, entry, mapping, len - tabort,
				      ctrl, 0);
			entry = TX_DESC_NEXT(ring, entry);
			memcpy_from_page(tx_tiny_buf(cp, ring, entry),
					 skb_frag_page(fragp),
					 skb_frag_off(fragp) + len - tabort,
					 tabort);
			mapping = tx_tiny_map(cp, ring, entry, tentry);
			len = tabort;
		}

		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
			      (frag + 1 == nr_frags));
		entry = TX_DESC_NEXT(ring, entry);
	}

	cp->tx_new[ring] = entry;
	/* stop the queue early if the next worst-case packet can't fit */
	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
	/* kick the chip: descriptors up to 'entry' are ready */
	writel(entry, cp->regs + REG_TX_KICKN(ring));
	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
	return 0;
}
2806e689cf4aSJeff Kirsher
cas_start_xmit(struct sk_buff * skb,struct net_device * dev)2807e689cf4aSJeff Kirsher static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2808e689cf4aSJeff Kirsher {
2809e689cf4aSJeff Kirsher struct cas *cp = netdev_priv(dev);
2810e689cf4aSJeff Kirsher
2811e689cf4aSJeff Kirsher /* this is only used as a load-balancing hint, so it doesn't
2812e689cf4aSJeff Kirsher * need to be SMP safe
2813e689cf4aSJeff Kirsher */
2814e689cf4aSJeff Kirsher static int ring;
2815e689cf4aSJeff Kirsher
2816e689cf4aSJeff Kirsher if (skb_padto(skb, cp->min_frame_size))
2817e689cf4aSJeff Kirsher return NETDEV_TX_OK;
2818e689cf4aSJeff Kirsher
2819e689cf4aSJeff Kirsher /* XXX: we need some higher-level QoS hooks to steer packets to
2820e689cf4aSJeff Kirsher * individual queues.
2821e689cf4aSJeff Kirsher */
2822e689cf4aSJeff Kirsher if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2823e689cf4aSJeff Kirsher return NETDEV_TX_BUSY;
2824e689cf4aSJeff Kirsher return NETDEV_TX_OK;
2825e689cf4aSJeff Kirsher }
2826e689cf4aSJeff Kirsher
/* Program the TX DMA engine: completion-writeback address, per-ring
 * descriptor base addresses, TX config, and per-ring max burst sizes.
 * Called during chip initialization.
 */
static void cas_init_tx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	unsigned long off;
	u32 val;
	int i;

	/* set up tx completion writeback registers. must be 8-byte aligned */
#ifdef USE_TX_COMPWB
	off = offsetof(struct cas_init_block, tx_compwb);
	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
#endif

	/* enable completion writebacks, enable paced mode,
	 * disable read pipe, and disable pre-interrupt compwbs
	 */
	val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
		TX_CFG_INTR_COMPWB_DIS;

	/* write out tx ring info and tx desc bases */
	for (i = 0; i < MAX_TX_RINGS; i++) {
		/* descriptor rings live inside the init block; compute each
		 * ring's DMA address from its offset within that block
		 */
		off = (unsigned long) cp->init_txds[i] -
			(unsigned long) cp->init_block;

		val |= CAS_TX_RINGN_BASE(i);
		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
		writel((desc_dma + off) & 0xffffffff, cp->regs +
		       REG_TX_DBN_LOW(i));
		/* don't zero out the kick register here as the system
		 * will wedge
		 */
	}
	writel(val, cp->regs + REG_TX_CFG);

	/* program max burst sizes. these numbers should be different
	 * if doing QoS.
	 */
#ifdef USE_QOS
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
#else
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
#endif
}
2879e689cf4aSJeff Kirsher
/* Must be invoked under cp->lock. */
/* Initialize both DMA engines: TX first, then RX. */
static inline void cas_init_dma(struct cas *cp)
{
	cas_init_tx_dma(cp);
	cas_init_rx_dma(cp);
}
2886e689cf4aSJeff Kirsher
/* Program the multicast filter from the device's MC address list.  The
 * first CAS_MC_EXACT_MATCH_SIZE addresses go into the alternate MAC
 * address registers (exact match, slots starting at index 1); any
 * remaining addresses fall back to the 256-bit CRC hash table.
 */
static void cas_process_mc_list(struct cas *cp)
{
	u16 hash_table[16];
	u32 crc;
	struct netdev_hw_addr *ha;
	int i = 1;	/* slot 0 holds the primary MAC address */

	memset(hash_table, 0, sizeof(hash_table));
	netdev_for_each_mc_addr(ha, cp->dev) {
		if (i <= CAS_MC_EXACT_MATCH_SIZE) {
			/* use the alternate mac address registers for the
			 * first 15 multicast addresses
			 */
			/* each address occupies three 16-bit registers,
			 * written low half first
			 */
			writel((ha->addr[4] << 8) | ha->addr[5],
			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
			writel((ha->addr[2] << 8) | ha->addr[3],
			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
			writel((ha->addr[0] << 8) | ha->addr[1],
			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
			i++;
		}
		else {
			/* use hw hash table for the next series of
			 * multicast addresses
			 */
			/* top 8 CRC bits select one of 256 hash-table bits */
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
	}
	for (i = 0; i < 16; i++)
		writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
}
2920e689cf4aSJeff Kirsher
2921e689cf4aSJeff Kirsher /* Must be invoked under cp->lock. */
cas_setup_multicast(struct cas * cp)2922e689cf4aSJeff Kirsher static u32 cas_setup_multicast(struct cas *cp)
2923e689cf4aSJeff Kirsher {
2924e689cf4aSJeff Kirsher u32 rxcfg = 0;
2925e689cf4aSJeff Kirsher int i;
2926e689cf4aSJeff Kirsher
2927e689cf4aSJeff Kirsher if (cp->dev->flags & IFF_PROMISC) {
2928e689cf4aSJeff Kirsher rxcfg |= MAC_RX_CFG_PROMISC_EN;
2929e689cf4aSJeff Kirsher
2930e689cf4aSJeff Kirsher } else if (cp->dev->flags & IFF_ALLMULTI) {
2931e689cf4aSJeff Kirsher for (i=0; i < 16; i++)
2932e689cf4aSJeff Kirsher writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2933e689cf4aSJeff Kirsher rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2934e689cf4aSJeff Kirsher
2935e689cf4aSJeff Kirsher } else {
2936e689cf4aSJeff Kirsher cas_process_mc_list(cp);
2937e689cf4aSJeff Kirsher rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2938e689cf4aSJeff Kirsher }
2939e689cf4aSJeff Kirsher
2940e689cf4aSJeff Kirsher return rxcfg;
2941e689cf4aSJeff Kirsher }
2942e689cf4aSJeff Kirsher
2943e689cf4aSJeff Kirsher /* must be invoked under cp->stat_lock[N_TX_RINGS] */
cas_clear_mac_err(struct cas * cp)2944e689cf4aSJeff Kirsher static void cas_clear_mac_err(struct cas *cp)
2945e689cf4aSJeff Kirsher {
2946e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_COLL_NORMAL);
2947e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_COLL_FIRST);
2948e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_COLL_EXCESS);
2949e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_COLL_LATE);
2950e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_TIMER_DEFER);
2951e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
2952e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_RECV_FRAME);
2953e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_LEN_ERR);
2954e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_ALIGN_ERR);
2955e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_FCS_ERR);
2956e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
2957e689cf4aSJeff Kirsher }
2958e689cf4aSJeff Kirsher
2959e689cf4aSJeff Kirsher
/* Reset the MAC TX and RX blocks, polling each reset bit until the
 * hardware clears it (up to STOP_TRIES * 10us per block), and log if
 * either reset fails to complete.
 */
static void cas_mac_reset(struct cas *cp)
{
	int i;

	/* do both TX and RX reset */
	writel(0x1, cp->regs + REG_MAC_TX_RESET);
	writel(0x1, cp->regs + REG_MAC_RX_RESET);

	/* wait for TX */
	i = STOP_TRIES;
	while (i-- > 0) {
		/* chip clears the reset bit when done */
		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
			break;
		udelay(10);
	}

	/* wait for RX */
	i = STOP_TRIES;
	while (i-- > 0) {
		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
			break;
		udelay(10);
	}

	/* either bit still set => reset timed out; dump state for debug */
	if (readl(cp->regs + REG_MAC_TX_RESET) |
	    readl(cp->regs + REG_MAC_RX_RESET))
		netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
			   readl(cp->regs + REG_MAC_TX_RESET),
			   readl(cp->regs + REG_MAC_RX_RESET),
			   readl(cp->regs + REG_MAC_STATE_MACHINE));
}
2992e689cf4aSJeff Kirsher
2993e689cf4aSJeff Kirsher /* Must be invoked under cp->lock. */
cas_init_mac(struct cas * cp)2994e689cf4aSJeff Kirsher static void cas_init_mac(struct cas *cp)
2995e689cf4aSJeff Kirsher {
2996e689cf4aSJeff Kirsher const unsigned char *e = &cp->dev->dev_addr[0];
2997e689cf4aSJeff Kirsher int i;
2998e689cf4aSJeff Kirsher cas_mac_reset(cp);
2999e689cf4aSJeff Kirsher
3000a7639279SJakub Kicinski /* setup core arbitration weight register */
3001e689cf4aSJeff Kirsher writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3002e689cf4aSJeff Kirsher
3003e689cf4aSJeff Kirsher #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3004e689cf4aSJeff Kirsher /* set the infinite burst register for chips that don't have
3005e689cf4aSJeff Kirsher * pci issues.
3006e689cf4aSJeff Kirsher */
3007e689cf4aSJeff Kirsher if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3008e689cf4aSJeff Kirsher writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3009e689cf4aSJeff Kirsher #endif
3010e689cf4aSJeff Kirsher
3011e689cf4aSJeff Kirsher writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3012e689cf4aSJeff Kirsher
3013e689cf4aSJeff Kirsher writel(0x00, cp->regs + REG_MAC_IPG0);
3014e689cf4aSJeff Kirsher writel(0x08, cp->regs + REG_MAC_IPG1);
3015e689cf4aSJeff Kirsher writel(0x04, cp->regs + REG_MAC_IPG2);
3016e689cf4aSJeff Kirsher
3017e689cf4aSJeff Kirsher /* change later for 802.3z */
3018e689cf4aSJeff Kirsher writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3019e689cf4aSJeff Kirsher
3020e689cf4aSJeff Kirsher /* min frame + FCS */
3021e689cf4aSJeff Kirsher writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3022e689cf4aSJeff Kirsher
3023e689cf4aSJeff Kirsher /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3024e689cf4aSJeff Kirsher * specify the maximum frame size to prevent RX tag errors on
3025e689cf4aSJeff Kirsher * oversized frames.
3026e689cf4aSJeff Kirsher */
3027e689cf4aSJeff Kirsher writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3028e689cf4aSJeff Kirsher CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3029e689cf4aSJeff Kirsher (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3030e689cf4aSJeff Kirsher cp->regs + REG_MAC_FRAMESIZE_MAX);
3031e689cf4aSJeff Kirsher
3032e689cf4aSJeff Kirsher /* NOTE: crc_size is used as a surrogate for half-duplex.
3033e689cf4aSJeff Kirsher * workaround saturn half-duplex issue by increasing preamble
3034e689cf4aSJeff Kirsher * size to 65 bytes.
3035e689cf4aSJeff Kirsher */
3036e689cf4aSJeff Kirsher if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3037e689cf4aSJeff Kirsher writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3038e689cf4aSJeff Kirsher else
3039e689cf4aSJeff Kirsher writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3040e689cf4aSJeff Kirsher writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3041e689cf4aSJeff Kirsher writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3042e689cf4aSJeff Kirsher writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3043e689cf4aSJeff Kirsher
3044e689cf4aSJeff Kirsher writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3045e689cf4aSJeff Kirsher
3046e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3047e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3048e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3049e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3050e689cf4aSJeff Kirsher writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3051e689cf4aSJeff Kirsher
3052e689cf4aSJeff Kirsher /* setup mac address in perfect filter array */
3053e689cf4aSJeff Kirsher for (i = 0; i < 45; i++)
3054e689cf4aSJeff Kirsher writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3055e689cf4aSJeff Kirsher
3056e689cf4aSJeff Kirsher writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3057e689cf4aSJeff Kirsher writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3058e689cf4aSJeff Kirsher writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3059e689cf4aSJeff Kirsher
3060e689cf4aSJeff Kirsher writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3061e689cf4aSJeff Kirsher writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3062e689cf4aSJeff Kirsher writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3063e689cf4aSJeff Kirsher
3064e689cf4aSJeff Kirsher cp->mac_rx_cfg = cas_setup_multicast(cp);
3065e689cf4aSJeff Kirsher
3066e689cf4aSJeff Kirsher spin_lock(&cp->stat_lock[N_TX_RINGS]);
3067e689cf4aSJeff Kirsher cas_clear_mac_err(cp);
3068e689cf4aSJeff Kirsher spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3069e689cf4aSJeff Kirsher
3070e689cf4aSJeff Kirsher /* Setup MAC interrupts. We want to get all of the interesting
3071e689cf4aSJeff Kirsher * counter expiration events, but we do not want to hear about
3072e689cf4aSJeff Kirsher * normal rx/tx as the DMA engine tells us that.
3073e689cf4aSJeff Kirsher */
3074e689cf4aSJeff Kirsher writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3075e689cf4aSJeff Kirsher writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3076e689cf4aSJeff Kirsher
3077e689cf4aSJeff Kirsher /* Don't enable even the PAUSE interrupts for now, we
3078e689cf4aSJeff Kirsher * make no use of those events other than to record them.
3079e689cf4aSJeff Kirsher */
3080e689cf4aSJeff Kirsher writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3081e689cf4aSJeff Kirsher }
3082e689cf4aSJeff Kirsher
3083e689cf4aSJeff Kirsher /* Must be invoked under cp->lock. */
cas_init_pause_thresholds(struct cas * cp)3084e689cf4aSJeff Kirsher static void cas_init_pause_thresholds(struct cas *cp)
3085e689cf4aSJeff Kirsher {
3086e689cf4aSJeff Kirsher /* Calculate pause thresholds. Setting the OFF threshold to the
3087e689cf4aSJeff Kirsher * full RX fifo size effectively disables PAUSE generation
3088e689cf4aSJeff Kirsher */
3089e689cf4aSJeff Kirsher if (cp->rx_fifo_size <= (2 * 1024)) {
3090e689cf4aSJeff Kirsher cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3091e689cf4aSJeff Kirsher } else {
3092e689cf4aSJeff Kirsher int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3093e689cf4aSJeff Kirsher if (max_frame * 3 > cp->rx_fifo_size) {
3094e689cf4aSJeff Kirsher cp->rx_pause_off = 7104;
3095e689cf4aSJeff Kirsher cp->rx_pause_on = 960;
3096e689cf4aSJeff Kirsher } else {
3097e689cf4aSJeff Kirsher int off = (cp->rx_fifo_size - (max_frame * 2));
3098e689cf4aSJeff Kirsher int on = off - max_frame;
3099e689cf4aSJeff Kirsher cp->rx_pause_off = off;
3100e689cf4aSJeff Kirsher cp->rx_pause_on = on;
3101e689cf4aSJeff Kirsher }
3102e689cf4aSJeff Kirsher }
3103e689cf4aSJeff Kirsher }
3104e689cf4aSJeff Kirsher
/* Compare the string @str (including its NUL terminator) against PROM
 * bytes starting at @p.  Returns 1 on a full match, 0 otherwise.
 */
static int cas_vpd_match(const void __iomem *p, const char *str)
{
	const char *s = str;

	do {
		if (readb(p) != *s)
			return 0;
		p++;
	} while (*s++);
	return 1;
}
3116e689cf4aSJeff Kirsher
3117e689cf4aSJeff Kirsher
3118e689cf4aSJeff Kirsher /* get the mac address by reading the vpd information in the rom.
3119e689cf4aSJeff Kirsher * also get the phy type and determine if there's an entropy generator.
3120e689cf4aSJeff Kirsher * NOTE: this is a bit convoluted for the following reasons:
3121e689cf4aSJeff Kirsher * 1) vpd info has order-dependent mac addresses for multinic cards
3122e689cf4aSJeff Kirsher * 2) the only way to determine the nic order is to use the slot
3123e689cf4aSJeff Kirsher * number.
3124e689cf4aSJeff Kirsher * 3) fiber cards don't have bridges, so their slot numbers don't
3125e689cf4aSJeff Kirsher * mean anything.
3126e689cf4aSJeff Kirsher * 4) we don't actually know we have a fiber card until after
3127e689cf4aSJeff Kirsher * the mac addresses are parsed.
3128e689cf4aSJeff Kirsher */
cas_get_vpd_info(struct cas * cp,unsigned char * dev_addr,const int offset)3129e689cf4aSJeff Kirsher static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3130e689cf4aSJeff Kirsher const int offset)
3131e689cf4aSJeff Kirsher {
3132e689cf4aSJeff Kirsher void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3133e689cf4aSJeff Kirsher void __iomem *base, *kstart;
3134e689cf4aSJeff Kirsher int i, len;
3135e689cf4aSJeff Kirsher int found = 0;
3136e689cf4aSJeff Kirsher #define VPD_FOUND_MAC 0x01
3137e689cf4aSJeff Kirsher #define VPD_FOUND_PHY 0x02
3138e689cf4aSJeff Kirsher
3139e689cf4aSJeff Kirsher int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3140e689cf4aSJeff Kirsher int mac_off = 0;
3141e689cf4aSJeff Kirsher
3142e689cf4aSJeff Kirsher #if defined(CONFIG_SPARC)
3143e689cf4aSJeff Kirsher const unsigned char *addr;
3144e689cf4aSJeff Kirsher #endif
3145e689cf4aSJeff Kirsher
3146e689cf4aSJeff Kirsher /* give us access to the PROM */
3147e689cf4aSJeff Kirsher writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3148e689cf4aSJeff Kirsher cp->regs + REG_BIM_LOCAL_DEV_EN);
3149e689cf4aSJeff Kirsher
3150e689cf4aSJeff Kirsher /* check for an expansion rom */
3151e689cf4aSJeff Kirsher if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3152e689cf4aSJeff Kirsher goto use_random_mac_addr;
3153e689cf4aSJeff Kirsher
3154e689cf4aSJeff Kirsher /* search for beginning of vpd */
3155e689cf4aSJeff Kirsher base = NULL;
3156e689cf4aSJeff Kirsher for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3157e689cf4aSJeff Kirsher /* check for PCIR */
3158e689cf4aSJeff Kirsher if ((readb(p + i + 0) == 0x50) &&
3159e689cf4aSJeff Kirsher (readb(p + i + 1) == 0x43) &&
3160e689cf4aSJeff Kirsher (readb(p + i + 2) == 0x49) &&
3161e689cf4aSJeff Kirsher (readb(p + i + 3) == 0x52)) {
3162e689cf4aSJeff Kirsher base = p + (readb(p + i + 8) |
3163e689cf4aSJeff Kirsher (readb(p + i + 9) << 8));
3164e689cf4aSJeff Kirsher break;
3165e689cf4aSJeff Kirsher }
3166e689cf4aSJeff Kirsher }
3167e689cf4aSJeff Kirsher
3168e689cf4aSJeff Kirsher if (!base || (readb(base) != 0x82))
3169e689cf4aSJeff Kirsher goto use_random_mac_addr;
3170e689cf4aSJeff Kirsher
3171e689cf4aSJeff Kirsher i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3172e689cf4aSJeff Kirsher while (i < EXPANSION_ROM_SIZE) {
3173e689cf4aSJeff Kirsher if (readb(base + i) != 0x90) /* no vpd found */
3174e689cf4aSJeff Kirsher goto use_random_mac_addr;
3175e689cf4aSJeff Kirsher
3176e689cf4aSJeff Kirsher /* found a vpd field */
3177e689cf4aSJeff Kirsher len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3178e689cf4aSJeff Kirsher
3179e689cf4aSJeff Kirsher /* extract keywords */
3180e689cf4aSJeff Kirsher kstart = base + i + 3;
3181e689cf4aSJeff Kirsher p = kstart;
3182e689cf4aSJeff Kirsher while ((p - kstart) < len) {
3183e689cf4aSJeff Kirsher int klen = readb(p + 2);
3184e689cf4aSJeff Kirsher int j;
3185e689cf4aSJeff Kirsher char type;
3186e689cf4aSJeff Kirsher
3187e689cf4aSJeff Kirsher p += 3;
3188e689cf4aSJeff Kirsher
3189e689cf4aSJeff Kirsher /* look for the following things:
3190e689cf4aSJeff Kirsher * -- correct length == 29
3191e689cf4aSJeff Kirsher * 3 (type) + 2 (size) +
3192e689cf4aSJeff Kirsher * 18 (strlen("local-mac-address") + 1) +
3193e689cf4aSJeff Kirsher * 6 (mac addr)
3194e689cf4aSJeff Kirsher * -- VPD Instance 'I'
3195e689cf4aSJeff Kirsher * -- VPD Type Bytes 'B'
3196e689cf4aSJeff Kirsher * -- VPD data length == 6
3197e689cf4aSJeff Kirsher * -- property string == local-mac-address
3198e689cf4aSJeff Kirsher *
3199e689cf4aSJeff Kirsher * -- correct length == 24
3200e689cf4aSJeff Kirsher * 3 (type) + 2 (size) +
3201e689cf4aSJeff Kirsher * 12 (strlen("entropy-dev") + 1) +
3202e689cf4aSJeff Kirsher * 7 (strlen("vms110") + 1)
3203e689cf4aSJeff Kirsher * -- VPD Instance 'I'
3204e689cf4aSJeff Kirsher * -- VPD Type String 'B'
3205e689cf4aSJeff Kirsher * -- VPD data length == 7
3206e689cf4aSJeff Kirsher * -- property string == entropy-dev
3207e689cf4aSJeff Kirsher *
3208e689cf4aSJeff Kirsher * -- correct length == 18
3209e689cf4aSJeff Kirsher * 3 (type) + 2 (size) +
3210e689cf4aSJeff Kirsher * 9 (strlen("phy-type") + 1) +
3211e689cf4aSJeff Kirsher * 4 (strlen("pcs") + 1)
3212e689cf4aSJeff Kirsher * -- VPD Instance 'I'
3213e689cf4aSJeff Kirsher * -- VPD Type String 'S'
3214e689cf4aSJeff Kirsher * -- VPD data length == 4
3215e689cf4aSJeff Kirsher * -- property string == phy-type
3216e689cf4aSJeff Kirsher *
3217e689cf4aSJeff Kirsher * -- correct length == 23
3218e689cf4aSJeff Kirsher * 3 (type) + 2 (size) +
3219e689cf4aSJeff Kirsher * 14 (strlen("phy-interface") + 1) +
3220e689cf4aSJeff Kirsher * 4 (strlen("pcs") + 1)
3221e689cf4aSJeff Kirsher * -- VPD Instance 'I'
3222e689cf4aSJeff Kirsher * -- VPD Type String 'S'
3223e689cf4aSJeff Kirsher * -- VPD data length == 4
3224e689cf4aSJeff Kirsher * -- property string == phy-interface
3225e689cf4aSJeff Kirsher */
3226e689cf4aSJeff Kirsher if (readb(p) != 'I')
3227e689cf4aSJeff Kirsher goto next;
3228e689cf4aSJeff Kirsher
3229e689cf4aSJeff Kirsher /* finally, check string and length */
3230e689cf4aSJeff Kirsher type = readb(p + 3);
3231e689cf4aSJeff Kirsher if (type == 'B') {
3232e689cf4aSJeff Kirsher if ((klen == 29) && readb(p + 4) == 6 &&
3233e689cf4aSJeff Kirsher cas_vpd_match(p + 5,
3234e689cf4aSJeff Kirsher "local-mac-address")) {
3235e689cf4aSJeff Kirsher if (mac_off++ > offset)
3236e689cf4aSJeff Kirsher goto next;
3237e689cf4aSJeff Kirsher
3238e689cf4aSJeff Kirsher /* set mac address */
3239e689cf4aSJeff Kirsher for (j = 0; j < 6; j++)
3240e689cf4aSJeff Kirsher dev_addr[j] =
3241e689cf4aSJeff Kirsher readb(p + 23 + j);
3242e689cf4aSJeff Kirsher goto found_mac;
3243e689cf4aSJeff Kirsher }
3244e689cf4aSJeff Kirsher }
3245e689cf4aSJeff Kirsher
3246e689cf4aSJeff Kirsher if (type != 'S')
3247e689cf4aSJeff Kirsher goto next;
3248e689cf4aSJeff Kirsher
3249e689cf4aSJeff Kirsher #ifdef USE_ENTROPY_DEV
3250e689cf4aSJeff Kirsher if ((klen == 24) &&
3251e689cf4aSJeff Kirsher cas_vpd_match(p + 5, "entropy-dev") &&
3252e689cf4aSJeff Kirsher cas_vpd_match(p + 17, "vms110")) {
3253e689cf4aSJeff Kirsher cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3254e689cf4aSJeff Kirsher goto next;
3255e689cf4aSJeff Kirsher }
3256e689cf4aSJeff Kirsher #endif
3257e689cf4aSJeff Kirsher
3258e689cf4aSJeff Kirsher if (found & VPD_FOUND_PHY)
3259e689cf4aSJeff Kirsher goto next;
3260e689cf4aSJeff Kirsher
3261e689cf4aSJeff Kirsher if ((klen == 18) && readb(p + 4) == 4 &&
3262e689cf4aSJeff Kirsher cas_vpd_match(p + 5, "phy-type")) {
3263e689cf4aSJeff Kirsher if (cas_vpd_match(p + 14, "pcs")) {
3264e689cf4aSJeff Kirsher phy_type = CAS_PHY_SERDES;
3265e689cf4aSJeff Kirsher goto found_phy;
3266e689cf4aSJeff Kirsher }
3267e689cf4aSJeff Kirsher }
3268e689cf4aSJeff Kirsher
3269e689cf4aSJeff Kirsher if ((klen == 23) && readb(p + 4) == 4 &&
3270e689cf4aSJeff Kirsher cas_vpd_match(p + 5, "phy-interface")) {
3271e689cf4aSJeff Kirsher if (cas_vpd_match(p + 19, "pcs")) {
3272e689cf4aSJeff Kirsher phy_type = CAS_PHY_SERDES;
3273e689cf4aSJeff Kirsher goto found_phy;
3274e689cf4aSJeff Kirsher }
3275e689cf4aSJeff Kirsher }
3276e689cf4aSJeff Kirsher found_mac:
3277e689cf4aSJeff Kirsher found |= VPD_FOUND_MAC;
3278e689cf4aSJeff Kirsher goto next;
3279e689cf4aSJeff Kirsher
3280e689cf4aSJeff Kirsher found_phy:
3281e689cf4aSJeff Kirsher found |= VPD_FOUND_PHY;
3282e689cf4aSJeff Kirsher
3283e689cf4aSJeff Kirsher next:
3284e689cf4aSJeff Kirsher p += klen;
3285e689cf4aSJeff Kirsher }
3286e689cf4aSJeff Kirsher i += len + 3;
3287e689cf4aSJeff Kirsher }
3288e689cf4aSJeff Kirsher
3289e689cf4aSJeff Kirsher use_random_mac_addr:
3290e689cf4aSJeff Kirsher if (found & VPD_FOUND_MAC)
3291e689cf4aSJeff Kirsher goto done;
3292e689cf4aSJeff Kirsher
3293e689cf4aSJeff Kirsher #if defined(CONFIG_SPARC)
3294e689cf4aSJeff Kirsher addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3295e689cf4aSJeff Kirsher if (addr != NULL) {
3296e689cf4aSJeff Kirsher memcpy(dev_addr, addr, ETH_ALEN);
3297e689cf4aSJeff Kirsher goto done;
3298e689cf4aSJeff Kirsher }
3299e689cf4aSJeff Kirsher #endif
3300d458cdf7SJoe Perches
3301e689cf4aSJeff Kirsher /* Sun MAC prefix then 3 random bytes. */
3302e689cf4aSJeff Kirsher pr_info("MAC address not found in ROM VPD\n");
3303e689cf4aSJeff Kirsher dev_addr[0] = 0x08;
3304e689cf4aSJeff Kirsher dev_addr[1] = 0x00;
3305e689cf4aSJeff Kirsher dev_addr[2] = 0x20;
3306e689cf4aSJeff Kirsher get_random_bytes(dev_addr + 3, 3);
3307e689cf4aSJeff Kirsher
3308e689cf4aSJeff Kirsher done:
3309e689cf4aSJeff Kirsher writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3310e689cf4aSJeff Kirsher return phy_type;
3311e689cf4aSJeff Kirsher }
3312e689cf4aSJeff Kirsher
3313e689cf4aSJeff Kirsher /* check pci invariants */
cas_check_pci_invariants(struct cas * cp)3314e689cf4aSJeff Kirsher static void cas_check_pci_invariants(struct cas *cp)
3315e689cf4aSJeff Kirsher {
3316e689cf4aSJeff Kirsher struct pci_dev *pdev = cp->pdev;
3317e689cf4aSJeff Kirsher
3318e689cf4aSJeff Kirsher cp->cas_flags = 0;
3319e689cf4aSJeff Kirsher if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3320e689cf4aSJeff Kirsher (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3321e689cf4aSJeff Kirsher if (pdev->revision >= CAS_ID_REVPLUS)
3322e689cf4aSJeff Kirsher cp->cas_flags |= CAS_FLAG_REG_PLUS;
3323e689cf4aSJeff Kirsher if (pdev->revision < CAS_ID_REVPLUS02u)
3324e689cf4aSJeff Kirsher cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3325e689cf4aSJeff Kirsher
3326e689cf4aSJeff Kirsher /* Original Cassini supports HW CSUM, but it's not
3327e689cf4aSJeff Kirsher * enabled by default as it can trigger TX hangs.
3328e689cf4aSJeff Kirsher */
3329e689cf4aSJeff Kirsher if (pdev->revision < CAS_ID_REV2)
3330e689cf4aSJeff Kirsher cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3331e689cf4aSJeff Kirsher } else {
3332e689cf4aSJeff Kirsher /* Only sun has original cassini chips. */
3333e689cf4aSJeff Kirsher cp->cas_flags |= CAS_FLAG_REG_PLUS;
3334e689cf4aSJeff Kirsher
3335e689cf4aSJeff Kirsher /* We use a flag because the same phy might be externally
3336e689cf4aSJeff Kirsher * connected.
3337e689cf4aSJeff Kirsher */
3338e689cf4aSJeff Kirsher if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3339e689cf4aSJeff Kirsher (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3340e689cf4aSJeff Kirsher cp->cas_flags |= CAS_FLAG_SATURN;
3341e689cf4aSJeff Kirsher }
3342e689cf4aSJeff Kirsher }
3343e689cf4aSJeff Kirsher
3344e689cf4aSJeff Kirsher
/* Probe the board: rx page sizing, FIFO sizes, MAC address and PHY.
 * Fills in cp->page_order/page_size, cp->*_fifo_size, the netdev MAC
 * address, and cp->phy_type/phy_addr/phy_id.
 * Returns 0 on success, -1 if no MII phy responds.
 */
static int cas_check_invariants(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;
	u8 addr[ETH_ALEN];
	u32 cfg;
	int i;

	/* get page size for rx buffers. */
	cp->page_order = 0;
#ifdef USE_PAGE_ORDER
	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
		/* see if we can allocate larger pages */
		struct page *page = alloc_pages(GFP_ATOMIC,
						CAS_JUMBO_PAGE_SHIFT -
						PAGE_SHIFT);
		if (page) {
			/* trial allocation only: free it and remember the
			 * order so rx buffers use jumbo-capable pages
			 */
			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
		} else {
			printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
		}
	}
#endif
	cp->page_size = (PAGE_SIZE << cp->page_order);

	/* Fetch the FIFO configurations. */
	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
	cp->rx_fifo_size = RX_FIFO_SIZE;

	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
	 * they're both connected.
	 */
	cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn));
	eth_hw_addr_set(cp->dev, addr);
	if (cp->phy_type & CAS_PHY_SERDES) {
		/* SERDES/fiber parts are gigabit-capable by definition */
		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
		return 0; /* no more checking needed */
	}

	/* MII */
	cfg = readl(cp->regs + REG_MIF_CFG);
	if (cfg & MIF_CFG_MDIO_1) {
		cp->phy_type = CAS_PHY_MII_MDIO1;
	} else if (cfg & MIF_CFG_MDIO_0) {
		cp->phy_type = CAS_PHY_MII_MDIO0;
	}

	/* disable MIF autopolling while we probe the bus directly */
	cas_mif_poll(cp, 0);
	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);

	/* scan all 32 MII addresses for a responding phy; retry each
	 * address up to 3 times in case a read fails transiently
	 */
	for (i = 0; i < 32; i++) {
		u32 phy_id;
		int j;

		for (j = 0; j < 3; j++) {
			cp->phy_addr = i;
			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
			phy_id |= cas_phy_read(cp, MII_PHYSID2);
			/* all-zeros or all-ones means nothing answered */
			if (phy_id && (phy_id != 0xFFFFFFFF)) {
				cp->phy_id = phy_id;
				goto done;
			}
		}
	}
	pr_err("MII phy did not respond [%08x]\n",
	       readl(cp->regs + REG_MIF_STATE_MACHINE));
	return -1;

done:
	/* see if we can do gigabit */
	cfg = cas_phy_read(cp, MII_BMSR);
	if ((cfg & CAS_BMSR_1000_EXTEND) &&
	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
	return 0;
}
3421e689cf4aSJeff Kirsher
/* Enable the TX/RX DMA engines and the MAC, then unmask interrupts and
 * kick the RX descriptor ring(s).  Must be invoked under cp->lock.
 */
static inline void cas_start_dma(struct cas *cp)
{
	int i;
	u32 val;
	int txfailed = 0;

	/* enable dma */
	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);
	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* enable the mac */
	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
	writel(val, cp->regs + REG_MAC_TX_CFG);
	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
	writel(val, cp->regs + REG_MAC_RX_CFG);

	/* poll until the TX enable bit reads back as set.  note that
	 * when the loop runs out of tries, i ends at -1 (not 0), which
	 * is what the i < 0 check below relies on.
	 */
	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_TX_CFG);
		if ((val & MAC_TX_CFG_EN))
			break;
		udelay(10);
	}
	if (i < 0) txfailed = 1;
	/* same poll for the RX enable bit.  a TX failure is only
	 * reported here so tx and rx failures appear in one message.
	 */
	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if ((val & MAC_RX_CFG_EN)) {
			if (txfailed) {
				netdev_err(cp->dev,
					   "enabling mac failed [tx:%08x:%08x]\n",
					   readl(cp->regs + REG_MIF_STATE_MACHINE),
					   readl(cp->regs + REG_MAC_STATE_MACHINE));
			}
			goto enable_rx_done;
		}
		udelay(10);
	}
	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
		   (txfailed ? "tx,rx" : "rx"),
		   readl(cp->regs + REG_MIF_STATE_MACHINE),
		   readl(cp->regs + REG_MAC_STATE_MACHINE));

enable_rx_done:
	cas_unmask_intr(cp); /* enable interrupts */
	/* hand all but the last 4 descriptors of ring 0 to the chip */
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
	writel(0, cp->regs + REG_RX_COMP_TAIL);

	/* cassini+ parts have a second rx descriptor ring to kick */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		if (N_RX_DESC_RINGS > 1)
			writel(RX_DESC_RINGN_SIZE(1) - 4,
			       cp->regs + REG_PLUS_RX_KICK1);
	}
}
3479e689cf4aSJeff Kirsher
3480e689cf4aSJeff Kirsher /* Must be invoked under cp->lock. */
cas_read_pcs_link_mode(struct cas * cp,int * fd,int * spd,int * pause)3481e689cf4aSJeff Kirsher static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3482e689cf4aSJeff Kirsher int *pause)
3483e689cf4aSJeff Kirsher {
3484e689cf4aSJeff Kirsher u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3485e689cf4aSJeff Kirsher *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3486e689cf4aSJeff Kirsher *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3487e689cf4aSJeff Kirsher if (val & PCS_MII_LPA_ASYM_PAUSE)
3488e689cf4aSJeff Kirsher *pause |= 0x10;
3489e689cf4aSJeff Kirsher *spd = 1000;
3490e689cf4aSJeff Kirsher }
3491e689cf4aSJeff Kirsher
3492e689cf4aSJeff Kirsher /* Must be invoked under cp->lock. */
cas_read_mii_link_mode(struct cas * cp,int * fd,int * spd,int * pause)3493e689cf4aSJeff Kirsher static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3494e689cf4aSJeff Kirsher int *pause)
3495e689cf4aSJeff Kirsher {
3496e689cf4aSJeff Kirsher u32 val;
3497e689cf4aSJeff Kirsher
3498e689cf4aSJeff Kirsher *fd = 0;
3499e689cf4aSJeff Kirsher *spd = 10;
3500e689cf4aSJeff Kirsher *pause = 0;
3501e689cf4aSJeff Kirsher
3502e689cf4aSJeff Kirsher /* use GMII registers */
3503e689cf4aSJeff Kirsher val = cas_phy_read(cp, MII_LPA);
3504e689cf4aSJeff Kirsher if (val & CAS_LPA_PAUSE)
3505e689cf4aSJeff Kirsher *pause = 0x01;
3506e689cf4aSJeff Kirsher
3507e689cf4aSJeff Kirsher if (val & CAS_LPA_ASYM_PAUSE)
3508e689cf4aSJeff Kirsher *pause |= 0x10;
3509e689cf4aSJeff Kirsher
3510e689cf4aSJeff Kirsher if (val & LPA_DUPLEX)
3511e689cf4aSJeff Kirsher *fd = 1;
3512e689cf4aSJeff Kirsher if (val & LPA_100)
3513e689cf4aSJeff Kirsher *spd = 100;
3514e689cf4aSJeff Kirsher
3515e689cf4aSJeff Kirsher if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3516e689cf4aSJeff Kirsher val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3517e689cf4aSJeff Kirsher if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3518e689cf4aSJeff Kirsher *spd = 1000;
3519e689cf4aSJeff Kirsher if (val & CAS_LPA_1000FULL)
3520e689cf4aSJeff Kirsher *fd = 1;
3521e689cf4aSJeff Kirsher }
3522e689cf4aSJeff Kirsher }
3523e689cf4aSJeff Kirsher
/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under cp->lock.
 */
static void cas_set_link_modes(struct cas *cp)
{
	u32 val;
	int full_duplex, speed, pause;

	/* conservative defaults if nothing below overrides them */
	full_duplex = 0;
	speed = 10;
	pause = 0;

	if (CAS_PHY_MII(cp->phy_type)) {
		/* stop MIF autopolling while we access the phy directly */
		cas_mif_poll(cp, 0);
		val = cas_phy_read(cp, MII_BMCR);
		if (val & BMCR_ANENABLE) {
			/* autoneg enabled: read negotiated modes from the
			 * link partner ability registers
			 */
			cas_read_mii_link_mode(cp, &full_duplex, &speed,
					       &pause);
		} else {
			/* forced mode: derive settings from BMCR bits */
			if (val & BMCR_FULLDPLX)
				full_duplex = 1;

			if (val & BMCR_SPEED100)
				speed = 100;
			else if (val & CAS_BMCR_SPEED1000)
				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
					1000 : 100;
		}
		cas_mif_poll(cp, 1);

	} else {
		/* PCS/SERDES link */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
		if ((val & PCS_MII_AUTONEG_EN) == 0) {
			/* forced PCS mode: duplex comes from the control
			 * register instead of the LPA
			 */
			if (val & PCS_MII_CTRL_DUPLEX)
				full_duplex = 1;
		}
	}

	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
		   speed, full_duplex ? "full" : "half");

	/* program the XIF (transceiver interface) configuration */
	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
	if (CAS_PHY_MII(cp->phy_type)) {
		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
		/* half duplex: don't echo our own transmissions back */
		if (!full_duplex)
			val |= MAC_XIF_DISABLE_ECHO;
	}
	if (full_duplex)
		val |= MAC_XIF_FDPLX_LED;
	if (speed == 1000)
		val |= MAC_XIF_GMII_MODE;
	writel(val, cp->regs + REG_MAC_XIF_CFG);

	/* deal with carrier and collision detect. */
	val = MAC_TX_CFG_IPG_EN;
	if (full_duplex) {
		val |= MAC_TX_CFG_IGNORE_CARRIER;
		val |= MAC_TX_CFG_IGNORE_COLL;
	} else {
#ifndef USE_CSMA_CD_PROTO
		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
#endif
	}
	/* val now set up for REG_MAC_TX_CFG */

	/* If gigabit and half-duplex, enable carrier extension
	 * mode.  increase slot time to 512 bytes as well.
	 * else, disable it and make sure slot time is 64 bytes.
	 * also activate checksum bug workaround
	 */
	if ((speed == 1000) && !full_duplex) {
		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_TX_CFG);

		val = readl(cp->regs + REG_MAC_RX_CFG);
		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);

		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);

		/* nonzero crc_size doubles as the half-duplex indicator
		 * elsewhere in the driver (e.g. the saturn preamble
		 * workaround in cas_init_mac)
		 */
		cp->crc_size = 4;
		/* minimum size gigabit frame at half duplex */
		cp->min_frame_size = CAS_1000MB_MIN_FRAME;

	} else {
		writel(val, cp->regs + REG_MAC_TX_CFG);

		/* checksum bug workaround. don't strip FCS when in
		 * half-duplex mode
		 */
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if (full_duplex) {
			val |= MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 0;
			cp->min_frame_size = CAS_MIN_MTU;
		} else {
			val &= ~MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 4;
			cp->min_frame_size = CAS_MIN_FRAME;
		}
		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);
		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
	}

	if (netif_msg_link(cp)) {
		if (pause & 0x01) {
			/* symmetric pause: we both send and honor pause */
			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
				    cp->rx_fifo_size,
				    cp->rx_pause_off,
				    cp->rx_pause_on);
		} else if (pause & 0x10) {
			/* asymmetric pause: we only send pause frames */
			netdev_info(cp->dev, "TX pause enabled\n");
		} else {
			netdev_info(cp->dev, "Pause is disabled\n");
		}
	}

	/* program the pause send/receive enables to match the
	 * negotiated flow control
	 */
	val = readl(cp->regs + REG_MAC_CTRL_CFG);
	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
	if (pause) { /* symmetric or asymmetric pause */
		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
		if (pause & 0x01) { /* symmetric pause */
			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
		}
	}
	writel(val, cp->regs + REG_MAC_CTRL_CFG);

	/* finally turn on the DMA engines and MAC */
	cas_start_dma(cp);
}
3658e689cf4aSJeff Kirsher
3659e689cf4aSJeff Kirsher /* Must be invoked under cp->lock. */
cas_init_hw(struct cas * cp,int restart_link)3660e689cf4aSJeff Kirsher static void cas_init_hw(struct cas *cp, int restart_link)
3661e689cf4aSJeff Kirsher {
3662e689cf4aSJeff Kirsher if (restart_link)
3663e689cf4aSJeff Kirsher cas_phy_init(cp);
3664e689cf4aSJeff Kirsher
3665e689cf4aSJeff Kirsher cas_init_pause_thresholds(cp);
3666e689cf4aSJeff Kirsher cas_init_mac(cp);
3667e689cf4aSJeff Kirsher cas_init_dma(cp);
3668e689cf4aSJeff Kirsher
3669e689cf4aSJeff Kirsher if (restart_link) {
3670e689cf4aSJeff Kirsher /* Default aneg parameters */
3671e689cf4aSJeff Kirsher cp->timer_ticks = 0;
3672e689cf4aSJeff Kirsher cas_begin_auto_negotiation(cp, NULL);
3673e689cf4aSJeff Kirsher } else if (cp->lstate == link_up) {
3674e689cf4aSJeff Kirsher cas_set_link_modes(cp);
3675e689cf4aSJeff Kirsher netif_carrier_on(cp->dev);
3676e689cf4aSJeff Kirsher }
3677e689cf4aSJeff Kirsher }
3678e689cf4aSJeff Kirsher
/* Must be invoked under cp->lock. on earlier cassini boards,
 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
 * let it settle out, and then restore pci state.
 */
static void cas_hard_reset(struct cas *cp)
{
	/* assert SOFT_0, which pulls the PCI reset line on those boards */
	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
	udelay(20);
	/* the reset wiped PCI config space; restore the saved state */
	pci_restore_state(cp->pdev);
}
3689e689cf4aSJeff Kirsher
3690e689cf4aSJeff Kirsher
/* Issue a chip-wide software reset and re-arm the BIM interrupt and PCI
 * error masks afterwards. @blkflag requests that PCS autonegotiation
 * results survive the reset (PCS PHYs only).
 */
static void cas_global_reset(struct cas *cp, int blkflag)
{
	int limit;

	/* issue a global reset. don't use RSTOUT. */
	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
		/* For PCS, when the blkflag is set, we should set the
		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
		 * the last autonegotiation from being cleared. We'll
		 * need some special handling if the chip is set into a
		 * loopback mode.
		 */
		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
		       cp->regs + REG_SW_RESET);
	} else {
		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
	}

	/* need to wait at least 3ms before polling register */
	mdelay(3);

	/* poll until the chip self-clears the TX/RX reset bits */
	limit = STOP_TRIES;
	while (limit-- > 0) {
		u32 val = readl(cp->regs + REG_SW_RESET);
		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
			goto done;
		udelay(10);
	}
	/* reset never completed; continue anyway after logging */
	netdev_err(cp->dev, "sw reset failed\n");

done:
	/* enable various BIM interrupts */
	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);

	/* clear out pci error status mask for handled errors.
	 * we don't deal with DMA counter overflows as they happen
	 * all the time.
	 */
	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
			       PCI_ERR_BIM_DMA_READ), cp->regs +
	       REG_PCI_ERR_STATUS_MASK);

	/* set up for MII by default to address mac rx reset timeout
	 * issue
	 */
	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
}
3740e689cf4aSJeff Kirsher
/* Full software reset: mask interrupts, reset the chip/MAC/entropy
 * blocks, disable both DMA engines, reload the header-parser firmware
 * and clear the MAC error counters. @blkflag is forwarded to
 * cas_global_reset() to optionally preserve PCS autonegotiation state.
 */
static void cas_reset(struct cas *cp, int blkflag)
{
	u32 val;

	cas_mask_intr(cp);
	cas_global_reset(cp, blkflag);
	cas_mac_reset(cp);
	cas_entropy_reset(cp);

	/* disable dma engines. */
	val = readl(cp->regs + REG_TX_CFG);
	val &= ~TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);

	val = readl(cp->regs + REG_RX_CFG);
	val &= ~RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* program header parser. use the default firmware when the
	 * target-abort workaround is needed or when no alternate
	 * firmware is compiled in (alt table == the null program).
	 */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
	    (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) {
		cas_load_firmware(cp, CAS_HP_FIRMWARE);
	} else {
		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
	}

	/* clear out error registers */
	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
}
3772e689cf4aSJeff Kirsher
/* Shut down the chip, must be called with pm_mutex held. */
static void cas_shutdown(struct cas *cp)
{
	unsigned long flags;

	/* Make us not-running to avoid timers respawning */
	cp->hw_running = 0;

	del_timer_sync(&cp->link_timer);

	/* Stop the reset task: yield until any queued reset work drains */
#if 0
	while (atomic_read(&cp->reset_task_pending_mtu) ||
	       atomic_read(&cp->reset_task_pending_spare) ||
	       atomic_read(&cp->reset_task_pending_all))
		schedule();

#else
	while (atomic_read(&cp->reset_task_pending))
		schedule();
#endif
	/* Actually stop the chip */
	cas_lock_all_save(cp, flags);
	cas_reset(cp, 0);
	/* Saturn boards can also power the PHY down */
	if (cp->cas_flags & CAS_FLAG_SATURN)
		cas_phy_powerdown(cp);
	cas_unlock_all_restore(cp, flags);
}
3801e689cf4aSJeff Kirsher
/* ndo_change_mtu handler. If the interface is up, the actual work is
 * delegated to the reset task: SERDES links request a full reset (with
 * renegotiation), others only an MTU-level reset. Blocks until the
 * reset task has run. Always returns 0.
 */
static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cas *cp = netdev_priv(dev);

	dev->mtu = new_mtu;
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* let the reset task handle it */
#if 1
	atomic_inc(&cp->reset_task_pending);
	if ((cp->phy_type & CAS_PHY_SERDES)) {
		atomic_inc(&cp->reset_task_pending_all);
	} else {
		atomic_inc(&cp->reset_task_pending_mtu);
	}
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
		   CAS_RESET_ALL : CAS_RESET_MTU);
	pr_err("reset called in cas_change_mtu\n");
	schedule_work(&cp->reset_task);
#endif

	/* wait for the reset task to finish before returning */
	flush_work(&cp->reset_task);
	return 0;
}
3829e689cf4aSJeff Kirsher
/* Unmap and free every skb still attached to TX descriptor ring @ring,
 * then clear the ring's tiny-buffer usage map. The descriptor index
 * (i) is advanced inside the fragment loop, stepping over tiny-buffer
 * descriptors that have no DMA mapping of their own.
 */
static void cas_clean_txd(struct cas *cp, int ring)
{
	struct cas_tx_desc *txd = cp->init_txds[ring];
	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
	u64 daddr, dlen;
	int i, size;

	size = TX_DESC_RINGN_SIZE(ring);
	for (i = 0; i < size; i++) {
		int frag;

		if (skbs[i] == NULL)
			continue;

		skb = skbs[i];
		skbs[i] = NULL;

		/* one descriptor for the head plus one per page fragment,
		 * hence <= nr_frags
		 */
		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			int ent = i & (size - 1);

			/* first buffer is never a tiny buffer and so
			 * needs to be unmapped.
			 */
			daddr = le64_to_cpu(txd[ent].buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd[ent].control));
			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
				       DMA_TO_DEVICE);

			if (frag != skb_shinfo(skb)->nr_frags) {
				i++;

				/* next buffer might by a tiny buffer.
				 * skip past it.
				 */
				ent = i & (size - 1);
				if (cp->tx_tiny_use[ring][ent].used)
					i++;
			}
		}
		dev_kfree_skb_any(skb);
	}

	/* zero out tiny buf usage */
	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
}
3876e689cf4aSJeff Kirsher
3877e689cf4aSJeff Kirsher /* freed on close */
cas_free_rx_desc(struct cas * cp,int ring)3878e689cf4aSJeff Kirsher static inline void cas_free_rx_desc(struct cas *cp, int ring)
3879e689cf4aSJeff Kirsher {
3880e689cf4aSJeff Kirsher cas_page_t **page = cp->rx_pages[ring];
3881e689cf4aSJeff Kirsher int i, size;
3882e689cf4aSJeff Kirsher
3883e689cf4aSJeff Kirsher size = RX_DESC_RINGN_SIZE(ring);
3884e689cf4aSJeff Kirsher for (i = 0; i < size; i++) {
3885e689cf4aSJeff Kirsher if (page[i]) {
3886e689cf4aSJeff Kirsher cas_page_free(cp, page[i]);
3887e689cf4aSJeff Kirsher page[i] = NULL;
3888e689cf4aSJeff Kirsher }
3889e689cf4aSJeff Kirsher }
3890e689cf4aSJeff Kirsher }
3891e689cf4aSJeff Kirsher
cas_free_rxds(struct cas * cp)3892e689cf4aSJeff Kirsher static void cas_free_rxds(struct cas *cp)
3893e689cf4aSJeff Kirsher {
3894e689cf4aSJeff Kirsher int i;
3895e689cf4aSJeff Kirsher
3896e689cf4aSJeff Kirsher for (i = 0; i < N_RX_DESC_RINGS; i++)
3897e689cf4aSJeff Kirsher cas_free_rx_desc(cp, i);
3898e689cf4aSJeff Kirsher }
3899e689cf4aSJeff Kirsher
/* Must be invoked under cp->lock. Resets TX ring bookkeeping, frees
 * all in-flight TX skbs, zeroes the shared init block and re-initializes
 * the RX descriptor and completion rings.
 */
static void cas_clean_rings(struct cas *cp)
{
	int i;

	/* need to clean all tx rings */
	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
	for (i = 0; i < N_TX_RINGS; i++)
		cas_clean_txd(cp, i);

	/* zero out init block */
	memset(cp->init_block, 0, sizeof(struct cas_init_block));
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);
}
3916e689cf4aSJeff Kirsher
3917e689cf4aSJeff Kirsher /* allocated on open */
cas_alloc_rx_desc(struct cas * cp,int ring)3918e689cf4aSJeff Kirsher static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3919e689cf4aSJeff Kirsher {
3920e689cf4aSJeff Kirsher cas_page_t **page = cp->rx_pages[ring];
3921e689cf4aSJeff Kirsher int size, i = 0;
3922e689cf4aSJeff Kirsher
3923e689cf4aSJeff Kirsher size = RX_DESC_RINGN_SIZE(ring);
3924e689cf4aSJeff Kirsher for (i = 0; i < size; i++) {
3925e689cf4aSJeff Kirsher if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3926e689cf4aSJeff Kirsher return -1;
3927e689cf4aSJeff Kirsher }
3928e689cf4aSJeff Kirsher return 0;
3929e689cf4aSJeff Kirsher }
3930e689cf4aSJeff Kirsher
cas_alloc_rxds(struct cas * cp)3931e689cf4aSJeff Kirsher static int cas_alloc_rxds(struct cas *cp)
3932e689cf4aSJeff Kirsher {
3933e689cf4aSJeff Kirsher int i;
3934e689cf4aSJeff Kirsher
3935e689cf4aSJeff Kirsher for (i = 0; i < N_RX_DESC_RINGS; i++) {
3936e689cf4aSJeff Kirsher if (cas_alloc_rx_desc(cp, i) < 0) {
3937e689cf4aSJeff Kirsher cas_free_rxds(cp);
3938e689cf4aSJeff Kirsher return -1;
3939e689cf4aSJeff Kirsher }
3940e689cf4aSJeff Kirsher }
3941e689cf4aSJeff Kirsher return 0;
3942e689cf4aSJeff Kirsher }
3943e689cf4aSJeff Kirsher
/* Work-queue handler that performs a chip reset outside interrupt
 * context. The pending_* counters describe the work requested:
 * spare-page recovery only, an MTU-level reset, or a full reset with
 * renegotiation. Counters are sampled once at entry and only that
 * sampled amount is consumed at exit, so requests arriving mid-run
 * trigger another pass.
 */
static void cas_reset_task(struct work_struct *work)
{
	struct cas *cp = container_of(work, struct cas, reset_task);
#if 0
	int pending = atomic_read(&cp->reset_task_pending);
#else
	int pending_all = atomic_read(&cp->reset_task_pending_all);
	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);

	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
		/* We can have more tasks scheduled than actually
		 * needed.
		 */
		atomic_dec(&cp->reset_task_pending);
		return;
	}
#endif
	/* The link went down, we reset the ring, but keep
	 * DMA stopped. Use this function for reset
	 * on error as well.
	 */
	if (cp->hw_running) {
		unsigned long flags;

		/* Make sure we don't get interrupts or tx packets */
		netif_device_detach(cp->dev);
		cas_lock_all_save(cp, flags);

		if (cp->opened) {
			/* We call cas_spare_recover when we call cas_open.
			 * but we do not initialize the lists cas_spare_recover
			 * uses until cas_open is called.
			 */
			cas_spare_recover(cp, GFP_ATOMIC);
		}
#if 1
		/* test => only pending_spare set */
		if (!pending_all && !pending_mtu)
			goto done;
#else
		if (pending == CAS_RESET_SPARE)
			goto done;
#endif
		/* when pending == CAS_RESET_ALL, the following
		 * call to cas_init_hw will restart auto negotiation.
		 * Setting the second argument of cas_reset to
		 * !(pending == CAS_RESET_ALL) will set this argument
		 * to 1 (avoiding reinitializing the PHY for the normal
		 * PCS case) when auto negotiation is not restarted.
		 */
#if 1
		cas_reset(cp, !(pending_all > 0));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, (pending_all > 0));
#else
		cas_reset(cp, !(pending == CAS_RESET_ALL));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, pending == CAS_RESET_ALL);
#endif

done:
		cas_unlock_all_restore(cp, flags);
		netif_device_attach(cp->dev);
	}
#if 1
	/* consume only the work we sampled at entry; anything queued
	 * while we were running stays pending for the next invocation.
	 */
	atomic_sub(pending_all, &cp->reset_task_pending_all);
	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
	atomic_dec(&cp->reset_task_pending);
#else
	atomic_set(&cp->reset_task_pending, 0);
#endif
}
4020e689cf4aSJeff Kirsher
/* Periodic link-state poll (re-armed every CAS_LINK_TIMEOUT). Also
 * re-posts RX descriptors that could not be posted earlier, checks
 * MII/PCS link status, and watches the TX MAC state machine for
 * lockups, scheduling a full reset via the reset task if needed.
 */
static void cas_link_timer(struct timer_list *t)
{
	struct cas *cp = from_timer(cp, t, link_timer);
	int mask, pending = 0, reset = 0;
	unsigned long flags;

	if (link_transition_timeout != 0 &&
	    cp->link_transition_jiffies_valid &&
	    time_is_before_jiffies(cp->link_transition_jiffies +
				   link_transition_timeout)) {
		/* One-second counter so link-down workaround doesn't
		 * cause resets to occur so fast as to fool the switch
		 * into thinking the link is down.
		 */
		cp->link_transition_jiffies_valid = 0;
	}

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	cas_lock_tx(cp);
	cas_entropy_gather(cp);

	/* If the link task is still pending, we just
	 * reschedule the link timer
	 */
#if 1
	if (atomic_read(&cp->reset_task_pending_all) ||
	    atomic_read(&cp->reset_task_pending_spare) ||
	    atomic_read(&cp->reset_task_pending_mtu))
		goto done;
#else
	if (atomic_read(&cp->reset_task_pending))
		goto done;
#endif

	/* check for rx cleaning: retry posting descriptors for any ring
	 * flagged in CAS_FLAG_RXD_POST_MASK
	 */
	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
		int i, rmask;

		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
			rmask = CAS_FLAG_RXD_POST(i);
			if ((mask & rmask) == 0)
				continue;

			/* post_rxds will do a mod_timer */
			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
				pending = 1;
				continue;
			}
			cp->cas_flags &= ~rmask;
		}
	}

	if (CAS_PHY_MII(cp->phy_type)) {
		u16 bmsr;
		cas_mif_poll(cp, 0);
		bmsr = cas_phy_read(cp, MII_BMSR);
		/* WTZ: Solaris driver reads this twice, but that
		 * may be due to the PCS case and the use of a
		 * common implementation. Read it twice here to be
		 * safe.
		 */
		bmsr = cas_phy_read(cp, MII_BMSR);
		cas_mif_poll(cp, 1);
		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
		reset = cas_mii_link_check(cp, bmsr);
	} else {
		reset = cas_pcs_link_check(cp);
	}

	if (reset)
		goto done;

	/* check for tx state machine confusion */
	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
		u32 wptr, rptr;
		int tlm = CAS_VAL(MAC_SM_TLM, val);

		if (((tlm == 0x5) || (tlm == 0x3)) &&
		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
				     "tx err: MAC_STATE[%08x]\n", val);
			reset = 1;
			goto done;
		}

		/* a zero packet count while write/read pointers disagree
		 * indicates an inconsistent (wedged) TX FIFO
		 */
		val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
		if ((val == 0) && (wptr != rptr)) {
			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
				     val, wptr, rptr);
			reset = 1;
		}

		if (reset)
			cas_hard_reset(cp);
	}

done:
	if (reset) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
		pr_err("reset called in cas_link_timer\n");
		schedule_work(&cp->reset_task);
#endif
	}

	/* cas_post_rxds_ringN re-arms the timer itself when it fails */
	if (!pending)
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
	cas_unlock_tx(cp);
	spin_unlock_irqrestore(&cp->lock, flags);
}
4142e689cf4aSJeff Kirsher
4143e689cf4aSJeff Kirsher /* tiny buffers are used to avoid target abort issues with
4144e689cf4aSJeff Kirsher * older cassini's
4145e689cf4aSJeff Kirsher */
cas_tx_tiny_free(struct cas * cp)4146e689cf4aSJeff Kirsher static void cas_tx_tiny_free(struct cas *cp)
4147e689cf4aSJeff Kirsher {
4148e689cf4aSJeff Kirsher struct pci_dev *pdev = cp->pdev;
4149e689cf4aSJeff Kirsher int i;
4150e689cf4aSJeff Kirsher
4151e689cf4aSJeff Kirsher for (i = 0; i < N_TX_RINGS; i++) {
4152e689cf4aSJeff Kirsher if (!cp->tx_tiny_bufs[i])
4153e689cf4aSJeff Kirsher continue;
4154e689cf4aSJeff Kirsher
4155e689cf4aSJeff Kirsher dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4156e689cf4aSJeff Kirsher cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
4157e689cf4aSJeff Kirsher cp->tx_tiny_bufs[i] = NULL;
4158e689cf4aSJeff Kirsher }
4159dcc82bb0SChristophe JAILLET }
4160dcc82bb0SChristophe JAILLET
cas_tx_tiny_alloc(struct cas * cp)4161e689cf4aSJeff Kirsher static int cas_tx_tiny_alloc(struct cas *cp)
4162e689cf4aSJeff Kirsher {
4163e689cf4aSJeff Kirsher struct pci_dev *pdev = cp->pdev;
4164e689cf4aSJeff Kirsher int i;
4165e689cf4aSJeff Kirsher
4166e689cf4aSJeff Kirsher for (i = 0; i < N_TX_RINGS; i++) {
4167e689cf4aSJeff Kirsher cp->tx_tiny_bufs[i] =
4168e689cf4aSJeff Kirsher dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4169e689cf4aSJeff Kirsher &cp->tx_tiny_dvma[i], GFP_KERNEL);
4170e689cf4aSJeff Kirsher if (!cp->tx_tiny_bufs[i]) {
4171e689cf4aSJeff Kirsher cas_tx_tiny_free(cp);
4172dcc82bb0SChristophe JAILLET return -1;
4173dcc82bb0SChristophe JAILLET }
4174e689cf4aSJeff Kirsher }
4175e689cf4aSJeff Kirsher return 0;
4176e689cf4aSJeff Kirsher }
4177e689cf4aSJeff Kirsher
4178e689cf4aSJeff Kirsher
/* ndo_open handler: reset the chip if it was not already running,
 * allocate TX tiny buffers, RX descriptor pages and spares, request
 * the IRQ and initialize the hardware. Returns 0 or a negative errno,
 * unwinding all allocations on failure.
 */
static int cas_open(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	int hw_was_up, err;
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	hw_was_up = cp->hw_running;

	/* The power-management mutex protects the hw_running
	 * etc. state so it is safe to do this bit without cp->lock
	 */
	if (!cp->hw_running) {
		/* Reset the chip */
		cas_lock_all_save(cp, flags);
		/* We set the second arg to cas_reset to zero
		 * because cas_init_hw below will have its second
		 * argument set to non-zero, which will force
		 * autonegotiation to start.
		 */
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_unlock_all_restore(cp, flags);
	}

	err = -ENOMEM;
	if (cas_tx_tiny_alloc(cp) < 0)
		goto err_unlock;

	/* alloc rx descriptors */
	if (cas_alloc_rxds(cp) < 0)
		goto err_tx_tiny;

	/* allocate spares */
	cas_spare_init(cp);
	cas_spare_recover(cp, GFP_KERNEL);

	/* We can now request the interrupt as we know it's masked
	 * on the controller. cassini+ has up to 4 interrupts
	 * that can be used, but you need to do explicit pci interrupt
	 * mapping to expose them
	 */
	if (request_irq(cp->pdev->irq, cas_interrupt,
			IRQF_SHARED, dev->name, (void *) dev)) {
		netdev_err(cp->dev, "failed to request irq !\n");
		err = -EAGAIN;
		goto err_spare;
	}

#ifdef USE_NAPI
	napi_enable(&cp->napi);
#endif
	/* init hw */
	cas_lock_all_save(cp, flags);
	cas_clean_rings(cp);
	/* restart autonegotiation only if the chip was just reset above */
	cas_init_hw(cp, !hw_was_up);
	cp->opened = 1;
	cas_unlock_all_restore(cp, flags);

	netif_start_queue(dev);
	mutex_unlock(&cp->pm_mutex);
	return 0;

err_spare:
	cas_spare_free(cp);
	cas_free_rxds(cp);
err_tx_tiny:
	cas_tx_tiny_free(cp);
err_unlock:
	mutex_unlock(&cp->pm_mutex);
	return err;
}
4252e689cf4aSJeff Kirsher
/* ndo_stop handler: stop the queue, reset the chip, leave the PHY in
 * autonegotiation for the next open, and release IRQ, spares, RX pages
 * and tiny buffers. Always returns 0.
 */
static int cas_close(struct net_device *dev)
{
	unsigned long flags;
	struct cas *cp = netdev_priv(dev);

#ifdef USE_NAPI
	napi_disable(&cp->napi);
#endif
	/* Make sure we don't get distracted by suspend/resume */
	mutex_lock(&cp->pm_mutex);

	netif_stop_queue(dev);

	/* Stop traffic, mark us closed */
	cas_lock_all_save(cp, flags);
	cp->opened = 0;
	cas_reset(cp, 0);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	cas_clean_rings(cp);
	cas_unlock_all_restore(cp, flags);

	/* release resources only after the chip is quiesced */
	free_irq(cp->pdev->irq, (void *) dev);
	cas_spare_free(cp);
	cas_free_rxds(cp);
	cas_tx_tiny_free(cp);
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
4282e689cf4aSJeff Kirsher
/* ethtool statistics key names. NOTE(review): the order here presumably
 * must match the order of values produced by the ethtool stats fill
 * routine elsewhere in this file — verify before reordering entries.
 */
static struct {
	const char name[ETH_GSTRING_LEN];
} ethtool_cassini_statnames[] = {
	{"collisions"},
	{"rx_bytes"},
	{"rx_crc_errors"},
	{"rx_dropped"},
	{"rx_errors"},
	{"rx_fifo_errors"},
	{"rx_frame_errors"},
	{"rx_length_errors"},
	{"rx_over_errors"},
	{"rx_packets"},
	{"tx_aborted_errors"},
	{"tx_bytes"},
	{"tx_dropped"},
	{"tx_errors"},
	{"tx_fifo_errors"},
	{"tx_packets"}
};
#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4304e689cf4aSJeff Kirsher
/* Register set exposed through ethtool. Negative entries are PHY
 * registers: cas_read_regs() negates them and reads via the MDIO
 * interface instead of MMIO.
 */
static struct {
	const int offsets; /* neg. values for 2nd arg to cas_read_phy */
} ethtool_register_table[] = {
	{-MII_BMSR},
	{-MII_BMCR},
	{REG_CAWR},
	{REG_INF_BURST},
	{REG_BIM_CFG},
	{REG_RX_CFG},
	{REG_HP_CFG},
	{REG_MAC_TX_CFG},
	{REG_MAC_RX_CFG},
	{REG_MAC_CTRL_CFG},
	{REG_MAC_XIF_CFG},
	{REG_MIF_CFG},
	{REG_PCS_CFG},
	{REG_SATURN_PCFG},
	{REG_PCS_MII_STATUS},
	{REG_PCS_STATE_MACHINE},
	{REG_MAC_COLL_EXCESS},
	{REG_MAC_COLL_LATE}
};
#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4329e689cf4aSJeff Kirsher
/* Dump the first @len entries of ethtool_register_table into @ptr as a
 * packed array of u32 values. Negative table offsets name PHY registers;
 * non-negative ones are MMIO offsets. Takes cp->lock itself, so callers
 * must not hold it.
 */
static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
{
	unsigned long flags;
	u8 *dst = ptr;
	int i;

	spin_lock_irqsave(&cp->lock, flags);
	for (i = 0; i < len; i++, dst += sizeof(u32)) {
		u32 val;

		if (ethtool_register_table[i].offsets < 0) {
			/* PHY register: the table stores the negated reg #. */
			u16 hval = cas_phy_read(cp,
					-ethtool_register_table[i].offsets);

			val = hval;
		} else {
			val = readl(cp->regs +
				    ethtool_register_table[i].offsets);
		}
		memcpy(dst, &val, sizeof(val));
	}
	spin_unlock_irqrestore(&cp->lock, flags);
}
4351e689cf4aSJeff Kirsher
/* .ndo_get_stats: fold the per-ring counters and the hardware MAC error
 * counters into the aggregate slot net_stats[N_TX_RINGS] and return it.
 *
 * Locking: stat_lock[N_TX_RINGS] guards the aggregate slot (taken with
 * IRQs off); each per-ring stat_lock[i] is nested inside it while that
 * ring's counters are drained and zeroed.
 */
static struct net_device_stats *cas_get_stats(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cp->net_stats;
	unsigned long flags;
	int i;
	unsigned long tmp;

	/* we collate all of the stats into net_stats[N_TX_RING] */
	if (!cp->hw_running)
		return stats + N_TX_RINGS;	/* HW off: aggregate only */

	/* collect outstanding stats */
	/* WTZ: the Cassini spec gives these as 16 bit counters but
	 * stored in 32-bit words.  Added a mask of 0xffff to be safe,
	 * in case the chip somehow puts any garbage in the other bits.
	 * Also, counter usage didn't seem to mach what Adrian did
	 * in the parts of the code that set these quantities. Made
	 * that consistent.
	 */
	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
	stats[N_TX_RINGS].rx_crc_errors +=
		readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
	stats[N_TX_RINGS].rx_frame_errors +=
		readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
	stats[N_TX_RINGS].rx_length_errors +=
		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
#if 1
	/* Excessive + late collisions count both as aborted TX and as
	 * collisions; normal collisions are added on top.
	 */
	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
	stats[N_TX_RINGS].tx_aborted_errors += tmp;
	stats[N_TX_RINGS].collisions +=
		tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
#else
	stats[N_TX_RINGS].tx_aborted_errors +=
		readl(cp->regs + REG_MAC_COLL_EXCESS);
	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
		readl(cp->regs + REG_MAC_COLL_LATE);
#endif
	/* Counters are clear-on-read semantics here: reset HW counters
	 * so the next pass only sees new events.
	 */
	cas_clear_mac_err(cp);

	/* saved bits that are unique to ring 0 */
	spin_lock(&cp->stat_lock[0]);
	stats[N_TX_RINGS].collisions        += stats[0].collisions;
	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
	spin_unlock(&cp->stat_lock[0]);

	/* Drain every per-ring accumulator into the aggregate slot and
	 * zero it, so each counter is only ever added once.
	 */
	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock(&cp->stat_lock[i]);
		stats[N_TX_RINGS].rx_length_errors +=
			stats[i].rx_length_errors;
		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
		stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
		stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
		stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
		stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
		stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
		stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
		stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
		stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
		memset(stats + i, 0, sizeof(struct net_device_stats));
		spin_unlock(&cp->stat_lock[i]);
	}
	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
	return stats + N_TX_RINGS;
}
4422e689cf4aSJeff Kirsher
4423e689cf4aSJeff Kirsher
/* .ndo_set_rx_mode: reprogram the RX MAC filters (promiscuous / hash)
 * to match the device's current multicast list and flags.
 *
 * The RX MAC must be disabled before the filters are rewritten; each
 * disable is polled for completion (up to STOP_TRIES * 10us) because the
 * hardware clears the enable bit asynchronously.
 */
static void cas_set_multicast(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	unsigned long flags;
	int limit = STOP_TRIES;

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);

	/* disable RX MAC and wait for completion */
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
		if (!limit--)
			break;	/* timed out; proceed anyway */
		udelay(10);
	}

	/* disable hash filter and wait for completion */
	limit = STOP_TRIES;
	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
		if (!limit--)
			break;	/* timed out; proceed anyway */
		udelay(10);
	}

	/* program hash filters */
	/* cas_setup_multicast() returns the filter-enable bits to OR back
	 * in; cache them in mac_rx_cfg for later re-init paths.
	 */
	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
	rxcfg |= rxcfg_new;
	/* Re-enable RX MAC (rxcfg still carries MAC_RX_CFG_EN from the
	 * initial read).
	 */
	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
	spin_unlock_irqrestore(&cp->lock, flags);
}
4461e689cf4aSJeff Kirsher
cas_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)4462e689cf4aSJeff Kirsher static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4463e689cf4aSJeff Kirsher {
4464e689cf4aSJeff Kirsher struct cas *cp = netdev_priv(dev);
4465e689cf4aSJeff Kirsher strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4466e689cf4aSJeff Kirsher strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4467e689cf4aSJeff Kirsher strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4468e689cf4aSJeff Kirsher }
4469f029c781SWolfram Sang
/* ethtool .get_link_ksettings: report port type, supported/advertised
 * modes, and the current (or configured) speed/duplex/autoneg state.
 *
 * Reads live PHY/PCS state under cp->lock when the HW is running;
 * otherwise falls back to the cached link_cntl configuration.
 */
static int cas_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct cas *cp = netdev_priv(dev);
	u16 bmcr;
	int full_duplex, speed, pause;
	unsigned long flags;
	enum link_state linkstate = link_up;
	u32 supported, advertising;

	advertising = 0;
	supported = SUPPORTED_Autoneg;
	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
		supported |= SUPPORTED_1000baseT_Full;
		advertising |= ADVERTISED_1000baseT_Full;
	}

	/* Record PHY settings if HW is on. */
	spin_lock_irqsave(&cp->lock, flags);
	bmcr = 0;
	linkstate = cp->lstate;
	if (CAS_PHY_MII(cp->phy_type)) {
		/* Copper (MII) PHY: full 10/100 mode set over TP. */
		cmd->base.port = PORT_MII;
		cmd->base.phy_address = cp->phy_addr;
		advertising |= ADVERTISED_TP | ADVERTISED_MII |
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;

		supported |=
			(SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_TP | SUPPORTED_MII);

		if (cp->hw_running) {
			/* Pause MIF autopolling while we read the PHY
			 * directly, then restore it.
			 */
			cas_mif_poll(cp, 0);
			bmcr = cas_phy_read(cp, MII_BMCR);
			cas_read_mii_link_mode(cp, &full_duplex,
					       &speed, &pause);
			cas_mif_poll(cp, 1);
		}

	} else {
		/* Fibre via the internal PCS. */
		cmd->base.port = PORT_FIBRE;
		cmd->base.phy_address = 0;
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;

		if (cp->hw_running) {
			/* pcs uses the same bits as mii */
			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
			cas_read_pcs_link_mode(cp, &full_duplex,
					       &speed, &pause);
		}
	}
	spin_unlock_irqrestore(&cp->lock, flags);

	if (bmcr & BMCR_ANENABLE) {
		/* Autoneg on: report the negotiated result. */
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
		cmd->base.speed =  ((speed == 10) ?
				    SPEED_10 :
				    ((speed == 1000) ?
				     SPEED_1000 : SPEED_100));
		cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		/* Forced mode: decode speed/duplex from the BMCR bits. */
		cmd->base.autoneg = AUTONEG_DISABLE;
		cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
				   SPEED_1000 :
				   ((bmcr & BMCR_SPEED100) ?
				    SPEED_100 : SPEED_10));
		cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}
	if (linkstate != link_up) {
		/* Force these to "unknown" if the link is not up and
		 * autonegotiation is enabled. We can set the link
		 * speed to 0, but not cmd->duplex,
		 * because its legal values are 0 and 1.  Ethtool will
		 * print the value reported in parentheses after the
		 * word "Unknown" for unrecognized values.
		 *
		 * If in forced mode, we report the speed and duplex
		 * settings that we configured.
		 */
		if (cp->link_cntl & BMCR_ANENABLE) {
			cmd->base.speed = 0;
			cmd->base.duplex = 0xff;
		} else {
			cmd->base.speed = SPEED_10;
			if (cp->link_cntl & BMCR_SPEED100) {
				cmd->base.speed = SPEED_100;
			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
				cmd->base.speed = SPEED_1000;
			}
			cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
				DUPLEX_FULL : DUPLEX_HALF;
		}
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
45802c784b00SPhilippe Reynes
cas_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)45812c784b00SPhilippe Reynes static int cas_set_link_ksettings(struct net_device *dev,
4582e689cf4aSJeff Kirsher const struct ethtool_link_ksettings *cmd)
4583e689cf4aSJeff Kirsher {
4584e689cf4aSJeff Kirsher struct cas *cp = netdev_priv(dev);
45852c784b00SPhilippe Reynes unsigned long flags;
45862c784b00SPhilippe Reynes u32 speed = cmd->base.speed;
4587e689cf4aSJeff Kirsher
4588e689cf4aSJeff Kirsher /* Verify the settings we care about. */
4589e689cf4aSJeff Kirsher if (cmd->base.autoneg != AUTONEG_ENABLE &&
45902c784b00SPhilippe Reynes cmd->base.autoneg != AUTONEG_DISABLE)
4591e689cf4aSJeff Kirsher return -EINVAL;
4592e689cf4aSJeff Kirsher
45932c784b00SPhilippe Reynes if (cmd->base.autoneg == AUTONEG_DISABLE &&
45942c784b00SPhilippe Reynes ((speed != SPEED_1000 &&
4595e689cf4aSJeff Kirsher speed != SPEED_100 &&
4596e689cf4aSJeff Kirsher speed != SPEED_10) ||
45972c784b00SPhilippe Reynes (cmd->base.duplex != DUPLEX_HALF &&
4598e689cf4aSJeff Kirsher cmd->base.duplex != DUPLEX_FULL)))
4599e689cf4aSJeff Kirsher return -EINVAL;
4600e689cf4aSJeff Kirsher
46012c784b00SPhilippe Reynes /* Apply settings and restart link process. */
46022c784b00SPhilippe Reynes spin_lock_irqsave(&cp->lock, flags);
4603e689cf4aSJeff Kirsher cas_begin_auto_negotiation(cp, cmd);
4604e689cf4aSJeff Kirsher spin_unlock_irqrestore(&cp->lock, flags);
4605e689cf4aSJeff Kirsher return 0;
4606e689cf4aSJeff Kirsher }
4607e689cf4aSJeff Kirsher
cas_nway_reset(struct net_device * dev)4608e689cf4aSJeff Kirsher static int cas_nway_reset(struct net_device *dev)
4609e689cf4aSJeff Kirsher {
4610e689cf4aSJeff Kirsher struct cas *cp = netdev_priv(dev);
4611e689cf4aSJeff Kirsher unsigned long flags;
4612e689cf4aSJeff Kirsher
4613e689cf4aSJeff Kirsher if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4614e689cf4aSJeff Kirsher return -EINVAL;
4615e689cf4aSJeff Kirsher
4616e689cf4aSJeff Kirsher /* Restart link process. */
4617e689cf4aSJeff Kirsher spin_lock_irqsave(&cp->lock, flags);
4618e689cf4aSJeff Kirsher cas_begin_auto_negotiation(cp, NULL);
4619e689cf4aSJeff Kirsher spin_unlock_irqrestore(&cp->lock, flags);
4620e689cf4aSJeff Kirsher
4621e689cf4aSJeff Kirsher return 0;
4622e689cf4aSJeff Kirsher }
4623e689cf4aSJeff Kirsher
cas_get_link(struct net_device * dev)4624e689cf4aSJeff Kirsher static u32 cas_get_link(struct net_device *dev)
4625e689cf4aSJeff Kirsher {
4626e689cf4aSJeff Kirsher struct cas *cp = netdev_priv(dev);
4627e689cf4aSJeff Kirsher return cp->lstate == link_up;
4628e689cf4aSJeff Kirsher }
4629e689cf4aSJeff Kirsher
cas_get_msglevel(struct net_device * dev)4630e689cf4aSJeff Kirsher static u32 cas_get_msglevel(struct net_device *dev)
4631e689cf4aSJeff Kirsher {
4632e689cf4aSJeff Kirsher struct cas *cp = netdev_priv(dev);
4633e689cf4aSJeff Kirsher return cp->msg_enable;
4634e689cf4aSJeff Kirsher }
4635e689cf4aSJeff Kirsher
/* ethtool .set_msglevel: install a new debug message-enable mask. */
static void cas_set_msglevel(struct net_device *dev, u32 value)
{
	struct cas *cp = netdev_priv(dev);

	cp->msg_enable = value;
}
4641e689cf4aSJeff Kirsher
cas_get_regs_len(struct net_device * dev)4642e689cf4aSJeff Kirsher static int cas_get_regs_len(struct net_device *dev)
4643e689cf4aSJeff Kirsher {
4644e689cf4aSJeff Kirsher struct cas *cp = netdev_priv(dev);
4645e689cf4aSJeff Kirsher return min_t(int, cp->casreg_len, CAS_MAX_REGS);
4646e689cf4aSJeff Kirsher }
4647e689cf4aSJeff Kirsher
cas_get_regs(struct net_device * dev,struct ethtool_regs * regs,void * p)4648e689cf4aSJeff Kirsher static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
46492c9ec169SChangcheng Deng void *p)
4650e689cf4aSJeff Kirsher {
4651e689cf4aSJeff Kirsher struct cas *cp = netdev_priv(dev);
4652e689cf4aSJeff Kirsher regs->version = 0;
4653e689cf4aSJeff Kirsher /* cas_read_regs handles locks (cp->lock). */
4654e689cf4aSJeff Kirsher cas_read_regs(cp, p, regs->len / sizeof(u32));
4655e689cf4aSJeff Kirsher }
4656e689cf4aSJeff Kirsher
cas_get_sset_count(struct net_device * dev,int sset)4657e689cf4aSJeff Kirsher static int cas_get_sset_count(struct net_device *dev, int sset)
4658e689cf4aSJeff Kirsher {
4659e689cf4aSJeff Kirsher switch (sset) {
4660e689cf4aSJeff Kirsher case ETH_SS_STATS:
4661e689cf4aSJeff Kirsher return CAS_NUM_STAT_KEYS;
4662e689cf4aSJeff Kirsher default:
4663e689cf4aSJeff Kirsher return -EOPNOTSUPP;
4664e689cf4aSJeff Kirsher }
4665e689cf4aSJeff Kirsher }
4666e689cf4aSJeff Kirsher
/* ethtool .get_strings: copy the statistic names; ETH_SS_STATS is the
 * only set cas_get_sset_count() admits, so no dispatch is needed.
 */
static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, &ethtool_cassini_statnames,
	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
}
4672e689cf4aSJeff Kirsher
/* ethtool .get_ethtool_stats: export the aggregated counters.
 *
 * The entry order here must match ethtool_cassini_statnames exactly;
 * the BUILD_BUG_ON enforces the count at compile time.
 */
static void cas_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *data)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cas_get_stats(cp->dev);
	u64 vals[] = {
		stats->collisions,
		stats->rx_bytes,
		stats->rx_crc_errors,
		stats->rx_dropped,
		stats->rx_errors,
		stats->rx_fifo_errors,
		stats->rx_frame_errors,
		stats->rx_length_errors,
		stats->rx_over_errors,
		stats->rx_packets,
		stats->tx_aborted_errors,
		stats->tx_bytes,
		stats->tx_dropped,
		stats->tx_errors,
		stats->tx_fifo_errors,
		stats->tx_packets,
	};

	BUILD_BUG_ON(ARRAY_SIZE(vals) != CAS_NUM_STAT_KEYS);
	memcpy(data, vals, sizeof(vals));
}
4697e689cf4aSJeff Kirsher
/* ethtool operations exported for this driver; wired up in probe. */
static const struct ethtool_ops cas_ethtool_ops = {
	.get_drvinfo		= cas_get_drvinfo,
	.nway_reset		= cas_nway_reset,
	.get_link		= cas_get_link,
	.get_msglevel		= cas_get_msglevel,
	.set_msglevel		= cas_set_msglevel,
	.get_regs_len		= cas_get_regs_len,
	.get_regs		= cas_get_regs,
	.get_sset_count		= cas_get_sset_count,
	.get_strings		= cas_get_strings,
	.get_ethtool_stats	= cas_get_ethtool_stats,
	.get_link_ksettings	= cas_get_link_ksettings,
	.set_link_ksettings	= cas_set_link_ksettings,
};
4712e689cf4aSJeff Kirsher
/* .ndo_eth_ioctl: handle the classic MII ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG). Unknown commands return -EOPNOTSUPP.
 *
 * MIF autopolling is paused around each direct PHY access and restored
 * afterwards, all under cp->lock.
 */
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cas *cp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int rc = -EOPNOTSUPP;

	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with open/close and power management and oops.
	 */
	mutex_lock(&cp->pm_mutex);
	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = cp->phy_addr;
		fallthrough;		/* and read the requested register */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		break;
	default:
		break;
	}

	mutex_unlock(&cp->pm_mutex);
	return rc;
}
4752e689cf4aSJeff Kirsher
/* When this chip sits underneath an Intel 31154 bridge, it is the
 * only subordinate device and we can tweak the bridge settings to
 * reflect that fact.
 */
static void cas_program_bridge(struct pci_dev *cas_pdev)
{
	struct pci_dev *pdev = cas_pdev->bus->self;	/* upstream bridge */
	u32 val;

	if (!pdev)
		return;

	/* Only the Intel 31154 (8086:537c) gets this treatment. */
	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
		return;

	/* Clear bit 10 (Bus Parking Control) in the Secondary
	 * Arbiter Control/Status Register which lives at offset
	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
	 * is much simpler so that's how we do this.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	val &= ~0x00040000;	/* bit 10 of byte 0x41 == bit 18 of dword 0x40 */
	pci_write_config_dword(pdev, 0x40, val);

	/* Max out the Multi-Transaction Timer settings since
	 * Cassini is the only device present.
	 *
	 * The register is 16-bit and lives at 0x50.  When the
	 * settings are enabled, it extends the GRANT# signal
	 * for a requestor after a transaction is complete.  This
	 * allows the next request to run without first needing
	 * to negotiate the GRANT# signal back.
	 *
	 * Bits 12:10 define the grant duration:
	 *
	 *	1	--	16 clocks
	 *	2	--	32 clocks
	 *	3	--	64 clocks
	 *	4	--	128 clocks
	 *	5	--	256 clocks
	 *
	 * All other values are illegal.
	 *
	 * Bits 09:00 define which REQ/GNT signal pairs get the
	 * GRANT# signal treatment.  We set them all.
	 */
	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);

	/* The Read Prefecth Policy register is 16-bit and sits at
	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
	 * enable it and max out all of the settings since only one
	 * device is sitting underneath and thus bandwidth sharing is
	 * not an issue.
	 *
	 * The register has several 3 bit fields, which indicates a
	 * multiplier applied to the base amount of prefetching the
	 * chip would do.  These fields are at:
	 *
	 *	15:13	---	ReRead Primary Bus
	 *	12:10	---	FirstRead Primary Bus
	 *	09:07	---	ReRead Secondary Bus
	 *	06:04	---	FirstRead Secondary Bus
	 *
	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
	 * get enabled on.  Bit 3 is a grouped enabler which controls
	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
	 * the individual REQ/GNT pairs [2:0].
	 */
	pci_write_config_word(pdev, 0x52,
			      (0x7 << 13) |
			      (0x7 << 10) |
			      (0x7 <<  7) |
			      (0x7 <<  4) |
			      (0xf <<  0));

	/* Force cacheline size to 0x8 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);

	/* Force latency timer to maximum setting so Cassini can
	 * sit on the bus as long as it likes.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
}
4836e689cf4aSJeff Kirsher
/* net_device operations; installed on the netdev at probe time. */
static const struct net_device_ops cas_netdev_ops = {
	.ndo_open		= cas_open,
	.ndo_stop		= cas_close,
	.ndo_start_xmit		= cas_start_xmit,
	.ndo_get_stats		= cas_get_stats,
	.ndo_set_rx_mode	= cas_set_multicast,
	.ndo_eth_ioctl		= cas_ioctl,
	.ndo_tx_timeout		= cas_tx_timeout,
	.ndo_change_mtu		= cas_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cas_netpoll,
#endif
};
4852e689cf4aSJeff Kirsher
cas_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)4853e689cf4aSJeff Kirsher static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4854e689cf4aSJeff Kirsher {
4855e689cf4aSJeff Kirsher static int cas_version_printed = 0;
4856e689cf4aSJeff Kirsher unsigned long casreg_len;
48571dd06ae8SGreg Kroah-Hartman struct net_device *dev;
4858e689cf4aSJeff Kirsher struct cas *cp;
4859e689cf4aSJeff Kirsher u16 pci_cmd;
4860e689cf4aSJeff Kirsher int i, err;
4861e689cf4aSJeff Kirsher u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4862e689cf4aSJeff Kirsher
4863e689cf4aSJeff Kirsher if (cas_version_printed++ == 0)
4864584c61ceSChristophe JAILLET pr_info("%s", version);
4865e689cf4aSJeff Kirsher
4866e689cf4aSJeff Kirsher err = pci_enable_device(pdev);
4867e689cf4aSJeff Kirsher if (err) {
4868e689cf4aSJeff Kirsher dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4869e689cf4aSJeff Kirsher return err;
4870e689cf4aSJeff Kirsher }
4871e689cf4aSJeff Kirsher
4872e689cf4aSJeff Kirsher if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4873e689cf4aSJeff Kirsher dev_err(&pdev->dev, "Cannot find proper PCI device "
4874e689cf4aSJeff Kirsher "base address, aborting\n");
4875e689cf4aSJeff Kirsher err = -ENODEV;
4876e689cf4aSJeff Kirsher goto err_out_disable_pdev;
4877e689cf4aSJeff Kirsher }
4878e689cf4aSJeff Kirsher
4879e689cf4aSJeff Kirsher dev = alloc_etherdev(sizeof(*cp));
4880e689cf4aSJeff Kirsher if (!dev) {
4881e689cf4aSJeff Kirsher err = -ENOMEM;
4882e689cf4aSJeff Kirsher goto err_out_disable_pdev;
4883e689cf4aSJeff Kirsher }
4884e689cf4aSJeff Kirsher SET_NETDEV_DEV(dev, &pdev->dev);
4885e689cf4aSJeff Kirsher
4886e689cf4aSJeff Kirsher err = pci_request_regions(pdev, dev->name);
4887e689cf4aSJeff Kirsher if (err) {
4888e689cf4aSJeff Kirsher dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4889e689cf4aSJeff Kirsher goto err_out_free_netdev;
4890e689cf4aSJeff Kirsher }
4891e689cf4aSJeff Kirsher pci_set_master(pdev);
4892e689cf4aSJeff Kirsher
4893e689cf4aSJeff Kirsher /* we must always turn on parity response or else parity
4894e689cf4aSJeff Kirsher * doesn't get generated properly. disable SERR/PERR as well.
4895e689cf4aSJeff Kirsher * in addition, we want to turn MWI on.
4896e689cf4aSJeff Kirsher */
4897e689cf4aSJeff Kirsher pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4898e689cf4aSJeff Kirsher pci_cmd &= ~PCI_COMMAND_SERR;
4899e689cf4aSJeff Kirsher pci_cmd |= PCI_COMMAND_PARITY;
4900e689cf4aSJeff Kirsher pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4901e689cf4aSJeff Kirsher if (pci_try_set_mwi(pdev))
4902e689cf4aSJeff Kirsher pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4903e689cf4aSJeff Kirsher
4904e689cf4aSJeff Kirsher cas_program_bridge(pdev);
4905e689cf4aSJeff Kirsher
4906fe3881cfSJoe Perches /*
4907e689cf4aSJeff Kirsher * On some architectures, the default cache line size set
4908e689cf4aSJeff Kirsher * by pci_try_set_mwi reduces perforamnce. We have to increase
4909e689cf4aSJeff Kirsher * it for this case. To start, we'll print some configuration
4910e689cf4aSJeff Kirsher * data.
4911e689cf4aSJeff Kirsher */
4912e689cf4aSJeff Kirsher #if 1
4913e689cf4aSJeff Kirsher pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4914e689cf4aSJeff Kirsher &orig_cacheline_size);
4915e689cf4aSJeff Kirsher if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4916e689cf4aSJeff Kirsher cas_cacheline_size =
4917e689cf4aSJeff Kirsher (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4918e689cf4aSJeff Kirsher CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4919e689cf4aSJeff Kirsher if (pci_write_config_byte(pdev,
4920e689cf4aSJeff Kirsher PCI_CACHE_LINE_SIZE,
4921e689cf4aSJeff Kirsher cas_cacheline_size)) {
4922e689cf4aSJeff Kirsher dev_err(&pdev->dev, "Could not set PCI cache "
4923e689cf4aSJeff Kirsher "line size\n");
4924e689cf4aSJeff Kirsher goto err_out_free_res;
4925e689cf4aSJeff Kirsher }
4926e689cf4aSJeff Kirsher }
4927e689cf4aSJeff Kirsher #endif
49285a730153SQiushi Wu
4929e689cf4aSJeff Kirsher
4930e689cf4aSJeff Kirsher /* Configure DMA attributes. */
4931e689cf4aSJeff Kirsher err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4932e689cf4aSJeff Kirsher if (err) {
4933e689cf4aSJeff Kirsher dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4934e689cf4aSJeff Kirsher goto err_out_free_res;
4935584c61ceSChristophe JAILLET }
4936e689cf4aSJeff Kirsher
4937584c61ceSChristophe JAILLET casreg_len = pci_resource_len(pdev, 0);
4938e689cf4aSJeff Kirsher
4939e689cf4aSJeff Kirsher cp = netdev_priv(dev);
4940e689cf4aSJeff Kirsher cp->pdev = pdev;
4941e689cf4aSJeff Kirsher #if 1
4942e689cf4aSJeff Kirsher /* A value of 0 indicates we never explicitly set it */
4943e689cf4aSJeff Kirsher cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
4944e689cf4aSJeff Kirsher #endif
4945e689cf4aSJeff Kirsher cp->dev = dev;
4946e689cf4aSJeff Kirsher cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
4947e689cf4aSJeff Kirsher cassini_debug;
4948e689cf4aSJeff Kirsher
4949e689cf4aSJeff Kirsher #if defined(CONFIG_SPARC)
4950e689cf4aSJeff Kirsher cp->of_node = pci_device_to_OF_node(pdev);
4951e689cf4aSJeff Kirsher #endif
4952e689cf4aSJeff Kirsher
4953e689cf4aSJeff Kirsher cp->link_transition = LINK_TRANSITION_UNKNOWN;
4954e689cf4aSJeff Kirsher cp->link_transition_jiffies_valid = 0;
4955e689cf4aSJeff Kirsher
4956e689cf4aSJeff Kirsher spin_lock_init(&cp->lock);
4957e689cf4aSJeff Kirsher spin_lock_init(&cp->rx_inuse_lock);
4958e689cf4aSJeff Kirsher spin_lock_init(&cp->rx_spare_lock);
4959e689cf4aSJeff Kirsher for (i = 0; i < N_TX_RINGS; i++) {
4960e689cf4aSJeff Kirsher spin_lock_init(&cp->stat_lock[i]);
4961e689cf4aSJeff Kirsher spin_lock_init(&cp->tx_lock[i]);
4962e689cf4aSJeff Kirsher }
4963e689cf4aSJeff Kirsher spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
4964e689cf4aSJeff Kirsher mutex_init(&cp->pm_mutex);
4965e689cf4aSJeff Kirsher
4966e689cf4aSJeff Kirsher timer_setup(&cp->link_timer, cas_link_timer, 0);
4967e689cf4aSJeff Kirsher
4968e689cf4aSJeff Kirsher #if 1
4969e689cf4aSJeff Kirsher /* Just in case the implementation of atomic operations
49700822c5d9SKees Cook * change so that an explicit initialization is necessary.
49710822c5d9SKees Cook */
4972e689cf4aSJeff Kirsher atomic_set(&cp->reset_task_pending, 0);
4973e689cf4aSJeff Kirsher atomic_set(&cp->reset_task_pending_all, 0);
4974e689cf4aSJeff Kirsher atomic_set(&cp->reset_task_pending_spare, 0);
4975e689cf4aSJeff Kirsher atomic_set(&cp->reset_task_pending_mtu, 0);
4976e689cf4aSJeff Kirsher #endif
4977e689cf4aSJeff Kirsher INIT_WORK(&cp->reset_task, cas_reset_task);
4978e689cf4aSJeff Kirsher
4979e689cf4aSJeff Kirsher /* Default link parameters */
4980e689cf4aSJeff Kirsher if (link_mode >= 0 && link_mode < 6)
4981e689cf4aSJeff Kirsher cp->link_cntl = link_modes[link_mode];
4982e689cf4aSJeff Kirsher else
4983e689cf4aSJeff Kirsher cp->link_cntl = BMCR_ANENABLE;
4984e689cf4aSJeff Kirsher cp->lstate = link_down;
4985e689cf4aSJeff Kirsher cp->link_transition = LINK_TRANSITION_LINK_DOWN;
4986e689cf4aSJeff Kirsher netif_carrier_off(cp->dev);
4987e689cf4aSJeff Kirsher cp->timer_ticks = 0;
4988e689cf4aSJeff Kirsher
4989e689cf4aSJeff Kirsher /* give us access to cassini registers */
4990e689cf4aSJeff Kirsher cp->regs = pci_iomap(pdev, 0, casreg_len);
4991e689cf4aSJeff Kirsher if (!cp->regs) {
4992e689cf4aSJeff Kirsher dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
4993e689cf4aSJeff Kirsher goto err_out_free_res;
4994e689cf4aSJeff Kirsher }
4995e689cf4aSJeff Kirsher cp->casreg_len = casreg_len;
4996e689cf4aSJeff Kirsher
4997e689cf4aSJeff Kirsher pci_save_state(pdev);
4998e689cf4aSJeff Kirsher cas_check_pci_invariants(cp);
4999e689cf4aSJeff Kirsher cas_hard_reset(cp);
5000e689cf4aSJeff Kirsher cas_reset(cp, 0);
5001e689cf4aSJeff Kirsher if (cas_check_invariants(cp))
5002e689cf4aSJeff Kirsher goto err_out_iounmap;
5003e689cf4aSJeff Kirsher if (cp->cas_flags & CAS_FLAG_SATURN)
5004e689cf4aSJeff Kirsher cas_saturn_firmware_init(cp);
5005e689cf4aSJeff Kirsher
5006e689cf4aSJeff Kirsher cp->init_block =
5007e689cf4aSJeff Kirsher dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
500815627e84SBen Hutchings &cp->block_dvma, GFP_KERNEL);
5009e689cf4aSJeff Kirsher if (!cp->init_block) {
50105333fdbeSAishwarya Ramakrishnan dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5011dcc82bb0SChristophe JAILLET goto err_out_iounmap;
5012dcc82bb0SChristophe JAILLET }
5013e689cf4aSJeff Kirsher
5014e689cf4aSJeff Kirsher for (i = 0; i < N_TX_RINGS; i++)
5015e689cf4aSJeff Kirsher cp->init_txds[i] = cp->init_block->txds[i];
5016e689cf4aSJeff Kirsher
5017e689cf4aSJeff Kirsher for (i = 0; i < N_RX_DESC_RINGS; i++)
5018e689cf4aSJeff Kirsher cp->init_rxds[i] = cp->init_block->rxds[i];
5019e689cf4aSJeff Kirsher
5020e689cf4aSJeff Kirsher for (i = 0; i < N_RX_COMP_RINGS; i++)
5021e689cf4aSJeff Kirsher cp->init_rxcs[i] = cp->init_block->rxcs[i];
5022e689cf4aSJeff Kirsher
5023e689cf4aSJeff Kirsher for (i = 0; i < N_RX_FLOWS; i++)
5024e689cf4aSJeff Kirsher skb_queue_head_init(&cp->rx_flows[i]);
5025e689cf4aSJeff Kirsher
5026e689cf4aSJeff Kirsher dev->netdev_ops = &cas_netdev_ops;
5027e689cf4aSJeff Kirsher dev->ethtool_ops = &cas_ethtool_ops;
5028e689cf4aSJeff Kirsher dev->watchdog_timeo = CAS_TX_TIMEOUT;
5029e689cf4aSJeff Kirsher
5030e689cf4aSJeff Kirsher #ifdef USE_NAPI
5031e689cf4aSJeff Kirsher netif_napi_add(dev, &cp->napi, cas_poll);
5032e689cf4aSJeff Kirsher #endif
5033e689cf4aSJeff Kirsher dev->irq = pdev->irq;
5034e689cf4aSJeff Kirsher dev->dma = 0;
5035b48b89f9SJakub Kicinski
5036e689cf4aSJeff Kirsher /* Cassini features. */
5037e689cf4aSJeff Kirsher if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5038e689cf4aSJeff Kirsher dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5039e689cf4aSJeff Kirsher
5040e689cf4aSJeff Kirsher dev->features |= NETIF_F_HIGHDMA;
5041e689cf4aSJeff Kirsher
5042e689cf4aSJeff Kirsher /* MTU range: 60 - varies or 9000 */
5043e689cf4aSJeff Kirsher dev->min_mtu = CAS_MIN_MTU;
5044e689cf4aSJeff Kirsher dev->max_mtu = CAS_MAX_MTU;
5045e689cf4aSJeff Kirsher
5046540bfe30SJarod Wilson if (register_netdev(dev)) {
5047540bfe30SJarod Wilson dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5048540bfe30SJarod Wilson goto err_out_free_consistent;
5049540bfe30SJarod Wilson }
5050e689cf4aSJeff Kirsher
5051e689cf4aSJeff Kirsher i = readl(cp->regs + REG_BIM_CFG);
5052e689cf4aSJeff Kirsher netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5053e689cf4aSJeff Kirsher (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5054e689cf4aSJeff Kirsher (i & BIM_CFG_32BIT) ? "32" : "64",
5055e689cf4aSJeff Kirsher (i & BIM_CFG_66MHZ) ? "66" : "33",
5056e689cf4aSJeff Kirsher (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5057e689cf4aSJeff Kirsher dev->dev_addr);
5058e689cf4aSJeff Kirsher
5059e689cf4aSJeff Kirsher pci_set_drvdata(pdev, dev);
5060e689cf4aSJeff Kirsher cp->hw_running = 1;
5061e689cf4aSJeff Kirsher cas_entropy_reset(cp);
5062e689cf4aSJeff Kirsher cas_phy_init(cp);
5063e689cf4aSJeff Kirsher cas_begin_auto_negotiation(cp, NULL);
5064e689cf4aSJeff Kirsher return 0;
5065e689cf4aSJeff Kirsher
5066e689cf4aSJeff Kirsher err_out_free_consistent:
5067e689cf4aSJeff Kirsher dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
5068e689cf4aSJeff Kirsher cp->init_block, cp->block_dvma);
5069e689cf4aSJeff Kirsher
5070e689cf4aSJeff Kirsher err_out_iounmap:
5071dcc82bb0SChristophe JAILLET mutex_lock(&cp->pm_mutex);
5072e689cf4aSJeff Kirsher if (cp->hw_running)
5073e689cf4aSJeff Kirsher cas_shutdown(cp);
5074e689cf4aSJeff Kirsher mutex_unlock(&cp->pm_mutex);
5075e689cf4aSJeff Kirsher
5076e689cf4aSJeff Kirsher vfree(cp->fw_data);
5077e689cf4aSJeff Kirsher
5078e689cf4aSJeff Kirsher pci_iounmap(pdev, cp->regs);
5079e689cf4aSJeff Kirsher
5080*412cd77aSChristophe JAILLET
5081*412cd77aSChristophe JAILLET err_out_free_res:
5082e689cf4aSJeff Kirsher pci_release_regions(pdev);
5083e689cf4aSJeff Kirsher
5084e689cf4aSJeff Kirsher /* Try to restore it in case the error occurred after we
5085e689cf4aSJeff Kirsher * set it.
5086e689cf4aSJeff Kirsher */
5087e689cf4aSJeff Kirsher pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5088e689cf4aSJeff Kirsher
5089e689cf4aSJeff Kirsher err_out_free_netdev:
5090e689cf4aSJeff Kirsher free_netdev(dev);
5091e689cf4aSJeff Kirsher
5092e689cf4aSJeff Kirsher err_out_disable_pdev:
5093e689cf4aSJeff Kirsher pci_disable_device(pdev);
5094e689cf4aSJeff Kirsher return -ENODEV;
5095e689cf4aSJeff Kirsher }
5096e689cf4aSJeff Kirsher
/* Device teardown: detach from the network stack, stop the hardware,
 * and release every resource acquired by the probe path, in reverse
 * order of acquisition.
 */
static void cas_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp;
	if (!dev)
		return;

	cp = netdev_priv(dev);
	/* Stop new traffic and ndo callbacks before tearing anything down. */
	unregister_netdev(dev);

	/* PHY firmware image loaded with vmalloc() during probe (may be
	 * NULL if no firmware was loaded; vfree(NULL) is a no-op).
	 */
	vfree(cp->fw_data);

	/* pm_mutex serializes against suspend/resume; make sure no reset
	 * work is still running before shutting the chip down.
	 */
	mutex_lock(&cp->pm_mutex);
	cancel_work_sync(&cp->reset_task);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

#if 1
	if (cp->orig_cacheline_size) {
		/* Restore the cache line size if we had modified
		 * it.
		 */
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				      cp->orig_cacheline_size);
	}
#endif
	/* Free the DMA init block, unmap registers, then release PCI
	 * resources last — cp lives inside dev, so free_netdev() must
	 * come after the last use of cp.
	 */
	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
			  cp->init_block, cp->block_dvma);
	pci_iounmap(pdev, cp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
5131e689cf4aSJeff Kirsher
/* PM suspend callback: quiesce DMA (if the interface is open) and
 * power the chip down.  Returns 0 always; suspend is never refused.
 */
static int __maybe_unused cas_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	/* If the driver is opened, we stop the DMA */
	if (cp->opened) {
		/* Keep the stack from queueing packets while suspended. */
		netif_device_detach(dev);

		cas_lock_all_save(cp, flags);

		/* We can set the second arg of cas_reset to 0
		 * because on resume, we'll call cas_init_hw with
		 * its second arg set so that autonegotiation is
		 * restarted.
		 */
		cas_reset(cp, 0);
		cas_clean_rings(cp);
		cas_unlock_all_restore(cp, flags);
	}

	/* Power down even if the interface was never opened. */
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	return 0;
}
5162e689cf4aSJeff Kirsher
/* PM resume callback: bring the chip back out of reset and, if the
 * interface was open at suspend time, re-initialize the hardware
 * (cas_init_hw() with restart_aneg set, matching the comment in
 * cas_suspend()) and reattach the device to the stack.
 */
static int __maybe_unused cas_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct cas *cp = netdev_priv(dev);

	netdev_info(dev, "resuming\n");

	mutex_lock(&cp->pm_mutex);
	cas_hard_reset(cp);
	if (cp->opened) {
		unsigned long flags;
		cas_lock_all_save(cp, flags);
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_clean_rings(cp);
		cas_init_hw(cp, 1);
		cas_unlock_all_restore(cp, flags);

		netif_device_attach(dev);
	}
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
5186e689cf4aSJeff Kirsher
5187e689cf4aSJeff Kirsher static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);
5188e689cf4aSJeff Kirsher
5189e689cf4aSJeff Kirsher static struct pci_driver cas_driver = {
5190f193f4ebSVaibhav Gupta .name = DRV_MODULE_NAME,
5191f193f4ebSVaibhav Gupta .id_table = cas_pci_tbl,
5192e689cf4aSJeff Kirsher .probe = cas_init_one,
5193e689cf4aSJeff Kirsher .remove = cas_remove_one,
5194e689cf4aSJeff Kirsher .driver.pm = &cas_pm_ops,
5195e689cf4aSJeff Kirsher };
5196e689cf4aSJeff Kirsher
cas_init(void)5197f73d12bdSBill Pemberton static int __init cas_init(void)
5198f193f4ebSVaibhav Gupta {
5199e689cf4aSJeff Kirsher if (linkdown_timeout > 0)
5200e689cf4aSJeff Kirsher link_transition_timeout = linkdown_timeout * HZ;
5201e689cf4aSJeff Kirsher else
5202e689cf4aSJeff Kirsher link_transition_timeout = 0;
5203e689cf4aSJeff Kirsher
5204e689cf4aSJeff Kirsher return pci_register_driver(&cas_driver);
5205e689cf4aSJeff Kirsher }
5206e689cf4aSJeff Kirsher
/* Module exit point: unregistering the driver invokes
 * cas_remove_one() for every bound device.
 */
static void __exit cas_cleanup(void)
{
	pci_unregister_driver(&cas_driver);
}
5211e689cf4aSJeff Kirsher
5212e689cf4aSJeff Kirsher module_init(cas_init);
5213e689cf4aSJeff Kirsher module_exit(cas_cleanup);
5214e689cf4aSJeff Kirsher