xref: /openbmc/linux/drivers/net/ethernet/sun/sunhme.c (revision 3d40aed8)
1 // SPDX-License-Identifier: GPL-2.0
2 /* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
3  *           auto carrier detecting ethernet driver.  Also known as the
4  *           "Happy Meal Ethernet" found on SunSwift SBUS cards.
5  *
6  * Copyright (C) 1996, 1998, 1999, 2002, 2003,
7  *		2006, 2008 David S. Miller (davem@davemloft.net)
8  *
9  * Changes :
10  * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
11  *   - port to non-sparc architectures. Tested only on x86 and
12  *     only currently works with QFE PCI cards.
13  *   - ability to specify the MAC address at module load time by passing this
14  *     argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
15  */
16 
17 #include <linux/bitops.h>
18 #include <linux/crc32.h>
19 #include <linux/delay.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/errno.h>
22 #include <linux/etherdevice.h>
23 #include <linux/ethtool.h>
24 #include <linux/fcntl.h>
25 #include <linux/in.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/io.h>
29 #include <linux/ioport.h>
30 #include <linux/kernel.h>
31 #include <linux/mii.h>
32 #include <linux/mm.h>
33 #include <linux/module.h>
34 #include <linux/netdevice.h>
35 #include <linux/of.h>
36 #include <linux/of_device.h>
37 #include <linux/pci.h>
38 #include <linux/platform_device.h>
39 #include <linux/random.h>
40 #include <linux/skbuff.h>
41 #include <linux/slab.h>
42 #include <linux/string.h>
43 #include <linux/types.h>
44 #include <linux/uaccess.h>
45 
46 #include <asm/byteorder.h>
47 #include <asm/dma.h>
48 #include <asm/irq.h>
49 
50 #ifdef CONFIG_SPARC
51 #include <asm/auxio.h>
52 #include <asm/idprom.h>
53 #include <asm/openprom.h>
54 #include <asm/oplib.h>
55 #include <asm/prom.h>
56 #endif
57 
58 #include "sunhme.h"
59 
60 #define DRV_NAME	"sunhme"
61 
62 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
63 MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
64 MODULE_LICENSE("GPL");
65 
66 static int macaddr[6];
67 
68 /* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
69 module_param_array(macaddr, int, NULL, 0);
70 MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
71 
72 #ifdef CONFIG_SBUS
73 static struct quattro *qfe_sbus_list;
74 #endif
75 
76 #ifdef CONFIG_PCI
77 static struct quattro *qfe_pci_list;
78 #endif
79 
80 #define hme_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
81 #define HMD hme_debug
82 
83 /* "Auto Switch Debug" aka phy debug */
84 #if 1
85 #define ASD hme_debug
86 #else
87 #define ASD(...)
88 #endif
89 
90 #if 0
91 struct hme_tx_logent {
92 	unsigned int tstamp;
93 	int tx_new, tx_old;
94 	unsigned int action;
95 #define TXLOG_ACTION_IRQ	0x01
96 #define TXLOG_ACTION_TXMIT	0x02
97 #define TXLOG_ACTION_TBUSY	0x04
98 #define TXLOG_ACTION_NBUFS	0x08
99 	unsigned int status;
100 };
101 #define TX_LOG_LEN	128
102 static struct hme_tx_logent tx_log[TX_LOG_LEN];
103 static int txlog_cur_entry;
104 static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
105 {
106 	struct hme_tx_logent *tlp;
107 	unsigned long flags;
108 
109 	local_irq_save(flags);
110 	tlp = &tx_log[txlog_cur_entry];
111 	tlp->tstamp = (unsigned int)jiffies;
112 	tlp->tx_new = hp->tx_new;
113 	tlp->tx_old = hp->tx_old;
114 	tlp->action = a;
115 	tlp->status = s;
116 	txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
117 	local_irq_restore(flags);
118 }
119 static __inline__ void tx_dump_log(void)
120 {
121 	int i, this;
122 
123 	this = txlog_cur_entry;
124 	for (i = 0; i < TX_LOG_LEN; i++) {
125 		pr_err("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
126 		       tx_log[this].tstamp,
127 		       tx_log[this].tx_new, tx_log[this].tx_old,
128 		       tx_log[this].action, tx_log[this].status);
129 		this = (this + 1) & (TX_LOG_LEN - 1);
130 	}
131 }
132 #else
133 #define tx_add_log(hp, a, s)
134 #define tx_dump_log()
135 #endif
136 
137 #define DEFAULT_IPG0      16 /* For lance-mode only */
138 #define DEFAULT_IPG1       8 /* For all modes */
139 #define DEFAULT_IPG2       4 /* For all modes */
140 #define DEFAULT_JAMSIZE    4 /* Toe jam */
141 
142 /* NOTE: In the descriptor writes one _must_ write the address
143  *	 member _first_.  The card must not be allowed to see
144  *	 the updated descriptor flags until the address is
145  *	 correct.  I've added a write memory barrier between
146  *	 the two stores so that I can sleep well at night... -DaveM
147  */
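
/* Illustrative sketch, not driver code (the example_* name is made up): this
 * mirrors the ordering the hme_write_rxd()/hme_write_txd() helpers below
 * implement for the PCI case (the SBUS variants skip the endian swap).  The
 * OWN bit lives in the flags word, so the buffer address must reach memory
 * before the flags hand the descriptor to the card; dma_wmb() enforces that.
 */
#if 0
static void example_give_rxd_to_hw(struct happy_meal_rxd *rxd, u32 dma_addr)
{
	/* 1) Publish the DMA address of the receive buffer. */
	rxd->rx_addr = (__force hme32)cpu_to_le32(dma_addr);
	/* 2) Order the address store before the ownership store. */
	dma_wmb();
	/* 3) Only now may the card see OWN and start DMA into the buffer. */
	rxd->rx_flags = (__force hme32)cpu_to_le32(RXFLAG_OWN |
			((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16));
}
#endif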
148 
149 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
150 static void sbus_hme_write32(void __iomem *reg, u32 val)
151 {
152 	sbus_writel(val, reg);
153 }
154 
155 static u32 sbus_hme_read32(void __iomem *reg)
156 {
157 	return sbus_readl(reg);
158 }
159 
160 static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
161 {
162 	rxd->rx_addr = (__force hme32)addr;
163 	dma_wmb();
164 	rxd->rx_flags = (__force hme32)flags;
165 }
166 
167 static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
168 {
169 	txd->tx_addr = (__force hme32)addr;
170 	dma_wmb();
171 	txd->tx_flags = (__force hme32)flags;
172 }
173 
174 static u32 sbus_hme_read_desc32(hme32 *p)
175 {
176 	return (__force u32)*p;
177 }
178 
179 static void pci_hme_write32(void __iomem *reg, u32 val)
180 {
181 	writel(val, reg);
182 }
183 
184 static u32 pci_hme_read32(void __iomem *reg)
185 {
186 	return readl(reg);
187 }
188 
189 static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
190 {
191 	rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
192 	dma_wmb();
193 	rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
194 }
195 
196 static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
197 {
198 	txd->tx_addr = (__force hme32)cpu_to_le32(addr);
199 	dma_wmb();
200 	txd->tx_flags = (__force hme32)cpu_to_le32(flags);
201 }
202 
203 static u32 pci_hme_read_desc32(hme32 *p)
204 {
205 	return le32_to_cpup((__le32 *)p);
206 }
207 
208 #define hme_write32(__hp, __reg, __val) \
209 	((__hp)->write32((__reg), (__val)))
210 #define hme_read32(__hp, __reg) \
211 	((__hp)->read32(__reg))
212 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
213 	((__hp)->write_rxd((__rxd), (__flags), (__addr)))
214 #define hme_write_txd(__hp, __txd, __flags, __addr) \
215 	((__hp)->write_txd((__txd), (__flags), (__addr)))
216 #define hme_read_desc32(__hp, __p) \
217 	((__hp)->read_desc32(__p))
218 #else
219 #ifdef CONFIG_SBUS
220 /* SBUS only compilation */
221 #define hme_write32(__hp, __reg, __val) \
222 	sbus_writel((__val), (__reg))
223 #define hme_read32(__hp, __reg) \
224 	sbus_readl(__reg)
225 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
226 do {	(__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
227 	dma_wmb(); \
228 	(__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
229 } while(0)
230 #define hme_write_txd(__hp, __txd, __flags, __addr) \
231 do {	(__txd)->tx_addr = (__force hme32)(u32)(__addr); \
232 	dma_wmb(); \
233 	(__txd)->tx_flags = (__force hme32)(u32)(__flags); \
234 } while(0)
235 #define hme_read_desc32(__hp, __p)	((__force u32)(hme32)*(__p))
236 #else
237 /* PCI only compilation */
238 #define hme_write32(__hp, __reg, __val) \
239 	writel((__val), (__reg))
240 #define hme_read32(__hp, __reg) \
241 	readl(__reg)
242 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
243 do {	(__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
244 	dma_wmb(); \
245 	(__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
246 } while(0)
247 #define hme_write_txd(__hp, __txd, __flags, __addr) \
248 do {	(__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
249 	dma_wmb(); \
250 	(__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
251 } while(0)
252 static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
253 {
254 	return le32_to_cpup((__le32 *)p);
255 }
256 #endif
257 #endif
258 
259 
260 /* Oh yes, the MIF BitBang is mighty fun to program.  BitBucket is more like it. */
261 static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
262 {
263 	hme_write32(hp, tregs + TCVR_BBDATA, bit);
264 	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
265 	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
266 }
267 
268 #if 0
269 static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
270 {
271 	u32 ret;
272 
273 	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
274 	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
275 	ret = hme_read32(hp, tregs + TCVR_CFG);
276 	if (internal)
277 		ret &= TCV_CFG_MDIO0;
278 	else
279 		ret &= TCV_CFG_MDIO1;
280 
281 	return ret;
282 }
283 #endif
284 
285 static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
286 {
287 	u32 retval;
288 
289 	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
290 	udelay(1);
291 	retval = hme_read32(hp, tregs + TCVR_CFG);
292 	if (internal)
293 		retval &= TCV_CFG_MDIO0;
294 	else
295 		retval &= TCV_CFG_MDIO1;
296 	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
297 
298 	return retval;
299 }
300 
301 #define TCVR_FAILURE      0x80000000     /* Impossible MIF read value */
302 
303 static int happy_meal_bb_read(struct happy_meal *hp,
304 			      void __iomem *tregs, int reg)
305 {
306 	u32 tmp;
307 	int retval = 0;
308 	int i;
309 
310 	/* Enable the MIF BitBang outputs. */
311 	hme_write32(hp, tregs + TCVR_BBOENAB, 1);
312 
313 	/* Force BitBang into the idle state. */
314 	for (i = 0; i < 32; i++)
315 		BB_PUT_BIT(hp, tregs, 1);
316 
317 	/* Give it the read sequence. */
318 	BB_PUT_BIT(hp, tregs, 0);
319 	BB_PUT_BIT(hp, tregs, 1);
320 	BB_PUT_BIT(hp, tregs, 1);
321 	BB_PUT_BIT(hp, tregs, 0);
322 
323 	/* Give it the PHY address. */
324 	tmp = hp->paddr & 0xff;
325 	for (i = 4; i >= 0; i--)
326 		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
327 
328 	/* Tell it what register we want to read. */
329 	tmp = (reg & 0xff);
330 	for (i = 4; i >= 0; i--)
331 		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
332 
333 	/* Close down the MIF BitBang outputs. */
334 	hme_write32(hp, tregs + TCVR_BBOENAB, 0);
335 
336 	/* Now read in the value. */
337 	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
338 	for (i = 15; i >= 0; i--)
339 		retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
340 	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
341 	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
342 	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
343 	ASD("reg=%d value=%x\n", reg, retval);
344 	return retval;
345 }
346 
347 static void happy_meal_bb_write(struct happy_meal *hp,
348 				void __iomem *tregs, int reg,
349 				unsigned short value)
350 {
351 	u32 tmp;
352 	int i;
353 
354 	ASD("reg=%d value=%x\n", reg, value);
355 
356 	/* Enable the MIF BitBang outputs. */
357 	hme_write32(hp, tregs + TCVR_BBOENAB, 1);
358 
359 	/* Force BitBang into the idle state. */
360 	for (i = 0; i < 32; i++)
361 		BB_PUT_BIT(hp, tregs, 1);
362 
363 	/* Give it write sequence. */
364 	BB_PUT_BIT(hp, tregs, 0);
365 	BB_PUT_BIT(hp, tregs, 1);
366 	BB_PUT_BIT(hp, tregs, 0);
367 	BB_PUT_BIT(hp, tregs, 1);
368 
369 	/* Give it the PHY address. */
370 	tmp = (hp->paddr & 0xff);
371 	for (i = 4; i >= 0; i--)
372 		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
373 
374 	/* Tell it what register we will be writing. */
375 	tmp = (reg & 0xff);
376 	for (i = 4; i >= 0; i--)
377 		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
378 
379 	/* Tell it to become ready for the bits. */
380 	BB_PUT_BIT(hp, tregs, 1);
381 	BB_PUT_BIT(hp, tregs, 0);
382 
383 	for (i = 15; i >= 0; i--)
384 		BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
385 
386 	/* Close down the MIF BitBang outputs. */
387 	hme_write32(hp, tregs + TCVR_BBOENAB, 0);
388 }
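
/* For reference, the bit-bang routines above clock out a standard IEEE 802.3
 * clause 22 MDIO frame, most significant bit first:
 *
 *     32 x '1'   preamble/idle
 *     01         start-of-frame
 *     10 / 01    opcode (read / write)
 *     aaaaa      5-bit PHY address (hp->paddr)
 *     rrrrr      5-bit register address
 *     z0 / 10    turnaround (released for reads, driven for writes)
 *     d[15..0]   16 data bits
 *
 * happy_meal_bb_read() drops TCVR_BBOENAB before the turnaround so the PHY
 * can drive the 16 data bits back on MDIO.
 */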
389 
390 #define TCVR_READ_TRIES   16
391 
392 static int happy_meal_tcvr_read(struct happy_meal *hp,
393 				void __iomem *tregs, int reg)
394 {
395 	int tries = TCVR_READ_TRIES;
396 	int retval;
397 
398 	if (hp->tcvr_type == none) {
399 		ASD("no transceiver, value=TCVR_FAILURE\n");
400 		return TCVR_FAILURE;
401 	}
402 
403 	if (!(hp->happy_flags & HFLAG_FENABLE)) {
404 		ASD("doing bit bang\n");
405 		return happy_meal_bb_read(hp, tregs, reg);
406 	}
407 
408 	hme_write32(hp, tregs + TCVR_FRAME,
409 		    (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
410 	while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
411 		udelay(20);
412 	if (!tries) {
413 		netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n");
414 		return TCVR_FAILURE;
415 	}
416 	retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
417 	ASD("reg=0x%02x value=%04x\n", reg, retval);
418 	return retval;
419 }
420 
421 #define TCVR_WRITE_TRIES  16
422 
423 static void happy_meal_tcvr_write(struct happy_meal *hp,
424 				  void __iomem *tregs, int reg,
425 				  unsigned short value)
426 {
427 	int tries = TCVR_WRITE_TRIES;
428 
429 	ASD("reg=0x%02x value=%04x\n", reg, value);
430 
431 	/* Welcome to Sun Microsystems, can I take your order please? */
432 	if (!(hp->happy_flags & HFLAG_FENABLE)) {
433 		happy_meal_bb_write(hp, tregs, reg, value);
434 		return;
435 	}
436 
437 	/* Would you like fries with that? */
438 	hme_write32(hp, tregs + TCVR_FRAME,
439 		    (FRAME_WRITE | (hp->paddr << 23) |
440 		     ((reg & 0xff) << 18) | (value & 0xffff)));
441 	while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
442 		udelay(20);
443 
444 	/* Anything else? */
445 	if (!tries)
446 		netdev_err(hp->dev, "Aieee, transceiver MIF write bolixed\n");
447 
448 	/* Fifty-two cents is your change, have a nice day. */
449 }
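
/* As the shifts above suggest, the MIF frame register appears to pack a whole
 * clause 22 transaction into a single 32-bit write:
 *
 *     FRAME_READ / FRAME_WRITE   start + opcode bits
 *     hp->paddr << 23            5-bit PHY address
 *     (reg & 0xff) << 18         5-bit register address
 *     value & 0xffff             write data (writes only)
 *
 * Bit 16 (the 0x10000 polled above) is set by the MIF once the transaction
 * has completed; for reads the low 16 bits then hold the value returned by
 * the PHY.
 */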
450 
451 /* Auto negotiation.  The scheme is very simple.  We have a timer routine
452  * that keeps watching the auto negotiation process as it progresses.
453  * The DP83840 is first told to start doing its thing, we set up the timer
454  * and place the timer state machine in its initial state.
455  *
456  * Here the timer peeks at the DP83840 status registers at each click to see
457  * if the auto negotiation has completed, we assume here that the DP83840 PHY
458  * will time out at some point and just tell us what (didn't) happen.  For
459  * complete coverage we only allow so many of the ticks at this level to run,
460  * when this has expired we print a warning message and try another strategy.
461  * This "other" strategy is to force the interface into various speed/duplex
462  * configurations and we stop when we see a link-up condition before the
463  * maximum number of "peek" ticks have occurred.
464  *
465  * Once a valid link status has been detected we configure the BigMAC and
466  * the rest of the Happy Meal to speak the most efficient protocol we could
467  * get a clean link for.  The priority for link configurations, highest first
468  * is:
469  *                 100 Base-T Full Duplex
470  *                 100 Base-T Half Duplex
471  *                 10 Base-T Full Duplex
472  *                 10 Base-T Half Duplex
473  *
474  * We start a new timer now, after a successful auto negotiation status has
475  * been detected.  This timer just waits for the link-up bit to get set in
476  * the BMCR of the DP83840.  When this occurs we print a kernel log message
477  * describing the link type in use and the fact that it is up.
478  *
479  * If a fatal error of some sort is signalled and detected in the interrupt
480  * service routine, and the chip is reset, or the link is ifconfig'd down
481  * and then back up, this entire process repeats itself all over again.
482  */
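
/* Summarized, hp->timer_state moves through (see happy_meal_timer() below):
 *
 *     arbwait  -> lupwait  -> asleep   (autoneg completes, then link comes up)
 *     arbwait  -> ltrywait -> asleep   (autoneg times out, a forced mode links)
 *     ltrywait -> arbwait              (every forced permutation failed;
 *                                       restart auto negotiation from scratch)
 */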
483 static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
484 {
485 	hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
486 
487 	/* Downgrade from full to half duplex.  Only possible
488 	 * via ethtool.
489 	 */
490 	if (hp->sw_bmcr & BMCR_FULLDPLX) {
491 		hp->sw_bmcr &= ~(BMCR_FULLDPLX);
492 		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
493 		return 0;
494 	}
495 
496 	/* Downgrade from 100 to 10. */
497 	if (hp->sw_bmcr & BMCR_SPEED100) {
498 		hp->sw_bmcr &= ~(BMCR_SPEED100);
499 		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
500 		return 0;
501 	}
502 
503 	/* We've tried everything. */
504 	return -1;
505 }
506 
507 static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
508 {
509 	hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
510 
511 	netdev_info(hp->dev,
512 		    "Link is up using %s transceiver at %dMb/s, %s Duplex.\n",
513 		    hp->tcvr_type == external ? "external" : "internal",
514 		    hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10,
515 		    hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");
516 }
517 
518 static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
519 {
520 	hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
521 
522 	netdev_info(hp->dev,
523 		    "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n",
524 		    hp->tcvr_type == external ? "external" : "internal",
525 		    hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10,
526 		    hp->sw_bmcr & BMCR_FULLDPLX ? "Full" : "Half");
527 }
528 
529 static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
530 {
531 	int full;
532 
533 	/* All we care about is making sure the bigmac tx_cfg has a
534 	 * proper duplex setting.
535 	 */
536 	if (hp->timer_state == arbwait) {
537 		hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
538 		if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
539 			goto no_response;
540 		if (hp->sw_lpa & LPA_100FULL)
541 			full = 1;
542 		else if (hp->sw_lpa & LPA_100HALF)
543 			full = 0;
544 		else if (hp->sw_lpa & LPA_10FULL)
545 			full = 1;
546 		else
547 			full = 0;
548 	} else {
549 		/* Forcing a link mode. */
550 		hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
551 		if (hp->sw_bmcr & BMCR_FULLDPLX)
552 			full = 1;
553 		else
554 			full = 0;
555 	}
556 
557 	/* Before changing other bits in the tx_cfg register, and in
558 	 * general any of other the TX config registers too, you
559 	 * must:
560 	 * 1) Clear Enable
561 	 * 2) Poll with reads until that bit reads back as zero
562 	 * 3) Make TX configuration changes
563 	 * 4) Set Enable once more
564 	 */
565 	hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
566 		    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
567 		    ~(BIGMAC_TXCFG_ENABLE));
568 	while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
569 		barrier();
570 	if (full) {
571 		hp->happy_flags |= HFLAG_FULL;
572 		hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
573 			    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
574 			    BIGMAC_TXCFG_FULLDPLX);
575 	} else {
576 		hp->happy_flags &= ~(HFLAG_FULL);
577 		hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
578 			    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
579 			    ~(BIGMAC_TXCFG_FULLDPLX));
580 	}
581 	hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
582 		    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
583 		    BIGMAC_TXCFG_ENABLE);
584 	return 0;
585 no_response:
586 	return 1;
587 }
588 
589 static int is_lucent_phy(struct happy_meal *hp)
590 {
591 	void __iomem *tregs = hp->tcvregs;
592 	unsigned short mr2, mr3;
593 	int ret = 0;
594 
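	/* Registers 2 and 3 are MII_PHYSID1/MII_PHYSID2; the magic values
	 * below appear to match Lucent/Agere PHYs, presumably because those
	 * parts lack the DP83840-specific CSCONFIG register that the rest of
	 * this driver pokes only when this helper returns false.
	 */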
595 	mr2 = happy_meal_tcvr_read(hp, tregs, 2);
596 	mr3 = happy_meal_tcvr_read(hp, tregs, 3);
597 	if ((mr2 & 0xffff) == 0x0180 &&
598 	    ((mr3 & 0xffff) >> 10) == 0x1d)
599 		ret = 1;
600 
601 	return ret;
602 }
603 
604 /* hp->happy_lock must be held */
605 static void
606 happy_meal_begin_auto_negotiation(struct happy_meal *hp,
607 				  void __iomem *tregs,
608 				  const struct ethtool_link_ksettings *ep)
609 {
610 	int timeout;
611 
612 	/* Read all of the registers we are interested in now. */
613 	hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
614 	hp->sw_bmcr      = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
615 	hp->sw_physid1   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
616 	hp->sw_physid2   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
617 
618 	/* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
619 
620 	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
621 	if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
622 		/* Advertise everything we can support. */
623 		if (hp->sw_bmsr & BMSR_10HALF)
624 			hp->sw_advertise |= (ADVERTISE_10HALF);
625 		else
626 			hp->sw_advertise &= ~(ADVERTISE_10HALF);
627 
628 		if (hp->sw_bmsr & BMSR_10FULL)
629 			hp->sw_advertise |= (ADVERTISE_10FULL);
630 		else
631 			hp->sw_advertise &= ~(ADVERTISE_10FULL);
632 		if (hp->sw_bmsr & BMSR_100HALF)
633 			hp->sw_advertise |= (ADVERTISE_100HALF);
634 		else
635 			hp->sw_advertise &= ~(ADVERTISE_100HALF);
636 		if (hp->sw_bmsr & BMSR_100FULL)
637 			hp->sw_advertise |= (ADVERTISE_100FULL);
638 		else
639 			hp->sw_advertise &= ~(ADVERTISE_100FULL);
640 		happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
641 
642 		/* XXX Currently no Happy Meal cards I know of support 100BaseT4,
643 		 * XXX and this is because the DP83840 does not support it, changes
644 		 * XXX would need to be made to the tx/rx logic in the driver as well
645 		 * XXX so I completely skip checking for it in the BMSR for now.
646 		 */
647 
648 		ASD("Advertising [ %s%s%s%s]\n",
649 		    hp->sw_advertise & ADVERTISE_10HALF ? "10H " : "",
650 		    hp->sw_advertise & ADVERTISE_10FULL ? "10F " : "",
651 		    hp->sw_advertise & ADVERTISE_100HALF ? "100H " : "",
652 		    hp->sw_advertise & ADVERTISE_100FULL ? "100F " : "");
653 
654 		/* Enable Auto-Negotiation, this is usually on already... */
655 		hp->sw_bmcr |= BMCR_ANENABLE;
656 		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
657 
658 		/* Restart it to make sure it is going. */
659 		hp->sw_bmcr |= BMCR_ANRESTART;
660 		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
661 
662 		/* BMCR_ANRESTART self clears when the process has begun. */
663 
664 		timeout = 64;  /* More than enough. */
665 		while (--timeout) {
666 			hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
667 			if (!(hp->sw_bmcr & BMCR_ANRESTART))
668 				break; /* got it. */
669 			udelay(10);
670 		}
671 		if (!timeout) {
672 			netdev_err(hp->dev,
673 				   "Happy Meal would not start auto negotiation BMCR=0x%04x\n",
674 				   hp->sw_bmcr);
675 			netdev_notice(hp->dev,
676 				      "Performing force link detection.\n");
677 			goto force_link;
678 		} else {
679 			hp->timer_state = arbwait;
680 		}
681 	} else {
682 force_link:
683 		/* Force the link up, trying first a particular mode.
684 		 * Either we are here at the request of ethtool or
685 		 * because the Happy Meal would not start to autoneg.
686 		 */
687 
688 		/* Disable auto-negotiation in BMCR, enable the duplex and
689 		 * speed setting, init the timer state machine, and fire it off.
690 		 */
691 		if (!ep || ep->base.autoneg == AUTONEG_ENABLE) {
692 			hp->sw_bmcr = BMCR_SPEED100;
693 		} else {
694 			if (ep->base.speed == SPEED_100)
695 				hp->sw_bmcr = BMCR_SPEED100;
696 			else
697 				hp->sw_bmcr = 0;
698 			if (ep->base.duplex == DUPLEX_FULL)
699 				hp->sw_bmcr |= BMCR_FULLDPLX;
700 		}
701 		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
702 
703 		if (!is_lucent_phy(hp)) {
704 			/* OK, seems we need to disable the transceiver for the first
705 			 * tick to make sure we get an accurate link state at the
706 			 * second tick.
707 			 */
708 			hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
709 							       DP83840_CSCONFIG);
710 			hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
711 			happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
712 					      hp->sw_csconfig);
713 		}
714 		hp->timer_state = ltrywait;
715 	}
716 
717 	hp->timer_ticks = 0;
718 	hp->happy_timer.expires = jiffies + (12 * HZ)/10;  /* 1.2 sec. */
719 	add_timer(&hp->happy_timer);
720 }
721 
722 static void happy_meal_timer(struct timer_list *t)
723 {
724 	struct happy_meal *hp = from_timer(hp, t, happy_timer);
725 	void __iomem *tregs = hp->tcvregs;
726 	int restart_timer = 0;
727 
728 	spin_lock_irq(&hp->happy_lock);
729 
730 	hp->timer_ticks++;
731 	switch(hp->timer_state) {
732 	case arbwait:
733 		/* Only allow for 10 ticks, that's about 12 seconds and much too
734 		 * long to wait for arbitration to complete.
735 		 */
736 		if (hp->timer_ticks >= 10) {
737 			/* Enter force mode. */
738 	do_force_mode:
739 			hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
740 			netdev_notice(hp->dev,
741 				      "Auto-Negotiation unsuccessful, trying force link mode\n");
742 			hp->sw_bmcr = BMCR_SPEED100;
743 			happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
744 
745 			if (!is_lucent_phy(hp)) {
746 				/* OK, seems we need to disable the transceiver for the first
747 				 * tick to make sure we get an accurate link state at the
748 				 * second tick.
749 				 */
750 				hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
751 				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
752 				happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
753 			}
754 			hp->timer_state = ltrywait;
755 			hp->timer_ticks = 0;
756 			restart_timer = 1;
757 		} else {
758 			/* Anything interesting happen? */
759 			hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
760 			if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
761 				int ret;
762 
763 				/* Just what we've been waiting for... */
764 				ret = set_happy_link_modes(hp, tregs);
765 				if (ret) {
766 					/* Ooops, something bad happened, go to force
767 					 * mode.
768 					 *
769 					 * XXX Broken hubs which don't support 802.3u
770 					 * XXX auto-negotiation make this happen as well.
771 					 */
772 					goto do_force_mode;
773 				}
774 
775 				/* Success, at least so far, advance our state engine. */
776 				hp->timer_state = lupwait;
777 				restart_timer = 1;
778 			} else {
779 				restart_timer = 1;
780 			}
781 		}
782 		break;
783 
784 	case lupwait:
785 		/* Auto negotiation was successful and we are awaiting a
786 		 * link up status.  I have decided to let this timer run
787 		 * forever until some sort of error is signalled, reporting
788 		 * a message to the user at 10 second intervals.
789 		 */
790 		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
791 		if (hp->sw_bmsr & BMSR_LSTATUS) {
792 			/* Wheee, it's up, display the link mode in use and put
793 			 * the timer to sleep.
794 			 */
795 			display_link_mode(hp, tregs);
796 			hp->timer_state = asleep;
797 			restart_timer = 0;
798 		} else {
799 			if (hp->timer_ticks >= 10) {
800 				netdev_notice(hp->dev,
801 					      "Auto negotiation successful, link still not completely up.\n");
802 				hp->timer_ticks = 0;
803 				restart_timer = 1;
804 			} else {
805 				restart_timer = 1;
806 			}
807 		}
808 		break;
809 
810 	case ltrywait:
811 		/* Making the timeout here too long can make it take
812 		 * annoyingly long to attempt all of the link mode
813 		 * permutations, but then again this is essentially
814 		 * error recovery code for the most part.
815 		 */
816 		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
817 		hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
818 		if (hp->timer_ticks == 1) {
819 			if (!is_lucent_phy(hp)) {
820 				/* Disable the transceiver this tick; we'll re-enable it
821 				 * next tick, then check link state on the following tick.
822 				 */
823 				hp->sw_csconfig |= CSCONFIG_TCVDISAB;
824 				happy_meal_tcvr_write(hp, tregs,
825 						      DP83840_CSCONFIG, hp->sw_csconfig);
826 			}
827 			restart_timer = 1;
828 			break;
829 		}
830 		if (hp->timer_ticks == 2) {
831 			if (!is_lucent_phy(hp)) {
832 				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
833 				happy_meal_tcvr_write(hp, tregs,
834 						      DP83840_CSCONFIG, hp->sw_csconfig);
835 			}
836 			restart_timer = 1;
837 			break;
838 		}
839 		if (hp->sw_bmsr & BMSR_LSTATUS) {
840 			/* Force mode selection success. */
841 			display_forced_link_mode(hp, tregs);
842 			set_happy_link_modes(hp, tregs); /* XXX error? then what? */
843 			hp->timer_state = asleep;
844 			restart_timer = 0;
845 		} else {
846 			if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
847 				int ret;
848 
849 				ret = try_next_permutation(hp, tregs);
850 				if (ret == -1) {
851 					/* Aieee, tried them all, reset the
852 					 * chip and try all over again.
853 					 */
854 
855 					/* Let the user know... */
856 					netdev_notice(hp->dev,
857 						      "Link down, cable problem?\n");
858 
859 					happy_meal_begin_auto_negotiation(hp, tregs, NULL);
860 					goto out;
861 				}
862 				if (!is_lucent_phy(hp)) {
863 					hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
864 									       DP83840_CSCONFIG);
865 					hp->sw_csconfig |= CSCONFIG_TCVDISAB;
866 					happy_meal_tcvr_write(hp, tregs,
867 							      DP83840_CSCONFIG, hp->sw_csconfig);
868 				}
869 				hp->timer_ticks = 0;
870 				restart_timer = 1;
871 			} else {
872 				restart_timer = 1;
873 			}
874 		}
875 		break;
876 
877 	case asleep:
878 	default:
879 		/* Can't happen.... */
880 		netdev_err(hp->dev,
881 			   "Aieee, link timer is asleep but we got one anyways!\n");
882 		restart_timer = 0;
883 		hp->timer_ticks = 0;
884 		hp->timer_state = asleep; /* foo on you */
885 		break;
886 	}
887 
888 	if (restart_timer) {
889 		hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
890 		add_timer(&hp->happy_timer);
891 	}
892 
893 out:
894 	spin_unlock_irq(&hp->happy_lock);
895 }
896 
897 #define TX_RESET_TRIES     32
898 #define RX_RESET_TRIES     32
899 
900 /* hp->happy_lock must be held */
901 static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
902 {
903 	int tries = TX_RESET_TRIES;
904 
905 	HMD("reset...\n");
906 
907 	/* Would you like to try our SMCC Delux? */
908 	hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
909 	while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
910 		udelay(20);
911 
912 	/* Lettuce, tomato, buggy hardware (no extra charge)? */
913 	if (!tries)
914 		netdev_err(hp->dev, "Transceiver BigMac ATTACK!\n");
915 
916 	/* Take care. */
917 	HMD("done\n");
918 }
919 
920 /* hp->happy_lock must be held */
921 static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
922 {
923 	int tries = RX_RESET_TRIES;
924 
925 	HMD("reset...\n");
926 
927 	/* We have a special on GNU/Viking hardware bugs today. */
928 	hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
929 	while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
930 		udelay(20);
931 
932 	/* Will that be all? */
933 	if (!tries)
934 		netdev_err(hp->dev, "Receiver BigMac ATTACK!\n");
935 
936 	/* Don't forget your vik_1137125_wa.  Have a nice day. */
937 	HMD("done\n");
938 }
939 
940 #define STOP_TRIES         16
941 
942 /* hp->happy_lock must be held */
943 static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
944 {
945 	int tries = STOP_TRIES;
946 
947 	HMD("reset...\n");
948 
949 	/* We're consolidating our STB products, it's your lucky day. */
950 	hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
951 	while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
952 		udelay(20);
953 
954 	/* Come back next week when we are "Sun Microelectronics". */
955 	if (!tries)
956 		netdev_err(hp->dev, "Fry guys.\n");
957 
958 	/* Remember: "Different name, same old buggy as shit hardware." */
959 	HMD("done\n");
960 }
961 
962 /* hp->happy_lock must be held */
963 static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
964 {
965 	struct net_device_stats *stats = &hp->dev->stats;
966 
967 	stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
968 	hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
969 
970 	stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
971 	hme_write32(hp, bregs + BMAC_UNALECTR, 0);
972 
973 	stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
974 	hme_write32(hp, bregs + BMAC_GLECTR, 0);
975 
976 	stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
977 
978 	stats->collisions +=
979 		(hme_read32(hp, bregs + BMAC_EXCTR) +
980 		 hme_read32(hp, bregs + BMAC_LTCTR));
981 	hme_write32(hp, bregs + BMAC_EXCTR, 0);
982 	hme_write32(hp, bregs + BMAC_LTCTR, 0);
983 }
984 
985 /* Only Sun can take such nice parts and fuck up the programming interface
986  * like this.  Good job guys...
987  */
988 #define TCVR_RESET_TRIES       16 /* It should reset quickly        */
989 #define TCVR_UNISOLATE_TRIES   32 /* Dis-isolation can take longer. */
990 
991 /* hp->happy_lock must be held */
992 static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
993 {
994 	u32 tconfig;
995 	int result, tries = TCVR_RESET_TRIES;
996 
997 	tconfig = hme_read32(hp, tregs + TCVR_CFG);
998 	ASD("tcfg=%08x\n", tconfig);
999 	if (hp->tcvr_type == external) {
1000 		hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
1001 		hp->tcvr_type = internal;
1002 		hp->paddr = TCV_PADDR_ITX;
1003 		happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1004 				      (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1005 		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1006 		if (result == TCVR_FAILURE) {
1007 			ASD("phyread_fail\n");
1008 			return -1;
1009 		}
1010 		ASD("external: ISOLATE, phyread_ok, PSELECT\n");
1011 		hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1012 		hp->tcvr_type = external;
1013 		hp->paddr = TCV_PADDR_ETX;
1014 	} else {
1015 		if (tconfig & TCV_CFG_MDIO1) {
1016 			hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
1017 			happy_meal_tcvr_write(hp, tregs, MII_BMCR,
1018 					      (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
1019 			result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1020 			if (result == TCVR_FAILURE) {
1021 				ASD("phyread_fail>\n");
1022 				return -1;
1023 			}
1024 			ASD("internal: PSELECT, ISOLATE, phyread_ok, ~PSELECT\n");
1025 			hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
1026 			hp->tcvr_type = internal;
1027 			hp->paddr = TCV_PADDR_ITX;
1028 		}
1029 	}
1030 
1031 	ASD("BMCR_RESET...\n");
1032 	happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
1033 
1034 	while (--tries) {
1035 		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1036 		if (result == TCVR_FAILURE)
1037 			return -1;
1038 		hp->sw_bmcr = result;
1039 		if (!(result & BMCR_RESET))
1040 			break;
1041 		udelay(20);
1042 	}
1043 	if (!tries) {
1044 		ASD("BMCR RESET FAILED!\n");
1045 		return -1;
1046 	}
1047 	ASD("RESET_OK\n");
1048 
1049 	/* Get fresh copies of the PHY registers. */
1050 	hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1051 	hp->sw_physid1   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
1052 	hp->sw_physid2   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
1053 	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1054 
1055 	ASD("UNISOLATE...\n");
1056 	hp->sw_bmcr &= ~(BMCR_ISOLATE);
1057 	happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
1058 
1059 	tries = TCVR_UNISOLATE_TRIES;
1060 	while (--tries) {
1061 		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
1062 		if (result == TCVR_FAILURE)
1063 			return -1;
1064 		if (!(result & BMCR_ISOLATE))
1065 			break;
1066 		udelay(20);
1067 	}
1068 	if (!tries) {
1069 		ASD("UNISOLATE FAILED!\n");
1070 		return -1;
1071 	}
1072 	ASD("SUCCESS and CSCONFIG_DFBYPASS\n");
1073 	if (!is_lucent_phy(hp)) {
1074 		result = happy_meal_tcvr_read(hp, tregs,
1075 					      DP83840_CSCONFIG);
1076 		happy_meal_tcvr_write(hp, tregs,
1077 				      DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
1078 	}
1079 	return 0;
1080 }
1081 
1082 /* Figure out whether we have an internal or external transceiver.
1083  *
1084  * hp->happy_lock must be held
1085  */
1086 static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
1087 {
1088 	unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
1089 	u32 reread = hme_read32(hp, tregs + TCVR_CFG);
1090 
1091 	ASD("tcfg=%08lx\n", tconfig);
1092 	if (reread & TCV_CFG_MDIO1) {
1093 		hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
1094 		hp->paddr = TCV_PADDR_ETX;
1095 		hp->tcvr_type = external;
1096 		ASD("not polling, external\n");
1097 	} else {
1098 		if (reread & TCV_CFG_MDIO0) {
1099 			hme_write32(hp, tregs + TCVR_CFG,
1100 				    tconfig & ~(TCV_CFG_PSELECT));
1101 			hp->paddr = TCV_PADDR_ITX;
1102 			hp->tcvr_type = internal;
1103 			ASD("not polling, internal\n");
1104 		} else {
1105 			netdev_err(hp->dev,
1106 				   "Transceiver and a coke please.\n");
1107 			hp->tcvr_type = none; /* Grrr... */
1108 			ASD("not polling, none\n");
1109 		}
1110 	}
1111 }
1112 
1113 /* The receive ring buffers are a bit tricky to get right.  Here goes...
1114  *
1115  * The buffers we dma into must be 64 byte aligned.  So we use a special
1116  * alloc_skb() routine for the happy meal to allocate 64 bytes more than
1117  * we really need.
1118  *
1119  * We use skb_reserve() to align the data block we get in the skb.  We
1120  * also program the erxregs->cfg register to use an offset of 2.  This
1121  * empirical constant plus the ethernet header size will always leave
1122  * us with a nicely aligned ip header once we pass things up to the
1123  * protocol layers.
1124  *
1125  * The numbers work out to:
1126  *
1127  *         Max ethernet frame size         1518
1128  *         Ethernet header size              14
1129  *         Happy Meal base offset             2
1130  *
1131  * Say a skb data area is at 0xf001b010, and its size alloced is
1132  * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
1133  *
1134  * First our alloc_skb() routine aligns the data base to a 64 byte
1135  * boundary.  We now have 0xf001b040 as our skb data address.  We
1136  * plug this into the receive descriptor address.
1137  *
1138  * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
1139  * So now the data we will end up looking at starts at 0xf001b042.  When
1140  * the packet arrives, we will check out the size received and subtract
1141  * this from the skb->length.  Then we just pass the packet up to the
1142  * protocols as is, and allocate a new skb to replace this slot we have
1143  * just received from.
1144  *
1145  * The ethernet layer will strip the ether header from the front of the
1146  * skb we just sent to it, this leaves us with the ip header sitting
1147  * nicely aligned at 0xf001b050.  Also, for tcp and udp packets the
1148  * Happy Meal has even checksummed the tcp/udp data for us.  The 16
1149  * bit checksum is obtained from the low bits of the receive descriptor
1150  * flags, thus:
1151  *
1152  * 	skb->csum = rxd->rx_flags & 0xffff;
1153  * 	skb->ip_summed = CHECKSUM_COMPLETE;
1154  *
1155  * before sending off the skb to the protocols, and we are good as gold.
1156  */
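
/* A minimal sketch (assumption, not the driver's code; the example_* name is
 * made up) of the kind of 64-byte-aligning allocator the comment above refers
 * to.  The real helper used by happy_meal_init_rings() below is
 * happy_meal_alloc_skb().
 */
#if 0
static struct sk_buff *example_alloc_aligned_skb(unsigned int length, gfp_t gfp)
{
	/* Over-allocate so there is always room to slide up to alignment. */
	struct sk_buff *skb = alloc_skb(length + 64, gfp);

	if (skb) {
		int offset = (unsigned long)skb->data & 63;

		/* Reserve up to the next 64-byte boundary. */
		if (offset)
			skb_reserve(skb, 64 - offset);
	}
	return skb;
}
#endif
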
1157 static void happy_meal_clean_rings(struct happy_meal *hp)
1158 {
1159 	int i;
1160 
1161 	for (i = 0; i < RX_RING_SIZE; i++) {
1162 		if (hp->rx_skbs[i] != NULL) {
1163 			struct sk_buff *skb = hp->rx_skbs[i];
1164 			struct happy_meal_rxd *rxd;
1165 			u32 dma_addr;
1166 
1167 			rxd = &hp->happy_block->happy_meal_rxd[i];
1168 			dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
1169 			dma_unmap_single(hp->dma_dev, dma_addr,
1170 					 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1171 			dev_kfree_skb_any(skb);
1172 			hp->rx_skbs[i] = NULL;
1173 		}
1174 	}
1175 
1176 	for (i = 0; i < TX_RING_SIZE; i++) {
1177 		if (hp->tx_skbs[i] != NULL) {
1178 			struct sk_buff *skb = hp->tx_skbs[i];
1179 			struct happy_meal_txd *txd;
1180 			u32 dma_addr;
1181 			int frag;
1182 
1183 			hp->tx_skbs[i] = NULL;
1184 
1185 			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1186 				txd = &hp->happy_block->happy_meal_txd[i];
1187 				dma_addr = hme_read_desc32(hp, &txd->tx_addr);
1188 				if (!frag)
1189 					dma_unmap_single(hp->dma_dev, dma_addr,
1190 							 (hme_read_desc32(hp, &txd->tx_flags)
1191 							  & TXFLAG_SIZE),
1192 							 DMA_TO_DEVICE);
1193 				else
1194 					dma_unmap_page(hp->dma_dev, dma_addr,
1195 							 (hme_read_desc32(hp, &txd->tx_flags)
1196 							  & TXFLAG_SIZE),
1197 							 DMA_TO_DEVICE);
1198 
1199 				if (frag != skb_shinfo(skb)->nr_frags)
1200 					i++;
1201 			}
1202 
1203 			dev_kfree_skb_any(skb);
1204 		}
1205 	}
1206 }
1207 
1208 /* hp->happy_lock must be held */
1209 static void happy_meal_init_rings(struct happy_meal *hp)
1210 {
1211 	struct hmeal_init_block *hb = hp->happy_block;
1212 	int i;
1213 
1214 	HMD("counters to zero\n");
1215 	hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
1216 
1217 	/* Free any skippy bufs left around in the rings. */
1218 	happy_meal_clean_rings(hp);
1219 
1220 	/* Now get new skippy bufs for the receive ring. */
1221 	HMD("init rxring\n");
1222 	for (i = 0; i < RX_RING_SIZE; i++) {
1223 		struct sk_buff *skb;
1224 		u32 mapping;
1225 
1226 		skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1227 		if (!skb) {
1228 			hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1229 			continue;
1230 		}
1231 		hp->rx_skbs[i] = skb;
1232 
1233 		/* Because we reserve afterwards. */
1234 		skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1235 		mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1236 					 DMA_FROM_DEVICE);
1237 		if (dma_mapping_error(hp->dma_dev, mapping)) {
1238 			dev_kfree_skb_any(skb);
1239 			hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1240 			continue;
1241 		}
1242 		hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1243 			      (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1244 			      mapping);
1245 		skb_reserve(skb, RX_OFFSET);
1246 	}
1247 
1248 	HMD("init txring\n");
1249 	for (i = 0; i < TX_RING_SIZE; i++)
1250 		hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
1251 
1252 	HMD("done\n");
1253 }
1254 
1255 /* hp->happy_lock must be held */
1256 static int happy_meal_init(struct happy_meal *hp)
1257 {
1258 	const unsigned char *e = &hp->dev->dev_addr[0];
1259 	void __iomem *gregs        = hp->gregs;
1260 	void __iomem *etxregs      = hp->etxregs;
1261 	void __iomem *erxregs      = hp->erxregs;
1262 	void __iomem *bregs        = hp->bigmacregs;
1263 	void __iomem *tregs        = hp->tcvregs;
1264 	const char *bursts = "64";
1265 	u32 regtmp, rxcfg;
1266 
1267 	/* If auto-negotiation timer is running, kill it. */
1268 	del_timer(&hp->happy_timer);
1269 
1270 	HMD("happy_flags[%08x]\n", hp->happy_flags);
1271 	if (!(hp->happy_flags & HFLAG_INIT)) {
1272 		HMD("set HFLAG_INIT\n");
1273 		hp->happy_flags |= HFLAG_INIT;
1274 		happy_meal_get_counters(hp, bregs);
1275 	}
1276 
1277 	/* Stop transmitter and receiver. */
1278 	HMD("to happy_meal_stop\n");
1279 	happy_meal_stop(hp, gregs);
1280 
1281 	/* Alloc and reset the tx/rx descriptor chains. */
1282 	HMD("to happy_meal_init_rings\n");
1283 	happy_meal_init_rings(hp);
1284 
1285 	/* See if we can enable the MIF frame on this card to speak to the DP83840. */
1286 	if (hp->happy_flags & HFLAG_FENABLE) {
1287 		HMD("use frame old[%08x]\n",
1288 		    hme_read32(hp, tregs + TCVR_CFG));
1289 		hme_write32(hp, tregs + TCVR_CFG,
1290 			    hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1291 	} else {
1292 		HMD("use bitbang old[%08x]\n",
1293 		    hme_read32(hp, tregs + TCVR_CFG));
1294 		hme_write32(hp, tregs + TCVR_CFG,
1295 			    hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1296 	}
1297 
1298 	/* Check the state of the transceiver. */
1299 	HMD("to happy_meal_transceiver_check\n");
1300 	happy_meal_transceiver_check(hp, tregs);
1301 
1302 	/* Put the Big Mac into a sane state. */
1303 	switch(hp->tcvr_type) {
1304 	case none:
1305 		/* Cannot operate if we don't know the transceiver type! */
1306 		HMD("AAIEEE no transceiver type, EAGAIN\n");
1307 		return -EAGAIN;
1308 
1309 	case internal:
1310 		/* Using the MII buffers. */
1311 		HMD("internal, using MII\n");
1312 		hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1313 		break;
1314 
1315 	case external:
1316 		/* Not using the MII, disable it. */
1317 		HMD("external, disable MII\n");
1318 		hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1319 		break;
1320 	}
1321 
1322 	if (happy_meal_tcvr_reset(hp, tregs))
1323 		return -EAGAIN;
1324 
1325 	/* Reset the Happy Meal Big Mac transceiver and the receiver. */
1326 	HMD("tx/rx reset\n");
1327 	happy_meal_tx_reset(hp, bregs);
1328 	happy_meal_rx_reset(hp, bregs);
1329 
1330 	/* Set jam size and inter-packet gaps to reasonable defaults. */
1331 	hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
1332 	hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
1333 	hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
1334 
1335 	/* Load up the MAC address and random seed. */
1336 
1337 	/* The docs recommend using the 10 LSBs of our MAC here. */
1338 	hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
1339 
1340 	hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
1341 	hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
1342 	hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
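	/* Worked example (illustrative): for the MAC 08:00:20:30:40:50 the
	 * writes above give MACADDR0=0x0800, MACADDR1=0x2030, MACADDR2=0x4050,
	 * and the random seed is the low 10 bits of the last two octets, 0x050.
	 */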
1343 
1344 	if ((hp->dev->flags & IFF_ALLMULTI) ||
1345 	    (netdev_mc_count(hp->dev) > 64)) {
1346 		hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
1347 		hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
1348 		hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
1349 		hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1350 	} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1351 		u16 hash_table[4];
1352 		struct netdev_hw_addr *ha;
1353 		u32 crc;
1354 
1355 		memset(hash_table, 0, sizeof(hash_table));
1356 		netdev_for_each_mc_addr(ha, hp->dev) {
1357 			crc = ether_crc_le(6, ha->addr);
1358 			crc >>= 26;
1359 			hash_table[crc >> 4] |= 1 << (crc & 0xf);
1360 		}
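		/* The top 6 bits of the little-endian CRC-32 select one of 64
		 * filter bins: bits [5:4] pick which of the four 16-bit HTABLE
		 * words below, bits [3:0] the bit within that word.  E.g. a
		 * CRC whose top six bits are 0b100110 (38) sets bit 6 of
		 * BMAC_HTABLE2.
		 */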
1361 		hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
1362 		hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
1363 		hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
1364 		hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
1365 	} else {
1366 		hme_write32(hp, bregs + BMAC_HTABLE3, 0);
1367 		hme_write32(hp, bregs + BMAC_HTABLE2, 0);
1368 		hme_write32(hp, bregs + BMAC_HTABLE1, 0);
1369 		hme_write32(hp, bregs + BMAC_HTABLE0, 0);
1370 	}
1371 
1372 	/* Set the RX and TX ring ptrs. */
1373 	HMD("ring ptrs rxr[%08x] txr[%08x]\n",
1374 	    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
1375 	    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1376 	hme_write32(hp, erxregs + ERX_RING,
1377 		    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
1378 	hme_write32(hp, etxregs + ETX_RING,
1379 		    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
1380 
1381 	/* Parity issues in the ERX unit of some HME revisions can cause some
1382 	 * registers to not be written unless their parity is even.  Detect such
1383 	 * lost writes and simply rewrite with a low bit set (which will be ignored
1384 	 * since the rxring needs to be 2K aligned).
1385 	 */
1386 	if (hme_read32(hp, erxregs + ERX_RING) !=
1387 	    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
1388 		hme_write32(hp, erxregs + ERX_RING,
1389 			    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
1390 			    | 0x4);
1391 
1392 	/* Set the supported burst sizes. */
1393 #ifndef CONFIG_SPARC
1394 	/* It is always PCI and can handle 64byte bursts. */
1395 	hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
1396 #else
1397 	if ((hp->happy_bursts & DMA_BURST64) &&
1398 	    ((hp->happy_flags & HFLAG_PCI) != 0
1399 #ifdef CONFIG_SBUS
1400 	     || sbus_can_burst64()
1401 #endif
1402 	     || 0)) {
1403 		u32 gcfg = GREG_CFG_BURST64;
1404 
1405 		/* I have no idea if I should set the extended
1406 		 * transfer mode bit for Cheerio, so for now I
1407 		 * do not.  -DaveM
1408 		 */
1409 #ifdef CONFIG_SBUS
1410 		if ((hp->happy_flags & HFLAG_PCI) == 0) {
1411 			struct platform_device *op = hp->happy_dev;
1412 			if (sbus_can_dma_64bit()) {
1413 				sbus_set_sbus64(&op->dev,
1414 						hp->happy_bursts);
1415 				gcfg |= GREG_CFG_64BIT;
1416 			}
1417 		}
1418 #endif
1419 
1420 		bursts = "64";
1421 		hme_write32(hp, gregs + GREG_CFG, gcfg);
1422 	} else if (hp->happy_bursts & DMA_BURST32) {
1423 		bursts = "32";
1424 		hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
1425 	} else if (hp->happy_bursts & DMA_BURST16) {
1426 		bursts = "16";
1427 		hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
1428 	} else {
1429 		bursts = "XXX";
1430 		hme_write32(hp, gregs + GREG_CFG, 0);
1431 	}
1432 #endif /* CONFIG_SPARC */
1433 
1434 	HMD("old[%08x] bursts<%s>\n",
1435 	    hme_read32(hp, gregs + GREG_CFG), bursts);
1436 
1437 	/* Turn off interrupts we do not want to hear. */
1438 	hme_write32(hp, gregs + GREG_IMASK,
1439 		    (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
1440 		     GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
1441 
1442 	/* Set the transmit ring buffer size. */
1443 	HMD("tx rsize=%d oreg[%08x]\n", (int)TX_RING_SIZE,
1444 	    hme_read32(hp, etxregs + ETX_RSIZE));
1445 	hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
1446 
1447 	/* Enable transmitter DVMA. */
1448 	HMD("tx dma enable old[%08x]\n", hme_read32(hp, etxregs + ETX_CFG));
1449 	hme_write32(hp, etxregs + ETX_CFG,
1450 		    hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
1451 
1452 	/* This chip really rots: for the receiver, sometimes when you
1453 	 * write to its control registers not all the bits get there
1454 	 * properly.  I cannot think of a sane way to provide complete
1455 	 * coverage for this hardware bug yet.
1456 	 */
1457 	HMD("erx regs bug old[%08x]\n",
1458 	    hme_read32(hp, erxregs + ERX_CFG));
1459 	hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1460 	regtmp = hme_read32(hp, erxregs + ERX_CFG);
1461 	hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
1462 	if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
1463 		netdev_err(hp->dev,
1464 			   "Eieee, rx config register gets greasy fries.\n");
1465 		netdev_err(hp->dev,
1466 			   "Trying to set %08x, reread gives %08x\n",
1467 			   ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
1468 		/* XXX Should return failure here... */
1469 	}
1470 
1471 	/* Enable Big Mac hash table filter. */
1472 	HMD("enable hash rx_cfg_old[%08x]\n",
1473 	    hme_read32(hp, bregs + BMAC_RXCFG));
1474 	rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
1475 	if (hp->dev->flags & IFF_PROMISC)
1476 		rxcfg |= BIGMAC_RXCFG_PMISC;
1477 	hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
1478 
1479 	/* Let the bits settle in the chip. */
1480 	udelay(10);
1481 
1482 	/* Ok, configure the Big Mac transmitter. */
1483 	HMD("BIGMAC init\n");
1484 	regtmp = 0;
1485 	if (hp->happy_flags & HFLAG_FULL)
1486 		regtmp |= BIGMAC_TXCFG_FULLDPLX;
1487 
1488 	/* Don't turn on the "don't give up" bit for now.  It could cause hme
1489 	 * to deadlock with the PHY if a Jabber occurs.
1490 	 */
1491 	hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
1492 
1493 	/* Give up after 16 TX attempts. */
1494 	hme_write32(hp, bregs + BMAC_ALIMIT, 16);
1495 
1496 	/* Enable the output drivers no matter what. */
1497 	regtmp = BIGMAC_XCFG_ODENABLE;
1498 
1499 	/* If card can do lance mode, enable it. */
1500 	if (hp->happy_flags & HFLAG_LANCE)
1501 		regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
1502 
1503 	/* Disable the MII buffers if using external transceiver. */
1504 	if (hp->tcvr_type == external)
1505 		regtmp |= BIGMAC_XCFG_MIIDISAB;
1506 
1507 	HMD("XIF config old[%08x]\n", hme_read32(hp, bregs + BMAC_XIFCFG));
1508 	hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
1509 
1510 	/* Start things up. */
1511 	HMD("tx old[%08x] and rx [%08x] ON!\n",
1512 	    hme_read32(hp, bregs + BMAC_TXCFG),
1513 	    hme_read32(hp, bregs + BMAC_RXCFG));
1514 
1515 	/* Set larger TX/RX size to allow for 802.1q */
1516 	hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
1517 	hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
1518 
1519 	hme_write32(hp, bregs + BMAC_TXCFG,
1520 		    hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
1521 	hme_write32(hp, bregs + BMAC_RXCFG,
1522 		    hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
1523 
1524 	/* Get the autonegotiation started, and the watch timer ticking. */
1525 	happy_meal_begin_auto_negotiation(hp, tregs, NULL);
1526 
1527 	/* Success. */
1528 	return 0;
1529 }
1530 
1531 /* hp->happy_lock must be held */
1532 static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1533 {
1534 	void __iomem *tregs	= hp->tcvregs;
1535 	void __iomem *bregs	= hp->bigmacregs;
1536 	void __iomem *gregs	= hp->gregs;
1537 
1538 	happy_meal_stop(hp, gregs);
1539 	if (hp->happy_flags & HFLAG_FENABLE)
1540 		hme_write32(hp, tregs + TCVR_CFG,
1541 			    hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
1542 	else
1543 		hme_write32(hp, tregs + TCVR_CFG,
1544 			    hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
1545 	happy_meal_transceiver_check(hp, tregs);
1546 	switch(hp->tcvr_type) {
1547 	case none:
1548 		return;
1549 	case internal:
1550 		hme_write32(hp, bregs + BMAC_XIFCFG, 0);
1551 		break;
1552 	case external:
1553 		hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1554 		break;
1555 	}
1556 	if (happy_meal_tcvr_reset(hp, tregs))
1557 		return;
1558 
1559 	/* Latch PHY registers as of now. */
1560 	hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
1561 	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
1562 
1563 	/* Advertise everything we can support. */
1564 	if (hp->sw_bmsr & BMSR_10HALF)
1565 		hp->sw_advertise |= (ADVERTISE_10HALF);
1566 	else
1567 		hp->sw_advertise &= ~(ADVERTISE_10HALF);
1568 
1569 	if (hp->sw_bmsr & BMSR_10FULL)
1570 		hp->sw_advertise |= (ADVERTISE_10FULL);
1571 	else
1572 		hp->sw_advertise &= ~(ADVERTISE_10FULL);
1573 	if (hp->sw_bmsr & BMSR_100HALF)
1574 		hp->sw_advertise |= (ADVERTISE_100HALF);
1575 	else
1576 		hp->sw_advertise &= ~(ADVERTISE_100HALF);
1577 	if (hp->sw_bmsr & BMSR_100FULL)
1578 		hp->sw_advertise |= (ADVERTISE_100FULL);
1579 	else
1580 		hp->sw_advertise &= ~(ADVERTISE_100FULL);
1581 
1582 	/* Update the PHY advertisement register. */
1583 	happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
1584 }
1585 
1586 /* Once status is latched (by happy_meal_interrupt) it is cleared by
1587  * the hardware, so we cannot re-read it and get a correct value.
1588  *
1589  * hp->happy_lock must be held
1590  */
1591 static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
1592 {
1593 	int reset = 0;
1594 
1595 	/* Only print messages for non-counter related interrupts. */
1596 	if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
1597 		      GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
1598 		      GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
1599 		      GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
1600 		      GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
1601 		      GREG_STAT_SLVPERR))
1602 		netdev_err(hp->dev,
1603 			   "Error interrupt for happy meal, status = %08x\n",
1604 			   status);
1605 
1606 	if (status & GREG_STAT_RFIFOVF) {
1607 		/* Receive FIFO overflow is harmless and the hardware will take
1608 		   care of it, just some packets are lost. Who cares. */
1609 		netdev_dbg(hp->dev, "Happy Meal receive FIFO overflow.\n");
1610 	}
1611 
1612 	if (status & GREG_STAT_STSTERR) {
1613 		/* BigMAC SQE link test failed. */
1614 		netdev_err(hp->dev, "Happy Meal BigMAC SQE test failed.\n");
1615 		reset = 1;
1616 	}
1617 
1618 	if (status & GREG_STAT_TFIFO_UND) {
1619 		/* Transmit FIFO underrun, again DMA error likely. */
1620 		netdev_err(hp->dev,
1621 			   "Happy Meal transmitter FIFO underrun, DMA error.\n");
1622 		reset = 1;
1623 	}
1624 
1625 	if (status & GREG_STAT_MAXPKTERR) {
1626 		/* Driver error, tried to transmit something larger
1627 		 * than ethernet max mtu.
1628 		 */
1629 		netdev_err(hp->dev, "Happy Meal MAX Packet size error.\n");
1630 		reset = 1;
1631 	}
1632 
1633 	if (status & GREG_STAT_NORXD) {
1634 		/* This is harmless, it just means the system is
1635 		 * quite loaded and the incoming packet rate was
1636 		 * faster than the interrupt handler could keep up
1637 		 * with.
1638 		 */
1639 		netdev_info(hp->dev,
1640 			    "Happy Meal out of receive descriptors, packet dropped.\n");
1641 	}
1642 
1643 	if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
1644 		/* All sorts of DMA receive errors. */
1645 		netdev_err(hp->dev, "Happy Meal rx DMA errors [ %s%s%s]\n",
1646 			   status & GREG_STAT_RXERR ? "GenericError " : "",
1647 			   status & GREG_STAT_RXPERR ? "ParityError " : "",
1648 			   status & GREG_STAT_RXTERR ? "RxTagBotch " : "");
1649 		reset = 1;
1650 	}
1651 
1652 	if (status & GREG_STAT_EOPERR) {
1653 		/* Driver bug, didn't set EOP bit in tx descriptor given
1654 		 * to the happy meal.
1655 		 */
1656 		netdev_err(hp->dev,
1657 			   "EOP not set in happy meal transmit descriptor!\n");
1658 		reset = 1;
1659 	}
1660 
1661 	if (status & GREG_STAT_MIFIRQ) {
1662 		/* MIF signalled an interrupt, were we polling it? */
1663 		netdev_err(hp->dev, "Happy Meal MIF interrupt.\n");
1664 	}
1665 
1666 	if (status &
1667 	    (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
1668 		/* All sorts of transmit DMA errors. */
1669 		netdev_err(hp->dev, "Happy Meal tx DMA errors [ %s%s%s%s]\n",
1670 			   status & GREG_STAT_TXEACK ? "GenericError " : "",
1671 			   status & GREG_STAT_TXLERR ? "LateError " : "",
1672 			   status & GREG_STAT_TXPERR ? "ParityError " : "",
1673 			   status & GREG_STAT_TXTERR ? "TagBotch " : "");
1674 		reset = 1;
1675 	}
1676 
1677 	if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
1678 		/* Bus or parity error when cpu accessed happy meal registers
1679 		 * or its internal FIFOs.  Should never see this.
1680 		 */
1681 		netdev_err(hp->dev,
1682 			   "Happy Meal register access SBUS slave (%s) error.\n",
1683 			   (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
1684 		reset = 1;
1685 	}
1686 
1687 	if (reset) {
1688 		netdev_notice(hp->dev, "Resetting...\n");
1689 		happy_meal_init(hp);
1690 		return 1;
1691 	}
1692 	return 0;
1693 }
1694 
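/* Reclaim completed transmit descriptors: walk the ring from tx_old towards
 * tx_new, stop at the first descriptor (or fragment chain) still owned by the
 * chip, unmap each fragment's DMA buffer, free the skb, and restart the queue
 * once enough slots are free again.
 */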
1695 /* hp->happy_lock must be held */
1696 static void happy_meal_tx(struct happy_meal *hp)
1697 {
1698 	struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1699 	struct happy_meal_txd *this;
1700 	struct net_device *dev = hp->dev;
1701 	int elem;
1702 
1703 	elem = hp->tx_old;
1704 	while (elem != hp->tx_new) {
1705 		struct sk_buff *skb;
1706 		u32 flags, dma_addr, dma_len;
1707 		int frag;
1708 
1709 		netdev_vdbg(hp->dev, "TX[%d]\n", elem);
1710 		this = &txbase[elem];
1711 		flags = hme_read_desc32(hp, &this->tx_flags);
1712 		if (flags & TXFLAG_OWN)
1713 			break;
1714 		skb = hp->tx_skbs[elem];
1715 		if (skb_shinfo(skb)->nr_frags) {
1716 			int last;
1717 
1718 			last = elem + skb_shinfo(skb)->nr_frags;
1719 			last &= (TX_RING_SIZE - 1);
1720 			flags = hme_read_desc32(hp, &txbase[last].tx_flags);
1721 			if (flags & TXFLAG_OWN)
1722 				break;
1723 		}
1724 		hp->tx_skbs[elem] = NULL;
1725 		dev->stats.tx_bytes += skb->len;
1726 
1727 		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1728 			dma_addr = hme_read_desc32(hp, &this->tx_addr);
1729 			dma_len = hme_read_desc32(hp, &this->tx_flags);
1730 
1731 			dma_len &= TXFLAG_SIZE;
1732 			if (!frag)
1733 				dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1734 			else
1735 				dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
1736 
1737 			elem = NEXT_TX(elem);
1738 			this = &txbase[elem];
1739 		}
1740 
1741 		dev_consume_skb_irq(skb);
1742 		dev->stats.tx_packets++;
1743 	}
1744 	hp->tx_old = elem;
1745 
1746 	if (netif_queue_stopped(dev) &&
1747 	    TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
1748 		netif_wake_queue(dev);
1749 }
1750 
1751 /* Originally I used to handle the allocation failure by just giving back
1752  * that one ring buffer to the happy meal.  Problem is that usually when that
1753  * condition is triggered, the happy meal expects you to do something reasonable
1754  * with all of the packets it has DMA'd in.  So now I just drop the entire
1755  * ring when we cannot get a new skb and give them all back to the happy meal,
1756  * maybe things will be "happier" now.
1757  *
1758  * hp->happy_lock must be held
1759  */
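/* Two paths below: frames longer than RX_COPY_THRESHOLD hand their DMA buffer
 * straight up the stack and the ring slot is refilled with a freshly
 * allocated skb, while short frames are copied into a small new skb so the
 * original ring buffer can be recycled as-is.
 */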
1760 static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
1761 {
1762 	struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
1763 	struct happy_meal_rxd *this;
1764 	int elem = hp->rx_new, drops = 0;
1765 	u32 flags;
1766 
1767 	this = &rxbase[elem];
1768 	while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
1769 		struct sk_buff *skb;
1770 		int len = flags >> 16;
1771 		u16 csum = flags & RXFLAG_CSUM;
1772 		u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
1773 
1774 		/* Check for errors. */
1775 		if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
1776 			netdev_vdbg(dev, "RX[%d ERR(%08x)]", elem, flags);
1777 			dev->stats.rx_errors++;
1778 			if (len < ETH_ZLEN)
1779 				dev->stats.rx_length_errors++;
1780 			if (len & (RXFLAG_OVERFLOW >> 16)) {
1781 				dev->stats.rx_over_errors++;
1782 				dev->stats.rx_fifo_errors++;
1783 			}
1784 
1785 			/* Return it to the Happy meal. */
1786 	drop_it:
1787 			dev->stats.rx_dropped++;
1788 			hme_write_rxd(hp, this,
1789 				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1790 				      dma_addr);
1791 			goto next;
1792 		}
1793 		skb = hp->rx_skbs[elem];
1794 		if (len > RX_COPY_THRESHOLD) {
1795 			struct sk_buff *new_skb;
1796 			u32 mapping;
1797 
1798 			/* Now refill the entry, if we can. */
1799 			new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1800 			if (new_skb == NULL) {
1801 				drops++;
1802 				goto drop_it;
1803 			}
1804 			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1805 			mapping = dma_map_single(hp->dma_dev, new_skb->data,
1806 						 RX_BUF_ALLOC_SIZE,
1807 						 DMA_FROM_DEVICE);
1808 			if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
1809 				dev_kfree_skb_any(new_skb);
1810 				drops++;
1811 				goto drop_it;
1812 			}
1813 
1814 			dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
1815 			hp->rx_skbs[elem] = new_skb;
1816 			hme_write_rxd(hp, this,
1817 				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1818 				      mapping);
1819 			skb_reserve(new_skb, RX_OFFSET);
1820 
1821 			/* Trim the original skb for the netif. */
1822 			skb_trim(skb, len);
1823 		} else {
1824 			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
1825 
1826 			if (copy_skb == NULL) {
1827 				drops++;
1828 				goto drop_it;
1829 			}
1830 
1831 			skb_reserve(copy_skb, 2);
1832 			skb_put(copy_skb, len);
1833 			dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1834 			skb_copy_from_linear_data(skb, copy_skb->data, len);
1835 			dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
1836 			/* Reuse original ring buffer. */
1837 			hme_write_rxd(hp, this,
1838 				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
1839 				      dma_addr);
1840 
1841 			skb = copy_skb;
1842 		}
1843 
1844 		/* This card is _fucking_ hot... */
1845 		skb->csum = csum_unfold(~(__force __sum16)htons(csum));
1846 		skb->ip_summed = CHECKSUM_COMPLETE;
1847 
1848 		netdev_vdbg(dev, "RX[%d len=%d csum=%4x]", elem, len, csum);
1849 		skb->protocol = eth_type_trans(skb, dev);
1850 		netif_rx(skb);
1851 
1852 		dev->stats.rx_packets++;
1853 		dev->stats.rx_bytes += len;
1854 	next:
1855 		elem = NEXT_RX(elem);
1856 		this = &rxbase[elem];
1857 	}
1858 	hp->rx_new = elem;
1859 	if (drops)
1860 		netdev_info(hp->dev, "Memory squeeze, deferring packet.\n");
1861 }
1862 
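/* Interrupt entry point: GREG_STAT is read exactly once (it is cleared by the
 * read, see the comment above happy_meal_is_not_so_happy), error bits may
 * trigger a full chip re-init, and TX completions then RX frames are serviced
 * under happy_lock.
 */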
1863 static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
1864 {
1865 	struct net_device *dev = dev_id;
1866 	struct happy_meal *hp  = netdev_priv(dev);
1867 	u32 happy_status       = hme_read32(hp, hp->gregs + GREG_STAT);
1868 
1869 	HMD("status=%08x\n", happy_status);
1870 	if (!happy_status)
1871 		return IRQ_NONE;
1872 
1873 	spin_lock(&hp->happy_lock);
1874 
1875 	if (happy_status & GREG_STAT_ERRORS) {
1876 		if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
1877 			goto out;
1878 	}
1879 
1880 	if (happy_status & GREG_STAT_TXALL)
1881 		happy_meal_tx(hp);
1882 
1883 	if (happy_status & GREG_STAT_RXTOHOST)
1884 		happy_meal_rx(hp, dev);
1885 
1886 	HMD("done\n");
1887 out:
1888 	spin_unlock(&hp->happy_lock);
1889 
1890 	return IRQ_HANDLED;
1891 }
1892 
1893 static int happy_meal_open(struct net_device *dev)
1894 {
1895 	struct happy_meal *hp = netdev_priv(dev);
1896 	int res;
1897 
1898 	res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
1899 			  dev->name, dev);
1900 	if (res) {
1901 		netdev_err(dev, "Can't order irq %d to go.\n", hp->irq);
1902 		return res;
1903 	}
1904 
1905 	HMD("to happy_meal_init\n");
1906 
1907 	spin_lock_irq(&hp->happy_lock);
1908 	res = happy_meal_init(hp);
1909 	spin_unlock_irq(&hp->happy_lock);
1910 
1911 	if (res)
1912 		free_irq(hp->irq, dev);
1913 	return res;
1914 }
1915 
1916 static int happy_meal_close(struct net_device *dev)
1917 {
1918 	struct happy_meal *hp = netdev_priv(dev);
1919 
1920 	spin_lock_irq(&hp->happy_lock);
1921 	happy_meal_stop(hp, hp->gregs);
1922 	happy_meal_clean_rings(hp);
1923 
1924 	/* If auto-negotiation timer is running, kill it. */
1925 	del_timer(&hp->happy_timer);
1926 
1927 	spin_unlock_irq(&hp->happy_lock);
1928 
1929 	free_irq(hp->irq, dev);
1930 
1931 	return 0;
1932 }
1933 
1934 static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
1935 {
1936 	struct happy_meal *hp = netdev_priv(dev);
1937 
1938 	netdev_err(dev, "transmit timed out, resetting\n");
1939 	tx_dump_log();
1940 	netdev_err(dev, "Happy Status %08x TX[%08x:%08x]\n",
1941 		   hme_read32(hp, hp->gregs + GREG_STAT),
1942 		   hme_read32(hp, hp->etxregs + ETX_CFG),
1943 		   hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
1944 
1945 	spin_lock_irq(&hp->happy_lock);
1946 	happy_meal_init(hp);
1947 	spin_unlock_irq(&hp->happy_lock);
1948 
1949 	netif_wake_queue(dev);
1950 }
1951 
1952 static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
1953 				 u32 first_len, u32 first_entry, u32 entry)
1954 {
1955 	struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
1956 
1957 	dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
1958 
1959 	first_entry = NEXT_TX(first_entry);
1960 	while (first_entry != entry) {
1961 		struct happy_meal_txd *this = &txbase[first_entry];
1962 		u32 addr, len;
1963 
1964 		addr = hme_read_desc32(hp, &this->tx_addr);
1965 		len = hme_read_desc32(hp, &this->tx_flags);
1966 		len &= TXFLAG_SIZE;
1967 		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
		/* Advance to the next descriptor; without this the walk would
		 * never terminate once entered.
		 */
		first_entry = NEXT_TX(first_entry);
1968 	}
1969 }
1970 
1971 static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
1972 					 struct net_device *dev)
1973 {
1974 	struct happy_meal *hp = netdev_priv(dev);
1975 	int entry;
1976 	u32 tx_flags;
1977 
1978 	tx_flags = TXFLAG_OWN;
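	/* With CHECKSUM_PARTIAL the chip does the summing: the offset where
	 * checksumming starts and the offset where the result is stuffed are
	 * packed into the descriptor flags (bit 14, TXFLAG_CSBUFBEGIN, and
	 * bit 20, TXFLAG_CSLOCATION, respectively).
	 */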
1979 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1980 		const u32 csum_start_off = skb_checksum_start_offset(skb);
1981 		const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
1982 
1983 		tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
1984 			    ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
1985 			    ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
1986 	}
1987 
1988 	spin_lock_irq(&hp->happy_lock);
1989 
1990 	if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
1991 		netif_stop_queue(dev);
1992 		spin_unlock_irq(&hp->happy_lock);
1993 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
1994 		return NETDEV_TX_BUSY;
1995 	}
1996 
1997 	entry = hp->tx_new;
1998 	netdev_vdbg(dev, "SX<l[%d]e[%d]>\n", skb->len, entry);
1999 	hp->tx_skbs[entry] = skb;
2000 
2001 	if (skb_shinfo(skb)->nr_frags == 0) {
2002 		u32 mapping, len;
2003 
2004 		len = skb->len;
2005 		mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2006 		if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2007 			goto out_dma_error;
2008 		tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2009 		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2010 			      (tx_flags | (len & TXFLAG_SIZE)),
2011 			      mapping);
2012 		entry = NEXT_TX(entry);
2013 	} else {
2014 		u32 first_len, first_mapping;
2015 		int frag, first_entry = entry;
2016 
2017 		/* We must give this initial chunk to the device last.
2018 		 * Otherwise we could race with the device.
2019 		 */
2020 		first_len = skb_headlen(skb);
2021 		first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2022 					       DMA_TO_DEVICE);
2023 		if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2024 			goto out_dma_error;
2025 		entry = NEXT_TX(entry);
2026 
2027 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
2028 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
2029 			u32 len, mapping, this_txflags;
2030 
2031 			len = skb_frag_size(this_frag);
2032 			mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2033 						   0, len, DMA_TO_DEVICE);
2034 			if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2035 				unmap_partial_tx_skb(hp, first_mapping, first_len,
2036 						     first_entry, entry);
2037 				goto out_dma_error;
2038 			}
2039 			this_txflags = tx_flags;
2040 			if (frag == skb_shinfo(skb)->nr_frags - 1)
2041 				this_txflags |= TXFLAG_EOP;
2042 			hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2043 				      (this_txflags | (len & TXFLAG_SIZE)),
2044 				      mapping);
2045 			entry = NEXT_TX(entry);
2046 		}
2047 		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
2048 			      (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
2049 			      first_mapping);
2050 	}
2051 
2052 	hp->tx_new = entry;
2053 
2054 	if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
2055 		netif_stop_queue(dev);
2056 
2057 	/* Get it going. */
2058 	hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
2059 
2060 	spin_unlock_irq(&hp->happy_lock);
2061 
2062 	tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2063 	return NETDEV_TX_OK;
2064 
2065 out_dma_error:
2066 	hp->tx_skbs[hp->tx_new] = NULL;
2067 	spin_unlock_irq(&hp->happy_lock);
2068 
2069 	dev_kfree_skb_any(skb);
2070 	dev->stats.tx_dropped++;
2071 	return NETDEV_TX_OK;
2072 }
2073 
2074 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2075 {
2076 	struct happy_meal *hp = netdev_priv(dev);
2077 
2078 	spin_lock_irq(&hp->happy_lock);
2079 	happy_meal_get_counters(hp, hp->bigmacregs);
2080 	spin_unlock_irq(&hp->happy_lock);
2081 
2082 	return &dev->stats;
2083 }
2084 
2085 static void happy_meal_set_multicast(struct net_device *dev)
2086 {
2087 	struct happy_meal *hp = netdev_priv(dev);
2088 	void __iomem *bregs = hp->bigmacregs;
2089 	struct netdev_hw_addr *ha;
2090 	u32 crc;
2091 
2092 	spin_lock_irq(&hp->happy_lock);
2093 
2094 	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
2095 		hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
2096 		hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
2097 		hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
2098 		hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
2099 	} else if (dev->flags & IFF_PROMISC) {
2100 		hme_write32(hp, bregs + BMAC_RXCFG,
2101 			    hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
2102 	} else {
2103 		u16 hash_table[4];
2104 
2105 		memset(hash_table, 0, sizeof(hash_table));
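		/* 64-bit multicast hash filter: the top six bits of the
		 * little-endian CRC-32 of each address pick one of 64 bits
		 * spread over four 16-bit HTABLE registers (crc >> 4 selects
		 * the register, crc & 0xf the bit within it).
		 */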
2106 		netdev_for_each_mc_addr(ha, dev) {
2107 			crc = ether_crc_le(6, ha->addr);
2108 			crc >>= 26;
2109 			hash_table[crc >> 4] |= 1 << (crc & 0xf);
2110 		}
2111 		hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
2112 		hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
2113 		hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
2114 		hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
2115 	}
2116 
2117 	spin_unlock_irq(&hp->happy_lock);
2118 }
2119 
2120 /* Ethtool support... */
2121 static int hme_get_link_ksettings(struct net_device *dev,
2122 				  struct ethtool_link_ksettings *cmd)
2123 {
2124 	struct happy_meal *hp = netdev_priv(dev);
2125 	u32 speed;
2126 	u32 supported;
2127 
2128 	supported =
2129 		(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2130 		 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2131 		 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
2132 
2133 	/* XXX hardcoded stuff for now */
2134 	cmd->base.port = PORT_TP; /* XXX no MII support */
2135 	cmd->base.phy_address = 0; /* XXX fixed PHYAD */
2136 
2137 	/* Record PHY settings. */
2138 	spin_lock_irq(&hp->happy_lock);
2139 	hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2140 	hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
2141 	spin_unlock_irq(&hp->happy_lock);
2142 
2143 	if (hp->sw_bmcr & BMCR_ANENABLE) {
2144 		cmd->base.autoneg = AUTONEG_ENABLE;
2145 		speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
2146 			 SPEED_100 : SPEED_10);
2147 		if (speed == SPEED_100)
2148 			cmd->base.duplex =
2149 				(hp->sw_lpa & (LPA_100FULL)) ?
2150 				DUPLEX_FULL : DUPLEX_HALF;
2151 		else
2152 			cmd->base.duplex =
2153 				(hp->sw_lpa & (LPA_10FULL)) ?
2154 				DUPLEX_FULL : DUPLEX_HALF;
2155 	} else {
2156 		cmd->base.autoneg = AUTONEG_DISABLE;
2157 		speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
2158 		cmd->base.duplex =
2159 			(hp->sw_bmcr & BMCR_FULLDPLX) ?
2160 			DUPLEX_FULL : DUPLEX_HALF;
2161 	}
2162 	cmd->base.speed = speed;
2163 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2164 						supported);
2165 
2166 	return 0;
2167 }
2168 
2169 static int hme_set_link_ksettings(struct net_device *dev,
2170 				  const struct ethtool_link_ksettings *cmd)
2171 {
2172 	struct happy_meal *hp = netdev_priv(dev);
2173 
2174 	/* Verify the settings we care about. */
2175 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
2176 	    cmd->base.autoneg != AUTONEG_DISABLE)
2177 		return -EINVAL;
2178 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
2179 	    ((cmd->base.speed != SPEED_100 &&
2180 	      cmd->base.speed != SPEED_10) ||
2181 	     (cmd->base.duplex != DUPLEX_HALF &&
2182 	      cmd->base.duplex != DUPLEX_FULL)))
2183 		return -EINVAL;
2184 
2185 	/* Ok, do it to it. */
2186 	spin_lock_irq(&hp->happy_lock);
2187 	del_timer(&hp->happy_timer);
2188 	happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
2189 	spin_unlock_irq(&hp->happy_lock);
2190 
2191 	return 0;
2192 }
2193 
2194 static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2195 {
2196 	struct happy_meal *hp = netdev_priv(dev);
2197 
2198 	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
2199 	if (hp->happy_flags & HFLAG_PCI) {
2200 		struct pci_dev *pdev = hp->happy_dev;
2201 		strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
2202 	}
2203 #ifdef CONFIG_SBUS
2204 	else {
2205 		const struct linux_prom_registers *regs;
2206 		struct platform_device *op = hp->happy_dev;
2207 		regs = of_get_property(op->dev.of_node, "regs", NULL);
2208 		if (regs)
2209 			snprintf(info->bus_info, sizeof(info->bus_info),
2210 				"SBUS:%d",
2211 				regs->which_io);
2212 	}
2213 #endif
2214 }
2215 
2216 static u32 hme_get_link(struct net_device *dev)
2217 {
2218 	struct happy_meal *hp = netdev_priv(dev);
2219 
2220 	spin_lock_irq(&hp->happy_lock);
2221 	hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
2222 	spin_unlock_irq(&hp->happy_lock);
2223 
2224 	return hp->sw_bmsr & BMSR_LSTATUS;
2225 }
2226 
2227 static const struct ethtool_ops hme_ethtool_ops = {
2228 	.get_drvinfo		= hme_get_drvinfo,
2229 	.get_link		= hme_get_link,
2230 	.get_link_ksettings	= hme_get_link_ksettings,
2231 	.set_link_ksettings	= hme_set_link_ksettings,
2232 };
2233 
2234 #ifdef CONFIG_SBUS
2235 /* Given a happy meal sbus device, find its quattro parent.
2236  * If none exist, allocate and return a new one.
2237  *
2238  * Return NULL on failure.
2239  */
2240 static struct quattro *quattro_sbus_find(struct platform_device *child)
2241 {
2242 	struct device *parent = child->dev.parent;
2243 	struct platform_device *op;
2244 	struct quattro *qp;
2245 
2246 	op = to_platform_device(parent);
2247 	qp = platform_get_drvdata(op);
2248 	if (qp)
2249 		return qp;
2250 
2251 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2252 	if (!qp)
2253 		return NULL;
2254 
2255 	qp->quattro_dev = child;
2256 	qp->next = qfe_sbus_list;
2257 	qfe_sbus_list = qp;
2258 
2259 	platform_set_drvdata(op, qp);
2260 	return qp;
2261 }
2262 #endif /* CONFIG_SBUS */
2263 
2264 #ifdef CONFIG_PCI
2265 static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2266 {
2267 	int i;
2268 	struct pci_dev *bdev = pdev->bus->self;
2269 	struct quattro *qp;
2270 
2271 	if (!bdev)
2272 		return ERR_PTR(-ENODEV);
2273 
2274 	for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
2275 		struct pci_dev *qpdev = qp->quattro_dev;
2276 
2277 		if (qpdev == bdev)
2278 			return qp;
2279 	}
2280 
2281 	qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
2282 	if (!qp)
2283 		return ERR_PTR(-ENOMEM);
2284 
2285 	for (i = 0; i < 4; i++)
2286 		qp->happy_meals[i] = NULL;
2287 
2288 	qp->quattro_dev = bdev;
2289 	qp->next = qfe_pci_list;
2290 	qfe_pci_list = qp;
2291 
2292 	/* No range tricks necessary on PCI. */
2293 	qp->nranges = 0;
2294 	return qp;
2295 }
2296 #endif /* CONFIG_PCI */
2297 
2298 static const struct net_device_ops hme_netdev_ops = {
2299 	.ndo_open		= happy_meal_open,
2300 	.ndo_stop		= happy_meal_close,
2301 	.ndo_start_xmit		= happy_meal_start_xmit,
2302 	.ndo_tx_timeout		= happy_meal_tx_timeout,
2303 	.ndo_get_stats		= happy_meal_get_stats,
2304 	.ndo_set_rx_mode	= happy_meal_set_multicast,
2305 	.ndo_set_mac_address 	= eth_mac_addr,
2306 	.ndo_validate_addr	= eth_validate_addr,
2307 };
2308 
2309 #ifdef CONFIG_PCI
2310 static int is_quattro_p(struct pci_dev *pdev)
2311 {
2312 	struct pci_dev *busdev = pdev->bus->self;
2313 	struct pci_dev *this_pdev;
2314 	int n_hmes;
2315 
2316 	if (!busdev || busdev->vendor != PCI_VENDOR_ID_DEC ||
2317 	    busdev->device != PCI_DEVICE_ID_DEC_21153)
2318 		return 0;
2319 
2320 	n_hmes = 0;
2321 	list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
2322 		if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
2323 		    this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
2324 			n_hmes++;
2325 	}
2326 
2327 	if (n_hmes != 4)
2328 		return 0;
2329 
2330 	return 1;
2331 }
2332 
2333 /* Fetch MAC address from vital product data of PCI ROM. */
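/* The match below appears to be a VPD "NA" (network address) entry: 0x4e 0x41
 * is "NA" and 0x06 its 6-byte payload length; the MAC address is read from
 * the bytes immediately following the matched header.
 */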
2334 static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
2335 {
2336 	int this_offset;
2337 
2338 	for (this_offset = 0x20; this_offset < len; this_offset++) {
2339 		void __iomem *p = rom_base + this_offset;
2340 
2341 		if (readb(p + 0) != 0x90 ||
2342 		    readb(p + 1) != 0x00 ||
2343 		    readb(p + 2) != 0x09 ||
2344 		    readb(p + 3) != 0x4e ||
2345 		    readb(p + 4) != 0x41 ||
2346 		    readb(p + 5) != 0x06)
2347 			continue;
2348 
2349 		this_offset += 6;
2350 		p += 6;
2351 
2352 		if (index == 0) {
2353 			for (int i = 0; i < 6; i++)
2354 				dev_addr[i] = readb(p + i);
2355 			return 1;
2356 		}
2357 		index--;
2358 	}
2359 	return 0;
2360 }
2361 
2362 static void __maybe_unused get_hme_mac_nonsparc(struct pci_dev *pdev,
2363 						unsigned char *dev_addr)
2364 {
2365 	void __iomem *p;
2366 	size_t size;
2367 
2368 	p = pci_map_rom(pdev, &size);
2369 	if (p) {
2370 		int index = 0;
2371 		int found;
2372 
2373 		if (is_quattro_p(pdev))
2374 			index = PCI_SLOT(pdev->devfn);
2375 
2376 		found = readb(p) == 0x55 &&
2377 			readb(p + 1) == 0xaa &&
2378 			find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
2379 		pci_unmap_rom(pdev, p);
2380 		if (found)
2381 			return;
2382 	}
2383 
2384 	/* Sun MAC prefix then 3 random bytes. */
2385 	dev_addr[0] = 0x08;
2386 	dev_addr[1] = 0x00;
2387 	dev_addr[2] = 0x20;
2388 	get_random_bytes(&dev_addr[3], 3);
2389 }
2390 #endif
2391 
2392 static void happy_meal_addr_init(struct happy_meal *hp,
2393 				 struct device_node *dp, int qfe_slot)
2394 {
2395 	int i;
2396 
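	/* Address selection order: an explicit macaddr= module parameter wins
	 * (and is bumped so each further port gets a consecutive address),
	 * then a Quattro slot's "local-mac-address" OF property, then the
	 * SPARC idprom or, elsewhere, the PCI ROM VPD / a random address.
	 */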
2397 	for (i = 0; i < 6; i++) {
2398 		if (macaddr[i] != 0)
2399 			break;
2400 	}
2401 
2402 	if (i < 6) { /* a mac address was given */
2403 		u8 addr[ETH_ALEN];
2404 
2405 		for (i = 0; i < 6; i++)
2406 			addr[i] = macaddr[i];
2407 		eth_hw_addr_set(hp->dev, addr);
2408 		macaddr[5]++;
2409 	} else {
2410 #ifdef CONFIG_SPARC
2411 		const unsigned char *addr;
2412 		int len;
2413 
2414 		/* If the user did not specify a MAC address explicitly, use
2415 		 * the Quattro local-mac-address property...
2416 		 */
2417 		if (qfe_slot != -1) {
2418 			addr = of_get_property(dp, "local-mac-address", &len);
2419 			if (addr && len == 6) {
2420 				eth_hw_addr_set(hp->dev, addr);
2421 				return;
2422 			}
2423 		}
2424 
2425 		eth_hw_addr_set(hp->dev, idprom->id_ethaddr);
2426 #else
2427 		u8 addr[ETH_ALEN];
2428 
2429 		get_hme_mac_nonsparc(hp->happy_dev, addr);
2430 		eth_hw_addr_set(hp->dev, addr);
2431 #endif
2432 	}
2433 }
2434 
2435 static int happy_meal_common_probe(struct happy_meal *hp,
2436 				   struct device_node *dp)
2437 {
2438 	struct net_device *dev = hp->dev;
2439 	int err;
2440 
2441 #ifdef CONFIG_SPARC
2442 	hp->hm_revision = of_getintprop_default(dp, "hm-rev", hp->hm_revision);
2443 #endif
2444 
2445 	/* Now enable the feature flags we can. */
2446 	if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
2447 		hp->happy_flags |= HFLAG_20_21;
2448 	else if (hp->hm_revision != 0xa0)
2449 		hp->happy_flags |= HFLAG_NOT_A0;
2450 
2451 	hp->happy_block = dmam_alloc_coherent(hp->dma_dev, PAGE_SIZE,
2452 					      &hp->hblock_dvma, GFP_KERNEL);
2453 	if (!hp->happy_block)
2454 		return -ENOMEM;
2455 
2456 	/* Force check of the link first time we are brought up. */
2457 	hp->linkcheck = 0;
2458 
2459 	/* Force timer state to 'asleep' with count of zero. */
2460 	hp->timer_state = asleep;
2461 	hp->timer_ticks = 0;
2462 
2463 	timer_setup(&hp->happy_timer, happy_meal_timer, 0);
2464 
2465 	dev->netdev_ops = &hme_netdev_ops;
2466 	dev->watchdog_timeo = 5 * HZ;
2467 	dev->ethtool_ops = &hme_ethtool_ops;
2468 
2469 	/* Happy Meal can do it all... */
2470 	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2471 	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2472 
2473 
2474 	/* Grrr, Happy Meal comes up by default not advertising
2475 	 * full duplex 100baseT capabilities, fix this.
2476 	 */
2477 	spin_lock_irq(&hp->happy_lock);
2478 	happy_meal_set_initial_advertisement(hp);
2479 	spin_unlock_irq(&hp->happy_lock);
2480 
2481 	err = devm_register_netdev(hp->dma_dev, dev);
2482 	if (err)
2483 		dev_err(hp->dma_dev, "Cannot register net device, aborting.\n");
2484 	return err;
2485 }
2486 
2487 #ifdef CONFIG_SBUS
2488 static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2489 {
2490 	struct device_node *dp = op->dev.of_node, *sbus_dp;
2491 	struct quattro *qp = NULL;
2492 	struct happy_meal *hp;
2493 	struct net_device *dev;
2494 	int qfe_slot = -1;
2495 	int err;
2496 
2497 	sbus_dp = op->dev.parent->of_node;
2498 
2499 	/* We can match PCI devices too, do not accept those here. */
2500 	if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi"))
2501 		return -ENODEV;
2502 
2503 	if (is_qfe) {
2504 		qp = quattro_sbus_find(op);
2505 		if (qp == NULL)
2506 			return -ENODEV;
2507 		for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2508 			if (qp->happy_meals[qfe_slot] == NULL)
2509 				break;
2510 		if (qfe_slot == 4)
2511 			return -ENODEV;
2512 	}
2513 
2514 	dev = devm_alloc_etherdev(&op->dev, sizeof(struct happy_meal));
2515 	if (!dev)
2516 		return -ENOMEM;
2517 	SET_NETDEV_DEV(dev, &op->dev);
2518 
2519 	hp = netdev_priv(dev);
2520 	hp->dev = dev;
2521 	hp->happy_dev = op;
2522 	hp->dma_dev = &op->dev;
2523 	happy_meal_addr_init(hp, dp, qfe_slot);
2524 
2525 	spin_lock_init(&hp->happy_lock);
2526 
2527 	if (qp != NULL) {
2528 		hp->qfe_parent = qp;
2529 		hp->qfe_ent = qfe_slot;
2530 		qp->happy_meals[qfe_slot] = dev;
2531 	}
2532 
2533 	hp->gregs = devm_platform_ioremap_resource(op, 0);
2534 	if (IS_ERR(hp->gregs)) {
2535 		dev_err(&op->dev, "Cannot map global registers.\n");
2536 		err = PTR_ERR(hp->gregs);
2537 		goto err_out_clear_quattro;
2538 	}
2539 
2540 	hp->etxregs = devm_platform_ioremap_resource(op, 1);
2541 	if (IS_ERR(hp->etxregs)) {
2542 		dev_err(&op->dev, "Cannot map MAC TX registers.\n");
2543 		err = PTR_ERR(hp->etxregs);
2544 		goto err_out_clear_quattro;
2545 	}
2546 
2547 	hp->erxregs = devm_platform_ioremap_resource(op, 2);
2548 	if (IS_ERR(hp->erxregs)) {
2549 		dev_err(&op->dev, "Cannot map MAC RX registers.\n");
2550 		err = PTR_ERR(hp->erxregs);
2551 		goto err_out_clear_quattro;
2552 	}
2553 
2554 	hp->bigmacregs = devm_platform_ioremap_resource(op, 3);
2555 	if (IS_ERR(hp->bigmacregs)) {
2556 		dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
2557 		err = PTR_ERR(hp->bigmacregs);
2558 		goto err_out_clear_quattro;
2559 	}
2560 
2561 	hp->tcvregs = devm_platform_ioremap_resource(op, 4);
2562 	if (IS_ERR(hp->tcvregs)) {
2563 		dev_err(&op->dev, "Cannot map TCVR registers.\n");
2564 		err = PTR_ERR(hp->tcvregs);
2565 		goto err_out_clear_quattro;
2566 	}
2567 
2568 	hp->hm_revision = 0xa0;
2569 
2570 	if (qp != NULL)
2571 		hp->happy_flags |= HFLAG_QUATTRO;
2572 
2573 	hp->irq = op->archdata.irqs[0];
2574 
2575 	/* Get the supported DVMA burst sizes from our Happy SBUS. */
2576 	hp->happy_bursts = of_getintprop_default(sbus_dp,
2577 						 "burst-sizes", 0x00);
2578 
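	/* The hp->read32/write32 style accessors only exist when both SBUS
	 * and PCI support are built in (see sunhme.h); with a single bus type
	 * they are resolved at compile time, which is why this hook-up is
	 * guarded by the seemingly inverted CONFIG_PCI here and CONFIG_SBUS
	 * in the PCI probe.
	 */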
2579 #ifdef CONFIG_PCI
2580 	/* Hook up SBUS register/descriptor accessors. */
2581 	hp->read_desc32 = sbus_hme_read_desc32;
2582 	hp->write_txd = sbus_hme_write_txd;
2583 	hp->write_rxd = sbus_hme_write_rxd;
2584 	hp->read32 = sbus_hme_read32;
2585 	hp->write32 = sbus_hme_write32;
2586 #endif
2587 
2588 	err = happy_meal_common_probe(hp, dp);
2589 	if (err)
2590 		goto err_out_clear_quattro;
2591 
2592 	platform_set_drvdata(op, hp);
2593 
2594 	if (qfe_slot != -1)
2595 		netdev_info(dev,
2596 			    "Quattro HME slot %d (SBUS) 10/100baseT Ethernet %pM\n",
2597 			    qfe_slot, dev->dev_addr);
2598 	else
2599 		netdev_info(dev, "HAPPY MEAL (SBUS) 10/100baseT Ethernet %pM\n",
2600 			    dev->dev_addr);
2601 
2602 	return 0;
2603 
2604 err_out_clear_quattro:
2605 	if (qp)
2606 		qp->happy_meals[qfe_slot] = NULL;
2607 	return err;
2608 }
2609 #endif
2610 
2611 #ifdef CONFIG_PCI
2612 static int happy_meal_pci_probe(struct pci_dev *pdev,
2613 				const struct pci_device_id *ent)
2614 {
2615 	struct device_node *dp = NULL;
2616 	struct quattro *qp = NULL;
2617 	struct happy_meal *hp;
2618 	struct net_device *dev;
2619 	void __iomem *hpreg_base;
2620 	struct resource *hpreg_res;
2621 	char prom_name[64];
2622 	int qfe_slot = -1;
2623 	int err = -ENODEV;
2624 
2625 	/* Now make sure pci_dev cookie is there. */
2626 #ifdef CONFIG_SPARC
2627 	dp = pci_device_to_OF_node(pdev);
2628 	snprintf(prom_name, sizeof(prom_name), "%pOFn", dp);
2629 #else
2630 	if (is_quattro_p(pdev))
2631 		strcpy(prom_name, "SUNW,qfe");
2632 	else
2633 		strcpy(prom_name, "SUNW,hme");
2634 #endif
2635 
2636 	err = pcim_enable_device(pdev);
2637 	if (err)
2638 		return err;
2639 	pci_set_master(pdev);
2640 
2641 	if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
2642 		qp = quattro_pci_find(pdev);
2643 		if (IS_ERR(qp))
2644 			return PTR_ERR(qp);
2645 
2646 		for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
2647 			if (!qp->happy_meals[qfe_slot])
2648 				break;
2649 
2650 		if (qfe_slot == 4)
2651 			return -ENODEV;
2652 	}
2653 
2654 	dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct happy_meal));
2655 	if (!dev)
2656 		return -ENOMEM;
2657 	SET_NETDEV_DEV(dev, &pdev->dev);
2658 
2659 	hp = netdev_priv(dev);
2660 	hp->dev = dev;
2661 	hp->happy_dev = pdev;
2662 	hp->dma_dev = &pdev->dev;
2663 
2664 	spin_lock_init(&hp->happy_lock);
2665 
2666 	if (qp != NULL) {
2667 		hp->qfe_parent = qp;
2668 		hp->qfe_ent = qfe_slot;
2669 		qp->happy_meals[qfe_slot] = dev;
2670 	}
2671 
2672 	err = -EINVAL;
2673 	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
2674 		dev_err(&pdev->dev,
2675 			"Cannot find proper PCI device base address.\n");
2676 		goto err_out_clear_quattro;
2677 	}
2678 
2679 	hpreg_res = devm_request_mem_region(&pdev->dev,
2680 					    pci_resource_start(pdev, 0),
2681 					    pci_resource_len(pdev, 0),
2682 					    DRV_NAME);
2683 	if (!hpreg_res) {
2684 		err = -EBUSY;
2685 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
2686 		goto err_out_clear_quattro;
2687 	}
2688 
2689 	hpreg_base = pcim_iomap(pdev, 0, 0x8000);
2690 	if (!hpreg_base) {
2691 		err = -ENOMEM;
2692 		dev_err(&pdev->dev, "Unable to remap card memory.\n");
2693 		goto err_out_clear_quattro;
2694 	}
2695 
2696 	happy_meal_addr_init(hp, dp, qfe_slot);
2697 
2698 	/* Layout registers. */
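	/* All five register blocks live at fixed offsets within the single
	 * 0x8000-byte PCI BAR, mirroring the separate SBUS resources mapped
	 * in happy_meal_sbus_probe_one().
	 */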
2699 	hp->gregs      = (hpreg_base + 0x0000UL);
2700 	hp->etxregs    = (hpreg_base + 0x2000UL);
2701 	hp->erxregs    = (hpreg_base + 0x4000UL);
2702 	hp->bigmacregs = (hpreg_base + 0x6000UL);
2703 	hp->tcvregs    = (hpreg_base + 0x7000UL);
2704 
2705 	if (IS_ENABLED(CONFIG_SPARC))
2706 		hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
2707 	else
2708 		hp->hm_revision = 0x20;
2709 
2710 	if (qp != NULL)
2711 		hp->happy_flags |= HFLAG_QUATTRO;
2712 
2713 	/* And of course, indicate this is PCI. */
2714 	hp->happy_flags |= HFLAG_PCI;
2715 
2716 #ifdef CONFIG_SPARC
2717 	/* Assume PCI happy meals can handle all burst sizes. */
2718 	hp->happy_bursts = DMA_BURSTBITS;
2719 #endif
2720 	hp->irq = pdev->irq;
2721 
2722 #ifdef CONFIG_SBUS
2723 	/* Hook up PCI register/descriptor accessors. */
2724 	hp->read_desc32 = pci_hme_read_desc32;
2725 	hp->write_txd = pci_hme_write_txd;
2726 	hp->write_rxd = pci_hme_write_rxd;
2727 	hp->read32 = pci_hme_read32;
2728 	hp->write32 = pci_hme_write32;
2729 #endif
2730 
2731 	err = happy_meal_common_probe(hp, dp);
2732 	if (err)
2733 		goto err_out_clear_quattro;
2734 
2735 	pci_set_drvdata(pdev, hp);
2736 
2737 	if (!qfe_slot) {
2738 		struct pci_dev *qpdev = qp->quattro_dev;
2739 
2740 		prom_name[0] = 0;
2741 		if (!strncmp(dev->name, "eth", 3)) {
2742 			int i = simple_strtoul(dev->name + 3, NULL, 10);
2743 			sprintf(prom_name, "-%d", i + 3);
2744 		}
2745 		netdev_info(dev,
2746 			    "%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet bridge %04x.%04x\n",
2747 			    prom_name, qpdev->vendor, qpdev->device);
2748 	}
2749 
2750 	if (qfe_slot != -1)
2751 		netdev_info(dev,
2752 			    "Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
2753 			    qfe_slot, dev->dev_addr);
2754 	else
2755 		netdev_info(dev,
2756 			    "HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet %pM\n",
2757 			    dev->dev_addr);
2758 
2759 	return 0;
2760 
2761 err_out_clear_quattro:
2762 	if (qp != NULL)
2763 		qp->happy_meals[qfe_slot] = NULL;
2764 	return err;
2765 }
2766 
2767 static const struct pci_device_id happymeal_pci_ids[] = {
2768 	{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
2769 	{ }			/* Terminating entry */
2770 };
2771 
2772 MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
2773 
2774 static struct pci_driver hme_pci_driver = {
2775 	.name		= "hme",
2776 	.id_table	= happymeal_pci_ids,
2777 	.probe		= happy_meal_pci_probe,
2778 };
2779 
2780 static int __init happy_meal_pci_init(void)
2781 {
2782 	return pci_register_driver(&hme_pci_driver);
2783 }
2784 
2785 static void happy_meal_pci_exit(void)
2786 {
2787 	pci_unregister_driver(&hme_pci_driver);
2788 
2789 	while (qfe_pci_list) {
2790 		struct quattro *qfe = qfe_pci_list;
2791 		struct quattro *next = qfe->next;
2792 
2793 		kfree(qfe);
2794 
2795 		qfe_pci_list = next;
2796 	}
2797 }
2798 
2799 #endif
2800 
2801 #ifdef CONFIG_SBUS
2802 static const struct of_device_id hme_sbus_match[];
2803 static int hme_sbus_probe(struct platform_device *op)
2804 {
2805 	const struct of_device_id *match;
2806 	struct device_node *dp = op->dev.of_node;
2807 	const char *model = of_get_property(dp, "model", NULL);
2808 	int is_qfe;
2809 
2810 	match = of_match_device(hme_sbus_match, &op->dev);
2811 	if (!match)
2812 		return -EINVAL;
2813 	is_qfe = (match->data != NULL);
2814 
2815 	if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
2816 		is_qfe = 1;
2817 
2818 	return happy_meal_sbus_probe_one(op, is_qfe);
2819 }
2820 
2821 static const struct of_device_id hme_sbus_match[] = {
2822 	{
2823 		.name = "SUNW,hme",
2824 	},
2825 	{
2826 		.name = "SUNW,qfe",
2827 		.data = (void *) 1,
2828 	},
2829 	{
2830 		.name = "qfe",
2831 		.data = (void *) 1,
2832 	},
2833 	{},
2834 };
2835 
2836 MODULE_DEVICE_TABLE(of, hme_sbus_match);
2837 
2838 static struct platform_driver hme_sbus_driver = {
2839 	.driver = {
2840 		.name = "hme",
2841 		.of_match_table = hme_sbus_match,
2842 	},
2843 	.probe		= hme_sbus_probe,
2844 };
2845 
2846 static int __init happy_meal_sbus_init(void)
2847 {
2848 	return platform_driver_register(&hme_sbus_driver);
2849 }
2850 
2851 static void happy_meal_sbus_exit(void)
2852 {
2853 	platform_driver_unregister(&hme_sbus_driver);
2854 
2855 	while (qfe_sbus_list) {
2856 		struct quattro *qfe = qfe_sbus_list;
2857 		struct quattro *next = qfe->next;
2858 
2859 		kfree(qfe);
2860 
2861 		qfe_sbus_list = next;
2862 	}
2863 }
2864 #endif
2865 
2866 static int __init happy_meal_probe(void)
2867 {
2868 	int err = 0;
2869 
2870 #ifdef CONFIG_SBUS
2871 	err = happy_meal_sbus_init();
2872 #endif
2873 #ifdef CONFIG_PCI
2874 	if (!err) {
2875 		err = happy_meal_pci_init();
2876 #ifdef CONFIG_SBUS
2877 		if (err)
2878 			happy_meal_sbus_exit();
2879 #endif
2880 	}
2881 #endif
2882 
2883 	return err;
2884 }
2885 
2886 
2887 static void __exit happy_meal_exit(void)
2888 {
2889 #ifdef CONFIG_SBUS
2890 	happy_meal_sbus_exit();
2891 #endif
2892 #ifdef CONFIG_PCI
2893 	happy_meal_pci_exit();
2894 #endif
2895 }
2896 
2897 module_init(happy_meal_probe);
2898 module_exit(happy_meal_exit);
2899