1 /*
2  * QLogic QLA3xxx NIC HBA Driver
3  * Copyright (c)  2003-2006 QLogic Corporation
4  *
5  * See LICENSE.qla3xxx for copyright and licensing details.
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/kernel.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/ip.h>
26 #include <linux/in.h>
27 #include <linux/if_arp.h>
28 #include <linux/if_ether.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/ethtool.h>
32 #include <linux/skbuff.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/if_vlan.h>
35 #include <linux/delay.h>
36 #include <linux/mm.h>
37 #include <linux/prefetch.h>
38 
39 #include "qla3xxx.h"
40 
41 #define DRV_NAME	"qla3xxx"
42 #define DRV_STRING	"QLogic ISP3XXX Network Driver"
43 #define DRV_VERSION	"v2.03.00-k5"
44 
45 static const char ql3xxx_driver_name[] = DRV_NAME;
46 static const char ql3xxx_driver_version[] = DRV_VERSION;
47 
48 #define TIMED_OUT_MSG							\
49 "Timed out waiting for management port to get free before issuing command\n"
50 
51 MODULE_AUTHOR("QLogic Corporation");
52 MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
53 MODULE_LICENSE("GPL");
54 MODULE_VERSION(DRV_VERSION);
55 
56 static const u32 default_msg
57     = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
58     | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
59 
60 static int debug = -1;		/* defaults above */
61 module_param(debug, int, 0);
62 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
63 
64 static int msi;
65 module_param(msi, int, 0);
66 MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
67 
68 static const struct pci_device_id ql3xxx_pci_tbl[] = {
69 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
70 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
71 	/* required last entry */
72 	{0,}
73 };
74 
75 MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
76 
77 /*
78  *  These are the known PHY's which are used
79  */
80 enum PHY_DEVICE_TYPE {
81    PHY_TYPE_UNKNOWN   = 0,
82    PHY_VITESSE_VSC8211,
83    PHY_AGERE_ET1011C,
84    MAX_PHY_DEV_TYPES
85 };
86 
87 struct PHY_DEVICE_INFO {
88 	const enum PHY_DEVICE_TYPE	phyDevice;
89 	const u32		phyIdOUI;
90 	const u16		phyIdModel;
91 	const char		*name;
92 };
93 
94 static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
95 	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
96 	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
97 	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
98 };
99 
100 
101 /*
102  * Caller must take hw_lock.
103  */
104 static int ql_sem_spinlock(struct ql3_adapter *qdev,
105 			    u32 sem_mask, u32 sem_bits)
106 {
107 	struct ql3xxx_port_registers __iomem *port_regs =
108 		qdev->mem_map_registers;
109 	u32 value;
110 	unsigned int seconds = 3;
111 
112 	do {
113 		writel((sem_mask | sem_bits),
114 		       &port_regs->CommonRegs.semaphoreReg);
115 		value = readl(&port_regs->CommonRegs.semaphoreReg);
116 		if ((value & (sem_mask >> 16)) == sem_bits)
117 			return 0;
118 		ssleep(1);
119 	} while (--seconds);
120 	return -1;
121 }
122 
123 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
124 {
125 	struct ql3xxx_port_registers __iomem *port_regs =
126 		qdev->mem_map_registers;
127 	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
128 	readl(&port_regs->CommonRegs.semaphoreReg);
129 }
130 
131 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
132 {
133 	struct ql3xxx_port_registers __iomem *port_regs =
134 		qdev->mem_map_registers;
135 	u32 value;
136 
137 	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
138 	value = readl(&port_regs->CommonRegs.semaphoreReg);
139 	return ((value & (sem_mask >> 16)) == sem_bits);
140 }
141 
142 /*
143  * Caller holds hw_lock.
144  */
145 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
146 {
147 	int i = 0;
148 
149 	do {
150 		if (ql_sem_lock(qdev,
151 				QL_DRVR_SEM_MASK,
152 				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
153 				 * 2) << 1)) {
154 			netdev_printk(KERN_DEBUG, qdev->ndev,
155 				      "driver lock acquired\n");
156 			return 1;
157 		}
158 		ssleep(1);
159 	} while (++i < 10);
160 
161 	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
162 	return 0;
163 }
164 
165 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
166 {
167 	struct ql3xxx_port_registers __iomem *port_regs =
168 		qdev->mem_map_registers;
169 
170 	writel(((ISP_CONTROL_NP_MASK << 16) | page),
171 			&port_regs->CommonRegs.ispControlStatus);
172 	readl(&port_regs->CommonRegs.ispControlStatus);
173 	qdev->current_page = page;
174 }
175 
176 static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
177 {
178 	u32 value;
179 	unsigned long hw_flags;
180 
181 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
182 	value = readl(reg);
183 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
184 
185 	return value;
186 }
187 
188 static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
189 {
190 	return readl(reg);
191 }
192 
193 static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
194 {
195 	u32 value;
196 	unsigned long hw_flags;
197 
198 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
199 
200 	if (qdev->current_page != 0)
201 		ql_set_register_page(qdev, 0);
202 	value = readl(reg);
203 
204 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
205 	return value;
206 }
207 
208 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
209 {
210 	if (qdev->current_page != 0)
211 		ql_set_register_page(qdev, 0);
212 	return readl(reg);
213 }
214 
215 static void ql_write_common_reg_l(struct ql3_adapter *qdev,
216 				u32 __iomem *reg, u32 value)
217 {
218 	unsigned long hw_flags;
219 
220 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
221 	writel(value, reg);
222 	readl(reg);
223 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
224 }
225 
226 static void ql_write_common_reg(struct ql3_adapter *qdev,
227 				u32 __iomem *reg, u32 value)
228 {
229 	writel(value, reg);
230 	readl(reg);
231 }
232 
233 static void ql_write_nvram_reg(struct ql3_adapter *qdev,
234 				u32 __iomem *reg, u32 value)
235 {
236 	writel(value, reg);
237 	readl(reg);
238 	udelay(1);
239 }
240 
241 static void ql_write_page0_reg(struct ql3_adapter *qdev,
242 			       u32 __iomem *reg, u32 value)
243 {
244 	if (qdev->current_page != 0)
245 		ql_set_register_page(qdev, 0);
246 	writel(value, reg);
247 	readl(reg);
248 }
249 
250 /*
251  * Caller holds hw_lock. Only called during init.
252  */
253 static void ql_write_page1_reg(struct ql3_adapter *qdev,
254 			       u32 __iomem *reg, u32 value)
255 {
256 	if (qdev->current_page != 1)
257 		ql_set_register_page(qdev, 1);
258 	writel(value, reg);
259 	readl(reg);
260 }
261 
262 /*
263  * Caller holds hw_lock. Only called during init.
264  */
265 static void ql_write_page2_reg(struct ql3_adapter *qdev,
266 			       u32 __iomem *reg, u32 value)
267 {
268 	if (qdev->current_page != 2)
269 		ql_set_register_page(qdev, 2);
270 	writel(value, reg);
271 	readl(reg);
272 }
273 
274 static void ql_disable_interrupts(struct ql3_adapter *qdev)
275 {
276 	struct ql3xxx_port_registers __iomem *port_regs =
277 		qdev->mem_map_registers;
278 
279 	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
280 			    (ISP_IMR_ENABLE_INT << 16));
281 
282 }
283 
284 static void ql_enable_interrupts(struct ql3_adapter *qdev)
285 {
286 	struct ql3xxx_port_registers __iomem *port_regs =
287 		qdev->mem_map_registers;
288 
289 	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
290 			    ((0xff << 16) | ISP_IMR_ENABLE_INT));
291 
292 }
293 
294 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
295 					    struct ql_rcv_buf_cb *lrg_buf_cb)
296 {
297 	dma_addr_t map;
298 	int err;
299 	lrg_buf_cb->next = NULL;
300 
301 	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
302 		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
303 	} else {
304 		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
305 		qdev->lrg_buf_free_tail = lrg_buf_cb;
306 	}
307 
308 	if (!lrg_buf_cb->skb) {
309 		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
310 						   qdev->lrg_buffer_len);
311 		if (unlikely(!lrg_buf_cb->skb)) {
312 			qdev->lrg_buf_skb_check++;
313 		} else {
314 			/*
315 			 * We save some space to copy the ethhdr from first
316 			 * buffer
317 			 */
318 			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
319 			map = pci_map_single(qdev->pdev,
320 					     lrg_buf_cb->skb->data,
321 					     qdev->lrg_buffer_len -
322 					     QL_HEADER_SPACE,
323 					     PCI_DMA_FROMDEVICE);
324 			err = pci_dma_mapping_error(qdev->pdev, map);
325 			if (err) {
326 				netdev_err(qdev->ndev,
327 					   "PCI mapping failed with error: %d\n",
328 					   err);
329 				dev_kfree_skb(lrg_buf_cb->skb);
330 				lrg_buf_cb->skb = NULL;
331 
332 				qdev->lrg_buf_skb_check++;
333 				return;
334 			}
335 
336 			lrg_buf_cb->buf_phy_addr_low =
337 			    cpu_to_le32(LS_64BITS(map));
338 			lrg_buf_cb->buf_phy_addr_high =
339 			    cpu_to_le32(MS_64BITS(map));
340 			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
341 			dma_unmap_len_set(lrg_buf_cb, maplen,
342 					  qdev->lrg_buffer_len -
343 					  QL_HEADER_SPACE);
344 		}
345 	}
346 
347 	qdev->lrg_buf_free_count++;
348 }
349 
350 static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
351 							   *qdev)
352 {
353 	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
354 
355 	if (lrg_buf_cb != NULL) {
356 		qdev->lrg_buf_free_head = lrg_buf_cb->next;
357 		if (qdev->lrg_buf_free_head == NULL)
358 			qdev->lrg_buf_free_tail = NULL;
359 		qdev->lrg_buf_free_count--;
360 	}
361 
362 	return lrg_buf_cb;
363 }
364 
365 static u32 addrBits = EEPROM_NO_ADDR_BITS;
366 static u32 dataBits = EEPROM_NO_DATA_BITS;
367 
368 static void fm93c56a_deselect(struct ql3_adapter *qdev);
369 static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
370 			    unsigned short *value);
371 
372 /*
373  * Caller holds hw_lock.
374  */
375 static void fm93c56a_select(struct ql3_adapter *qdev)
376 {
377 	struct ql3xxx_port_registers __iomem *port_regs =
378 			qdev->mem_map_registers;
379 	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
380 
381 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
382 	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
383 	ql_write_nvram_reg(qdev, spir,
384 			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
385 }
386 
387 /*
388  * Caller holds hw_lock.
389  */
390 static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
391 {
392 	int i;
393 	u32 mask;
394 	u32 dataBit;
395 	u32 previousBit;
396 	struct ql3xxx_port_registers __iomem *port_regs =
397 			qdev->mem_map_registers;
398 	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
399 
400 	/* Clock in a zero, then do the start bit */
401 	ql_write_nvram_reg(qdev, spir,
402 			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
403 			    AUBURN_EEPROM_DO_1));
404 	ql_write_nvram_reg(qdev, spir,
405 			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
406 			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
407 	ql_write_nvram_reg(qdev, spir,
408 			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
409 			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
410 
411 	mask = 1 << (FM93C56A_CMD_BITS - 1);
412 	/* Force the previous data bit to be different */
413 	previousBit = 0xffff;
414 	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
415 		dataBit = (cmd & mask)
416 			? AUBURN_EEPROM_DO_1
417 			: AUBURN_EEPROM_DO_0;
418 		if (previousBit != dataBit) {
419 			/* If the bit changed, change the DO state to match */
420 			ql_write_nvram_reg(qdev, spir,
421 					   (ISP_NVRAM_MASK |
422 					    qdev->eeprom_cmd_data | dataBit));
423 			previousBit = dataBit;
424 		}
425 		ql_write_nvram_reg(qdev, spir,
426 				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
427 				    dataBit | AUBURN_EEPROM_CLK_RISE));
428 		ql_write_nvram_reg(qdev, spir,
429 				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
430 				    dataBit | AUBURN_EEPROM_CLK_FALL));
431 		cmd = cmd << 1;
432 	}
433 
434 	mask = 1 << (addrBits - 1);
435 	/* Force the previous data bit to be different */
436 	previousBit = 0xffff;
437 	for (i = 0; i < addrBits; i++) {
438 		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
439 			: AUBURN_EEPROM_DO_0;
440 		if (previousBit != dataBit) {
441 			/*
442 			 * If the bit changed, then change the DO state to
443 			 * match
444 			 */
445 			ql_write_nvram_reg(qdev, spir,
446 					   (ISP_NVRAM_MASK |
447 					    qdev->eeprom_cmd_data | dataBit));
448 			previousBit = dataBit;
449 		}
450 		ql_write_nvram_reg(qdev, spir,
451 				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
452 				    dataBit | AUBURN_EEPROM_CLK_RISE));
453 		ql_write_nvram_reg(qdev, spir,
454 				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
455 				    dataBit | AUBURN_EEPROM_CLK_FALL));
456 		eepromAddr = eepromAddr << 1;
457 	}
458 }
459 
460 /*
461  * Caller holds hw_lock.
462  */
463 static void fm93c56a_deselect(struct ql3_adapter *qdev)
464 {
465 	struct ql3xxx_port_registers __iomem *port_regs =
466 			qdev->mem_map_registers;
467 	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
468 
469 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
470 	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
471 }
472 
473 /*
474  * Caller holds hw_lock.
475  */
476 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
477 {
478 	int i;
479 	u32 data = 0;
480 	u32 dataBit;
481 	struct ql3xxx_port_registers __iomem *port_regs =
482 			qdev->mem_map_registers;
483 	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
484 
485 	/* Read the data bits */
486 	/* The first bit is a dummy.  Clock right over it. */
487 	for (i = 0; i < dataBits; i++) {
488 		ql_write_nvram_reg(qdev, spir,
489 				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
490 				   AUBURN_EEPROM_CLK_RISE);
491 		ql_write_nvram_reg(qdev, spir,
492 				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
493 				   AUBURN_EEPROM_CLK_FALL);
494 		dataBit = (ql_read_common_reg(qdev, spir) &
495 			   AUBURN_EEPROM_DI_1) ? 1 : 0;
496 		data = (data << 1) | dataBit;
497 	}
498 	*value = (u16)data;
499 }
500 
501 /*
502  * Caller holds hw_lock.
503  */
504 static void eeprom_readword(struct ql3_adapter *qdev,
505 			    u32 eepromAddr, unsigned short *value)
506 {
507 	fm93c56a_select(qdev);
508 	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
509 	fm93c56a_datain(qdev, value);
510 	fm93c56a_deselect(qdev);
511 }
512 
513 static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
514 {
515 	__le16 *p = (__le16 *)ndev->dev_addr;
516 	p[0] = cpu_to_le16(addr[0]);
517 	p[1] = cpu_to_le16(addr[1]);
518 	p[2] = cpu_to_le16(addr[2]);
519 }
520 
521 static int ql_get_nvram_params(struct ql3_adapter *qdev)
522 {
523 	u16 *pEEPROMData;
524 	u16 checksum = 0;
525 	u32 index;
526 	unsigned long hw_flags;
527 
528 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
529 
530 	pEEPROMData = (u16 *)&qdev->nvram_data;
531 	qdev->eeprom_cmd_data = 0;
532 	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
533 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
534 			 2) << 10)) {
535 		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
536 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
537 		return -1;
538 	}
539 
540 	for (index = 0; index < EEPROM_SIZE; index++) {
541 		eeprom_readword(qdev, index, pEEPROMData);
542 		checksum += *pEEPROMData;
543 		pEEPROMData++;
544 	}
545 	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
546 
547 	if (checksum != 0) {
548 		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
549 			   checksum);
550 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
551 		return -1;
552 	}
553 
554 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
555 	return checksum;
556 }
557 
558 static const u32 PHYAddr[2] = {
559 	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
560 };
561 
562 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
563 {
564 	struct ql3xxx_port_registers __iomem *port_regs =
565 			qdev->mem_map_registers;
566 	u32 temp;
567 	int count = 1000;
568 
569 	while (count) {
570 		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
571 		if (!(temp & MAC_MII_STATUS_BSY))
572 			return 0;
573 		udelay(10);
574 		count--;
575 	}
576 	return -1;
577 }
578 
579 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
580 {
581 	struct ql3xxx_port_registers __iomem *port_regs =
582 			qdev->mem_map_registers;
583 	u32 scanControl;
584 
585 	if (qdev->numPorts > 1) {
586 		/* Auto scan will cycle through multiple ports */
587 		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
588 	} else {
589 		scanControl = MAC_MII_CONTROL_SC;
590 	}
591 
592 	/*
593 	 * Scan register 1 of PHY/PETBI,
594 	 * Set up to scan both devices
595 	 * The autoscan starts from the first register, completes
596 	 * the last one before rolling over to the first
597 	 */
598 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
599 			   PHYAddr[0] | MII_SCAN_REGISTER);
600 
601 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
602 			   (scanControl) |
603 			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
604 }
605 
606 static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
607 {
608 	u8 ret;
609 	struct ql3xxx_port_registers __iomem *port_regs =
610 					qdev->mem_map_registers;
611 
612 	/* See if scan mode is enabled before we turn it off */
613 	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
614 	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
615 		/* Scan is enabled */
616 		ret = 1;
617 	} else {
618 		/* Scan is disabled */
619 		ret = 0;
620 	}
621 
622 	/*
623 	 * When disabling scan mode you must first change the MII register
624 	 * address
625 	 */
626 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
627 			   PHYAddr[0] | MII_SCAN_REGISTER);
628 
629 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
630 			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
631 			     MAC_MII_CONTROL_RC) << 16));
632 
633 	return ret;
634 }
635 
636 static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
637 			       u16 regAddr, u16 value, u32 phyAddr)
638 {
639 	struct ql3xxx_port_registers __iomem *port_regs =
640 			qdev->mem_map_registers;
641 	u8 scanWasEnabled;
642 
643 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
644 
645 	if (ql_wait_for_mii_ready(qdev)) {
646 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
647 		return -1;
648 	}
649 
650 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
651 			   phyAddr | regAddr);
652 
653 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
654 
655 	/* Wait for write to complete 9/10/04 SJP */
656 	if (ql_wait_for_mii_ready(qdev)) {
657 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
658 		return -1;
659 	}
660 
661 	if (scanWasEnabled)
662 		ql_mii_enable_scan_mode(qdev);
663 
664 	return 0;
665 }
666 
667 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
668 			      u16 *value, u32 phyAddr)
669 {
670 	struct ql3xxx_port_registers __iomem *port_regs =
671 			qdev->mem_map_registers;
672 	u8 scanWasEnabled;
673 	u32 temp;
674 
675 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
676 
677 	if (ql_wait_for_mii_ready(qdev)) {
678 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
679 		return -1;
680 	}
681 
682 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
683 			   phyAddr | regAddr);
684 
685 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
686 			   (MAC_MII_CONTROL_RC << 16));
687 
688 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
689 			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
690 
691 	/* Wait for the read to complete */
692 	if (ql_wait_for_mii_ready(qdev)) {
693 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
694 		return -1;
695 	}
696 
697 	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
698 	*value = (u16) temp;
699 
700 	if (scanWasEnabled)
701 		ql_mii_enable_scan_mode(qdev);
702 
703 	return 0;
704 }
705 
706 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
707 {
708 	struct ql3xxx_port_registers __iomem *port_regs =
709 			qdev->mem_map_registers;
710 
711 	ql_mii_disable_scan_mode(qdev);
712 
713 	if (ql_wait_for_mii_ready(qdev)) {
714 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
715 		return -1;
716 	}
717 
718 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
719 			   qdev->PHYAddr | regAddr);
720 
721 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
722 
723 	/* Wait for write to complete. */
724 	if (ql_wait_for_mii_ready(qdev)) {
725 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
726 		return -1;
727 	}
728 
729 	ql_mii_enable_scan_mode(qdev);
730 
731 	return 0;
732 }
733 
734 static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
735 {
736 	u32 temp;
737 	struct ql3xxx_port_registers __iomem *port_regs =
738 			qdev->mem_map_registers;
739 
740 	ql_mii_disable_scan_mode(qdev);
741 
742 	if (ql_wait_for_mii_ready(qdev)) {
743 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
744 		return -1;
745 	}
746 
747 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
748 			   qdev->PHYAddr | regAddr);
749 
750 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
751 			   (MAC_MII_CONTROL_RC << 16));
752 
753 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
754 			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
755 
756 	/* Wait for the read to complete */
757 	if (ql_wait_for_mii_ready(qdev)) {
758 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
759 		return -1;
760 	}
761 
762 	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
763 	*value = (u16) temp;
764 
765 	ql_mii_enable_scan_mode(qdev);
766 
767 	return 0;
768 }
769 
770 static void ql_petbi_reset(struct ql3_adapter *qdev)
771 {
772 	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
773 }
774 
775 static void ql_petbi_start_neg(struct ql3_adapter *qdev)
776 {
777 	u16 reg;
778 
779 	/* Enable Auto-negotiation sense */
780 	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
781 	reg |= PETBI_TBI_AUTO_SENSE;
782 	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
783 
784 	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
785 			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
786 
787 	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
788 			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
789 			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
790 
791 }
792 
793 static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
794 {
795 	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
796 			    PHYAddr[qdev->mac_index]);
797 }
798 
799 static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
800 {
801 	u16 reg;
802 
803 	/* Enable Auto-negotiation sense */
804 	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
805 			   PHYAddr[qdev->mac_index]);
806 	reg |= PETBI_TBI_AUTO_SENSE;
807 	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
808 			    PHYAddr[qdev->mac_index]);
809 
810 	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
811 			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
812 			    PHYAddr[qdev->mac_index]);
813 
814 	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
815 			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
816 			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
817 			    PHYAddr[qdev->mac_index]);
818 }
819 
820 static void ql_petbi_init(struct ql3_adapter *qdev)
821 {
822 	ql_petbi_reset(qdev);
823 	ql_petbi_start_neg(qdev);
824 }
825 
826 static void ql_petbi_init_ex(struct ql3_adapter *qdev)
827 {
828 	ql_petbi_reset_ex(qdev);
829 	ql_petbi_start_neg_ex(qdev);
830 }
831 
832 static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
833 {
834 	u16 reg;
835 
836 	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
837 		return 0;
838 
839 	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
840 }
841 
842 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
843 {
844 	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
845 	/* power down device bit 11 = 1 */
846 	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
847 	/* enable diagnostic mode bit 2 = 1 */
848 	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
849 	/* 1000MB amplitude adjust (see Agere errata) */
850 	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
851 	/* 1000MB amplitude adjust (see Agere errata) */
852 	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
853 	/* 100MB amplitude adjust (see Agere errata) */
854 	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
855 	/* 100MB amplitude adjust (see Agere errata) */
856 	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
857 	/* 10MB amplitude adjust (see Agere errata) */
858 	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
859 	/* 10MB amplitude adjust (see Agere errata) */
860 	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
861 	/* point to hidden reg 0x2806 */
862 	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
863 	/* Write new PHYAD w/bit 5 set */
864 	ql_mii_write_reg_ex(qdev, 0x11,
865 			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
866 	/*
867 	 * Disable diagnostic mode bit 2 = 0
868 	 * Power up device bit 11 = 0
869 	 * Link up (on) and activity (blink)
870 	 */
871 	ql_mii_write_reg(qdev, 0x12, 0x840a);
872 	ql_mii_write_reg(qdev, 0x00, 0x1140);
873 	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
874 }
875 
876 static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
877 				       u16 phyIdReg0, u16 phyIdReg1)
878 {
879 	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
880 	u32   oui;
881 	u16   model;
882 	int i;
883 
884 	if (phyIdReg0 == 0xffff)
885 		return result;
886 
887 	if (phyIdReg1 == 0xffff)
888 		return result;
889 
890 	/* oui is split between two registers */
891 	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
892 
893 	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
894 
895 	/* Scan table for this PHY */
896 	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
897 		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
898 		    (model == PHY_DEVICES[i].phyIdModel)) {
899 			netdev_info(qdev->ndev, "Phy: %s\n",
900 				    PHY_DEVICES[i].name);
901 			result = PHY_DEVICES[i].phyDevice;
902 			break;
903 		}
904 	}
905 
906 	return result;
907 }
908 
909 static int ql_phy_get_speed(struct ql3_adapter *qdev)
910 {
911 	u16 reg;
912 
913 	switch (qdev->phyType) {
914 	case PHY_AGERE_ET1011C: {
915 		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
916 			return 0;
917 
918 		reg = (reg >> 8) & 3;
919 		break;
920 	}
921 	default:
922 		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
923 			return 0;
924 
925 		reg = (((reg & 0x18) >> 3) & 3);
926 	}
927 
928 	switch (reg) {
929 	case 2:
930 		return SPEED_1000;
931 	case 1:
932 		return SPEED_100;
933 	case 0:
934 		return SPEED_10;
935 	default:
936 		return -1;
937 	}
938 }
939 
940 static int ql_is_full_dup(struct ql3_adapter *qdev)
941 {
942 	u16 reg;
943 
944 	switch (qdev->phyType) {
945 	case PHY_AGERE_ET1011C: {
946 		if (ql_mii_read_reg(qdev, 0x1A, &reg))
947 			return 0;
948 
949 		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
950 	}
951 	case PHY_VITESSE_VSC8211:
952 	default: {
953 		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
954 			return 0;
955 		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
956 	}
957 	}
958 }
959 
960 static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
961 {
962 	u16 reg;
963 
964 	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
965 		return 0;
966 
967 	return (reg & PHY_NEG_PAUSE) != 0;
968 }
969 
970 static int PHY_Setup(struct ql3_adapter *qdev)
971 {
972 	u16   reg1;
973 	u16   reg2;
974 	bool  agereAddrChangeNeeded = false;
975 	u32 miiAddr = 0;
976 	int err;
977 
978 	/*  Determine the PHY we are using by reading the ID's */
979 	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
980 	if (err != 0) {
981 		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
982 		return err;
983 	}
984 
985 	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
986 	if (err != 0) {
987 		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
988 		return err;
989 	}
990 
991 	/*  Check if we have a Agere PHY */
992 	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
993 
994 		/* Determine which MII address we should be using
995 		   determined by the index of the card */
996 		if (qdev->mac_index == 0)
997 			miiAddr = MII_AGERE_ADDR_1;
998 		else
999 			miiAddr = MII_AGERE_ADDR_2;
1000 
1001 		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
1002 		if (err != 0) {
1003 			netdev_err(qdev->ndev,
1004 				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
1005 			return err;
1006 		}
1007 
1008 		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1009 		if (err != 0) {
1010 			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
1011 			return err;
1012 		}
1013 
1014 		/*  We need to remember to initialize the Agere PHY */
1015 		agereAddrChangeNeeded = true;
1016 	}
1017 
1018 	/*  Determine the particular PHY we have on board to apply
1019 	    PHY specific initializations */
1020 	qdev->phyType = getPhyType(qdev, reg1, reg2);
1021 
1022 	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
1023 		/* need this here so address gets changed */
1024 		phyAgereSpecificInit(qdev, miiAddr);
1025 	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1026 		netdev_err(qdev->ndev, "PHY is unknown\n");
1027 		return -EIO;
1028 	}
1029 
1030 	return 0;
1031 }
1032 
1033 /*
1034  * Caller holds hw_lock.
1035  */
1036 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1037 {
1038 	struct ql3xxx_port_registers __iomem *port_regs =
1039 			qdev->mem_map_registers;
1040 	u32 value;
1041 
1042 	if (enable)
1043 		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
1044 	else
1045 		value = (MAC_CONFIG_REG_PE << 16);
1046 
1047 	if (qdev->mac_index)
1048 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1049 	else
1050 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1051 }
1052 
1053 /*
1054  * Caller holds hw_lock.
1055  */
1056 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1057 {
1058 	struct ql3xxx_port_registers __iomem *port_regs =
1059 			qdev->mem_map_registers;
1060 	u32 value;
1061 
1062 	if (enable)
1063 		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
1064 	else
1065 		value = (MAC_CONFIG_REG_SR << 16);
1066 
1067 	if (qdev->mac_index)
1068 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1069 	else
1070 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1071 }
1072 
1073 /*
1074  * Caller holds hw_lock.
1075  */
1076 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1077 {
1078 	struct ql3xxx_port_registers __iomem *port_regs =
1079 			qdev->mem_map_registers;
1080 	u32 value;
1081 
1082 	if (enable)
1083 		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
1084 	else
1085 		value = (MAC_CONFIG_REG_GM << 16);
1086 
1087 	if (qdev->mac_index)
1088 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1089 	else
1090 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1091 }
1092 
1093 /*
1094  * Caller holds hw_lock.
1095  */
1096 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1097 {
1098 	struct ql3xxx_port_registers __iomem *port_regs =
1099 			qdev->mem_map_registers;
1100 	u32 value;
1101 
1102 	if (enable)
1103 		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
1104 	else
1105 		value = (MAC_CONFIG_REG_FD << 16);
1106 
1107 	if (qdev->mac_index)
1108 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1109 	else
1110 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1111 }
1112 
1113 /*
1114  * Caller holds hw_lock.
1115  */
1116 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1117 {
1118 	struct ql3xxx_port_registers __iomem *port_regs =
1119 			qdev->mem_map_registers;
1120 	u32 value;
1121 
1122 	if (enable)
1123 		value =
1124 		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
1125 		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
1126 	else
1127 		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
1128 
1129 	if (qdev->mac_index)
1130 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1131 	else
1132 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1133 }
1134 
1135 /*
1136  * Caller holds hw_lock.
1137  */
1138 static int ql_is_fiber(struct ql3_adapter *qdev)
1139 {
1140 	struct ql3xxx_port_registers __iomem *port_regs =
1141 			qdev->mem_map_registers;
1142 	u32 bitToCheck = 0;
1143 	u32 temp;
1144 
1145 	switch (qdev->mac_index) {
1146 	case 0:
1147 		bitToCheck = PORT_STATUS_SM0;
1148 		break;
1149 	case 1:
1150 		bitToCheck = PORT_STATUS_SM1;
1151 		break;
1152 	}
1153 
1154 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1155 	return (temp & bitToCheck) != 0;
1156 }
1157 
1158 static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1159 {
1160 	u16 reg;
1161 	ql_mii_read_reg(qdev, 0x00, &reg);
1162 	return (reg & 0x1000) != 0;
1163 }
1164 
1165 /*
1166  * Caller holds hw_lock.
1167  */
1168 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1169 {
1170 	struct ql3xxx_port_registers __iomem *port_regs =
1171 			qdev->mem_map_registers;
1172 	u32 bitToCheck = 0;
1173 	u32 temp;
1174 
1175 	switch (qdev->mac_index) {
1176 	case 0:
1177 		bitToCheck = PORT_STATUS_AC0;
1178 		break;
1179 	case 1:
1180 		bitToCheck = PORT_STATUS_AC1;
1181 		break;
1182 	}
1183 
1184 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1185 	if (temp & bitToCheck) {
1186 		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
1187 		return 1;
1188 	}
1189 	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
1190 	return 0;
1191 }
1192 
1193 /*
1194  *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
1195  */
1196 static int ql_is_neg_pause(struct ql3_adapter *qdev)
1197 {
1198 	if (ql_is_fiber(qdev))
1199 		return ql_is_petbi_neg_pause(qdev);
1200 	else
1201 		return ql_is_phy_neg_pause(qdev);
1202 }
1203 
1204 static int ql_auto_neg_error(struct ql3_adapter *qdev)
1205 {
1206 	struct ql3xxx_port_registers __iomem *port_regs =
1207 			qdev->mem_map_registers;
1208 	u32 bitToCheck = 0;
1209 	u32 temp;
1210 
1211 	switch (qdev->mac_index) {
1212 	case 0:
1213 		bitToCheck = PORT_STATUS_AE0;
1214 		break;
1215 	case 1:
1216 		bitToCheck = PORT_STATUS_AE1;
1217 		break;
1218 	}
1219 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1220 	return (temp & bitToCheck) != 0;
1221 }
1222 
1223 static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1224 {
1225 	if (ql_is_fiber(qdev))
1226 		return SPEED_1000;
1227 	else
1228 		return ql_phy_get_speed(qdev);
1229 }
1230 
1231 static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1232 {
1233 	if (ql_is_fiber(qdev))
1234 		return 1;
1235 	else
1236 		return ql_is_full_dup(qdev);
1237 }
1238 
1239 /*
1240  * Caller holds hw_lock.
1241  */
1242 static int ql_link_down_detect(struct ql3_adapter *qdev)
1243 {
1244 	struct ql3xxx_port_registers __iomem *port_regs =
1245 			qdev->mem_map_registers;
1246 	u32 bitToCheck = 0;
1247 	u32 temp;
1248 
1249 	switch (qdev->mac_index) {
1250 	case 0:
1251 		bitToCheck = ISP_CONTROL_LINK_DN_0;
1252 		break;
1253 	case 1:
1254 		bitToCheck = ISP_CONTROL_LINK_DN_1;
1255 		break;
1256 	}
1257 
1258 	temp =
1259 	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1260 	return (temp & bitToCheck) != 0;
1261 }
1262 
1263 /*
1264  * Caller holds hw_lock.
1265  */
1266 static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1267 {
1268 	struct ql3xxx_port_registers __iomem *port_regs =
1269 			qdev->mem_map_registers;
1270 
1271 	switch (qdev->mac_index) {
1272 	case 0:
1273 		ql_write_common_reg(qdev,
1274 				    &port_regs->CommonRegs.ispControlStatus,
1275 				    (ISP_CONTROL_LINK_DN_0) |
1276 				    (ISP_CONTROL_LINK_DN_0 << 16));
1277 		break;
1278 
1279 	case 1:
1280 		ql_write_common_reg(qdev,
1281 				    &port_regs->CommonRegs.ispControlStatus,
1282 				    (ISP_CONTROL_LINK_DN_1) |
1283 				    (ISP_CONTROL_LINK_DN_1 << 16));
1284 		break;
1285 
1286 	default:
1287 		return 1;
1288 	}
1289 
1290 	return 0;
1291 }
1292 
1293 /*
1294  * Caller holds hw_lock.
1295  */
1296 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1297 {
1298 	struct ql3xxx_port_registers __iomem *port_regs =
1299 			qdev->mem_map_registers;
1300 	u32 bitToCheck = 0;
1301 	u32 temp;
1302 
1303 	switch (qdev->mac_index) {
1304 	case 0:
1305 		bitToCheck = PORT_STATUS_F1_ENABLED;
1306 		break;
1307 	case 1:
1308 		bitToCheck = PORT_STATUS_F3_ENABLED;
1309 		break;
1310 	default:
1311 		break;
1312 	}
1313 
1314 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1315 	if (temp & bitToCheck) {
1316 		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1317 			     "not link master\n");
1318 		return 0;
1319 	}
1320 
1321 	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
1322 	return 1;
1323 }
1324 
1325 static void ql_phy_reset_ex(struct ql3_adapter *qdev)
1326 {
1327 	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
1328 			    PHYAddr[qdev->mac_index]);
1329 }
1330 
1331 static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1332 {
1333 	u16 reg;
1334 	u16 portConfiguration;
1335 
1336 	if (qdev->phyType == PHY_AGERE_ET1011C)
1337 		ql_mii_write_reg(qdev, 0x13, 0x0000);
1338 					/* turn off external loopback */
1339 
1340 	if (qdev->mac_index == 0)
1341 		portConfiguration =
1342 			qdev->nvram_data.macCfg_port0.portConfiguration;
1343 	else
1344 		portConfiguration =
1345 			qdev->nvram_data.macCfg_port1.portConfiguration;
1346 
1347 	/*  Some HBA's in the field are set to 0 and they need to
1348 	    be reinterpreted with a default value */
1349 	if (portConfiguration == 0)
1350 		portConfiguration = PORT_CONFIG_DEFAULT;
1351 
1352 	/* Set the 1000 advertisements */
1353 	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
1354 			   PHYAddr[qdev->mac_index]);
1355 	reg &= ~PHY_GIG_ALL_PARAMS;
1356 
1357 	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
1358 		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
1359 			reg |= PHY_GIG_ADV_1000F;
1360 		else
1361 			reg |= PHY_GIG_ADV_1000H;
1362 	}
1363 
1364 	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
1365 			    PHYAddr[qdev->mac_index]);
1366 
1367 	/* Set the 10/100 & pause negotiation advertisements */
1368 	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
1369 			   PHYAddr[qdev->mac_index]);
1370 	reg &= ~PHY_NEG_ALL_PARAMS;
1371 
1372 	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
1373 		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
1374 
1375 	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
1376 		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1377 			reg |= PHY_NEG_ADV_100F;
1378 
1379 		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1380 			reg |= PHY_NEG_ADV_10F;
1381 	}
1382 
1383 	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
1384 		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1385 			reg |= PHY_NEG_ADV_100H;
1386 
1387 		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1388 			reg |= PHY_NEG_ADV_10H;
1389 	}
1390 
1391 	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
1392 		reg |= 1;
1393 
1394 	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
1395 			    PHYAddr[qdev->mac_index]);
1396 
1397 	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
1398 
1399 	ql_mii_write_reg_ex(qdev, CONTROL_REG,
1400 			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
1401 			    PHYAddr[qdev->mac_index]);
1402 }
1403 
1404 static void ql_phy_init_ex(struct ql3_adapter *qdev)
1405 {
1406 	ql_phy_reset_ex(qdev);
1407 	PHY_Setup(qdev);
1408 	ql_phy_start_neg_ex(qdev);
1409 }
1410 
1411 /*
1412  * Caller holds hw_lock.
1413  */
1414 static u32 ql_get_link_state(struct ql3_adapter *qdev)
1415 {
1416 	struct ql3xxx_port_registers __iomem *port_regs =
1417 			qdev->mem_map_registers;
1418 	u32 bitToCheck = 0;
1419 	u32 temp, linkState;
1420 
1421 	switch (qdev->mac_index) {
1422 	case 0:
1423 		bitToCheck = PORT_STATUS_UP0;
1424 		break;
1425 	case 1:
1426 		bitToCheck = PORT_STATUS_UP1;
1427 		break;
1428 	}
1429 
1430 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1431 	if (temp & bitToCheck)
1432 		linkState = LS_UP;
1433 	else
1434 		linkState = LS_DOWN;
1435 
1436 	return linkState;
1437 }
1438 
1439 static int ql_port_start(struct ql3_adapter *qdev)
1440 {
1441 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1442 		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1443 			 2) << 7)) {
1444 		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
1445 		return -1;
1446 	}
1447 
1448 	if (ql_is_fiber(qdev)) {
1449 		ql_petbi_init(qdev);
1450 	} else {
1451 		/* Copper port */
1452 		ql_phy_init_ex(qdev);
1453 	}
1454 
1455 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1456 	return 0;
1457 }
1458 
1459 static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1460 {
1461 
1462 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1463 		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1464 			 2) << 7))
1465 		return -1;
1466 
1467 	if (!ql_auto_neg_error(qdev)) {
1468 		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1469 			/* configure the MAC */
1470 			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1471 				     "Configuring link\n");
1472 			ql_mac_cfg_soft_reset(qdev, 1);
1473 			ql_mac_cfg_gig(qdev,
1474 				       (ql_get_link_speed
1475 					(qdev) ==
1476 					SPEED_1000));
1477 			ql_mac_cfg_full_dup(qdev,
1478 					    ql_is_link_full_dup
1479 					    (qdev));
1480 			ql_mac_cfg_pause(qdev,
1481 					 ql_is_neg_pause
1482 					 (qdev));
1483 			ql_mac_cfg_soft_reset(qdev, 0);
1484 
1485 			/* enable the MAC */
1486 			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1487 				     "Enabling mac\n");
1488 			ql_mac_enable(qdev, 1);
1489 		}
1490 
1491 		qdev->port_link_state = LS_UP;
1492 		netif_start_queue(qdev->ndev);
1493 		netif_carrier_on(qdev->ndev);
1494 		netif_info(qdev, link, qdev->ndev,
1495 			   "Link is up at %d Mbps, %s duplex\n",
1496 			   ql_get_link_speed(qdev),
1497 			   ql_is_link_full_dup(qdev) ? "full" : "half");
1498 
1499 	} else {	/* Remote error detected */
1500 
1501 		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1502 			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1503 				     "Remote error detected. Calling ql_port_start()\n");
1504 			/*
1505 			 * ql_port_start() is shared code and needs
1506 			 * to lock the PHY on it's own.
1507 			 */
1508 			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1509 			if (ql_port_start(qdev))	/* Restart port */
1510 				return -1;
1511 			return 0;
1512 		}
1513 	}
1514 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1515 	return 0;
1516 }
1517 
1518 static void ql_link_state_machine_work(struct work_struct *work)
1519 {
1520 	struct ql3_adapter *qdev =
1521 		container_of(work, struct ql3_adapter, link_state_work.work);
1522 
1523 	u32 curr_link_state;
1524 	unsigned long hw_flags;
1525 
1526 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1527 
1528 	curr_link_state = ql_get_link_state(qdev);
1529 
1530 	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1531 		netif_info(qdev, link, qdev->ndev,
1532 			   "Reset in progress, skip processing link state\n");
1533 
1534 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1535 
1536 		/* Restart timer on 2 second interval. */
1537 		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1538 
1539 		return;
1540 	}
1541 
1542 	switch (qdev->port_link_state) {
1543 	default:
1544 		if (test_bit(QL_LINK_MASTER, &qdev->flags))
1545 			ql_port_start(qdev);
1546 		qdev->port_link_state = LS_DOWN;
1547 		/* Fall Through */
1548 
1549 	case LS_DOWN:
1550 		if (curr_link_state == LS_UP) {
1551 			netif_info(qdev, link, qdev->ndev, "Link is up\n");
1552 			if (ql_is_auto_neg_complete(qdev))
1553 				ql_finish_auto_neg(qdev);
1554 
1555 			if (qdev->port_link_state == LS_UP)
1556 				ql_link_down_detect_clear(qdev);
1557 
1558 			qdev->port_link_state = LS_UP;
1559 		}
1560 		break;
1561 
1562 	case LS_UP:
1563 		/*
1564 		 * See if the link is currently down or went down and came
1565 		 * back up
1566 		 */
1567 		if (curr_link_state == LS_DOWN) {
1568 			netif_info(qdev, link, qdev->ndev, "Link is down\n");
1569 			qdev->port_link_state = LS_DOWN;
1570 		}
1571 		if (ql_link_down_detect(qdev))
1572 			qdev->port_link_state = LS_DOWN;
1573 		break;
1574 	}
1575 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1576 
1577 	/* Restart timer on 2 second interval. */
1578 	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1579 }
1580 
1581 /*
1582  * Caller must take hw_lock and QL_PHY_GIO_SEM.
1583  */
1584 static void ql_get_phy_owner(struct ql3_adapter *qdev)
1585 {
1586 	if (ql_this_adapter_controls_port(qdev))
1587 		set_bit(QL_LINK_MASTER, &qdev->flags);
1588 	else
1589 		clear_bit(QL_LINK_MASTER, &qdev->flags);
1590 }
1591 
1592 /*
1593  * Caller must take hw_lock and QL_PHY_GIO_SEM.
1594  */
1595 static void ql_init_scan_mode(struct ql3_adapter *qdev)
1596 {
1597 	ql_mii_enable_scan_mode(qdev);
1598 
1599 	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1600 		if (ql_this_adapter_controls_port(qdev))
1601 			ql_petbi_init_ex(qdev);
1602 	} else {
1603 		if (ql_this_adapter_controls_port(qdev))
1604 			ql_phy_init_ex(qdev);
1605 	}
1606 }
1607 
1608 /*
1609  * MII_Setup needs to be called before taking the PHY out of reset
1610  * so that the management interface clock speed can be set properly.
1611  * It would be better if we had a way to disable MDC until after the
1612  * PHY is out of reset, but we don't have that capability.
1613  */
1614 static int ql_mii_setup(struct ql3_adapter *qdev)
1615 {
1616 	u32 reg;
1617 	struct ql3xxx_port_registers __iomem *port_regs =
1618 			qdev->mem_map_registers;
1619 
1620 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1621 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1622 			 2) << 7))
1623 		return -1;
1624 
1625 	if (qdev->device_id == QL3032_DEVICE_ID)
1626 		ql_write_page0_reg(qdev,
1627 			&port_regs->macMIIMgmtControlReg, 0x0f00000);
1628 
1629 	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
1630 	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1631 
1632 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1633 			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1634 
1635 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1636 	return 0;
1637 }
1638 
1639 #define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
1640 				 SUPPORTED_FIBRE |		\
1641 				 SUPPORTED_Autoneg)
1642 #define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
1643 				 SUPPORTED_10baseT_Full |	\
1644 				 SUPPORTED_100baseT_Half |	\
1645 				 SUPPORTED_100baseT_Full |	\
1646 				 SUPPORTED_1000baseT_Half |	\
1647 				 SUPPORTED_1000baseT_Full |	\
1648 				 SUPPORTED_Autoneg |		\
1649 				 SUPPORTED_TP)			\
1650 
1651 static u32 ql_supported_modes(struct ql3_adapter *qdev)
1652 {
1653 	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
1654 		return SUPPORTED_OPTICAL_MODES;
1655 
1656 	return SUPPORTED_TP_MODES;
1657 }
1658 
1659 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1660 {
1661 	int status;
1662 	unsigned long hw_flags;
1663 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1664 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1665 			    (QL_RESOURCE_BITS_BASE_CODE |
1666 			     (qdev->mac_index) * 2) << 7)) {
1667 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1668 		return 0;
1669 	}
1670 	status = ql_is_auto_cfg(qdev);
1671 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1672 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1673 	return status;
1674 }
1675 
1676 static u32 ql_get_speed(struct ql3_adapter *qdev)
1677 {
1678 	u32 status;
1679 	unsigned long hw_flags;
1680 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1681 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1682 			    (QL_RESOURCE_BITS_BASE_CODE |
1683 			     (qdev->mac_index) * 2) << 7)) {
1684 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1685 		return 0;
1686 	}
1687 	status = ql_get_link_speed(qdev);
1688 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1689 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1690 	return status;
1691 }
1692 
1693 static int ql_get_full_dup(struct ql3_adapter *qdev)
1694 {
1695 	int status;
1696 	unsigned long hw_flags;
1697 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1698 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1699 			    (QL_RESOURCE_BITS_BASE_CODE |
1700 			     (qdev->mac_index) * 2) << 7)) {
1701 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1702 		return 0;
1703 	}
1704 	status = ql_is_link_full_dup(qdev);
1705 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1706 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1707 	return status;
1708 }
1709 
1710 static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1711 {
1712 	struct ql3_adapter *qdev = netdev_priv(ndev);
1713 
1714 	ecmd->transceiver = XCVR_INTERNAL;
1715 	ecmd->supported = ql_supported_modes(qdev);
1716 
1717 	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1718 		ecmd->port = PORT_FIBRE;
1719 	} else {
1720 		ecmd->port = PORT_TP;
1721 		ecmd->phy_address = qdev->PHYAddr;
1722 	}
1723 	ecmd->advertising = ql_supported_modes(qdev);
1724 	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1725 	ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
1726 	ecmd->duplex = ql_get_full_dup(qdev);
1727 	return 0;
1728 }
1729 
1730 static void ql_get_drvinfo(struct net_device *ndev,
1731 			   struct ethtool_drvinfo *drvinfo)
1732 {
1733 	struct ql3_adapter *qdev = netdev_priv(ndev);
1734 	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
1735 	strlcpy(drvinfo->version, ql3xxx_driver_version,
1736 		sizeof(drvinfo->version));
1737 	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
1738 		sizeof(drvinfo->bus_info));
1739 	drvinfo->regdump_len = 0;
1740 	drvinfo->eedump_len = 0;
1741 }
1742 
1743 static u32 ql_get_msglevel(struct net_device *ndev)
1744 {
1745 	struct ql3_adapter *qdev = netdev_priv(ndev);
1746 	return qdev->msg_enable;
1747 }
1748 
1749 static void ql_set_msglevel(struct net_device *ndev, u32 value)
1750 {
1751 	struct ql3_adapter *qdev = netdev_priv(ndev);
1752 	qdev->msg_enable = value;
1753 }
1754 
1755 static void ql_get_pauseparam(struct net_device *ndev,
1756 			      struct ethtool_pauseparam *pause)
1757 {
1758 	struct ql3_adapter *qdev = netdev_priv(ndev);
1759 	struct ql3xxx_port_registers __iomem *port_regs =
1760 		qdev->mem_map_registers;
1761 
1762 	u32 reg;
1763 	if (qdev->mac_index == 0)
1764 		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1765 	else
1766 		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1767 
1768 	pause->autoneg  = ql_get_auto_cfg_status(qdev);
1769 	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
1770 	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
1771 }
1772 
1773 static const struct ethtool_ops ql3xxx_ethtool_ops = {
1774 	.get_settings = ql_get_settings,
1775 	.get_drvinfo = ql_get_drvinfo,
1776 	.get_link = ethtool_op_get_link,
1777 	.get_msglevel = ql_get_msglevel,
1778 	.set_msglevel = ql_set_msglevel,
1779 	.get_pauseparam = ql_get_pauseparam,
1780 };
1781 
1782 static int ql_populate_free_queue(struct ql3_adapter *qdev)
1783 {
1784 	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1785 	dma_addr_t map;
1786 	int err;
1787 
1788 	while (lrg_buf_cb) {
1789 		if (!lrg_buf_cb->skb) {
1790 			lrg_buf_cb->skb =
1791 				netdev_alloc_skb(qdev->ndev,
1792 						 qdev->lrg_buffer_len);
1793 			if (unlikely(!lrg_buf_cb->skb)) {
1794 				netdev_printk(KERN_DEBUG, qdev->ndev,
1795 					      "Failed netdev_alloc_skb()\n");
1796 				break;
1797 			} else {
1798 				/*
1799 				 * We save some space to copy the ethhdr from
1800 				 * first buffer
1801 				 */
1802 				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1803 				map = pci_map_single(qdev->pdev,
1804 						     lrg_buf_cb->skb->data,
1805 						     qdev->lrg_buffer_len -
1806 						     QL_HEADER_SPACE,
1807 						     PCI_DMA_FROMDEVICE);
1808 
1809 				err = pci_dma_mapping_error(qdev->pdev, map);
1810 				if (err) {
1811 					netdev_err(qdev->ndev,
1812 						   "PCI mapping failed with error: %d\n",
1813 						   err);
1814 					dev_kfree_skb(lrg_buf_cb->skb);
1815 					lrg_buf_cb->skb = NULL;
1816 					break;
1817 				}
1818 
1819 
1820 				lrg_buf_cb->buf_phy_addr_low =
1821 					cpu_to_le32(LS_64BITS(map));
1822 				lrg_buf_cb->buf_phy_addr_high =
1823 					cpu_to_le32(MS_64BITS(map));
1824 				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1825 				dma_unmap_len_set(lrg_buf_cb, maplen,
1826 						  qdev->lrg_buffer_len -
1827 						  QL_HEADER_SPACE);
1828 				--qdev->lrg_buf_skb_check;
1829 				if (!qdev->lrg_buf_skb_check)
1830 					return 1;
1831 			}
1832 		}
1833 		lrg_buf_cb = lrg_buf_cb->next;
1834 	}
1835 	return 0;
1836 }
1837 
1838 /*
1839  * Caller holds hw_lock.
1840  */
1841 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1842 {
1843 	struct ql3xxx_port_registers __iomem *port_regs =
1844 		qdev->mem_map_registers;
1845 
1846 	if (qdev->small_buf_release_cnt >= 16) {
1847 		while (qdev->small_buf_release_cnt >= 16) {
1848 			qdev->small_buf_q_producer_index++;
1849 
1850 			if (qdev->small_buf_q_producer_index ==
1851 			    NUM_SBUFQ_ENTRIES)
1852 				qdev->small_buf_q_producer_index = 0;
1853 			qdev->small_buf_release_cnt -= 8;
1854 		}
1855 		wmb();
1856 		writel(qdev->small_buf_q_producer_index,
1857 			&port_regs->CommonRegs.rxSmallQProducerIndex);
1858 	}
1859 }
1860 
1861 /*
1862  * Caller holds hw_lock.
1863  */
1864 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1865 {
1866 	struct bufq_addr_element *lrg_buf_q_ele;
1867 	int i;
1868 	struct ql_rcv_buf_cb *lrg_buf_cb;
1869 	struct ql3xxx_port_registers __iomem *port_regs =
1870 		qdev->mem_map_registers;
1871 
1872 	if ((qdev->lrg_buf_free_count >= 8) &&
1873 	    (qdev->lrg_buf_release_cnt >= 16)) {
1874 
1875 		if (qdev->lrg_buf_skb_check)
1876 			if (!ql_populate_free_queue(qdev))
1877 				return;
1878 
1879 		lrg_buf_q_ele = qdev->lrg_buf_next_free;
1880 
1881 		while ((qdev->lrg_buf_release_cnt >= 16) &&
1882 		       (qdev->lrg_buf_free_count >= 8)) {
1883 
1884 			for (i = 0; i < 8; i++) {
1885 				lrg_buf_cb =
1886 				    ql_get_from_lrg_buf_free_list(qdev);
1887 				lrg_buf_q_ele->addr_high =
1888 				    lrg_buf_cb->buf_phy_addr_high;
1889 				lrg_buf_q_ele->addr_low =
1890 				    lrg_buf_cb->buf_phy_addr_low;
1891 				lrg_buf_q_ele++;
1892 
1893 				qdev->lrg_buf_release_cnt--;
1894 			}
1895 
1896 			qdev->lrg_buf_q_producer_index++;
1897 
1898 			if (qdev->lrg_buf_q_producer_index ==
1899 			    qdev->num_lbufq_entries)
1900 				qdev->lrg_buf_q_producer_index = 0;
1901 
1902 			if (qdev->lrg_buf_q_producer_index ==
1903 			    (qdev->num_lbufq_entries - 1)) {
1904 				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1905 			}
1906 		}
1907 		wmb();
1908 		qdev->lrg_buf_next_free = lrg_buf_q_ele;
1909 		writel(qdev->lrg_buf_q_producer_index,
1910 			&port_regs->CommonRegs.rxLargeQProducerIndex);
1911 	}
1912 }
1913 
1914 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1915 				   struct ob_mac_iocb_rsp *mac_rsp)
1916 {
1917 	struct ql_tx_buf_cb *tx_cb;
1918 	int i;
1919 
1920 	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1921 		netdev_warn(qdev->ndev,
1922 			    "Frame too short but it was padded and sent\n");
1923 	}
1924 
1925 	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1926 
1927 	/*  Check the transmit response flags for any errors */
1928 	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1929 		netdev_err(qdev->ndev,
1930 			   "Frame too short to be legal, frame not sent\n");
1931 
1932 		qdev->ndev->stats.tx_errors++;
1933 		goto frame_not_sent;
1934 	}
1935 
1936 	if (tx_cb->seg_count == 0) {
1937 		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1938 			   mac_rsp->transaction_id);
1939 
1940 		qdev->ndev->stats.tx_errors++;
1941 		goto invalid_seg_count;
1942 	}
1943 
1944 	pci_unmap_single(qdev->pdev,
1945 			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
1946 			 dma_unmap_len(&tx_cb->map[0], maplen),
1947 			 PCI_DMA_TODEVICE);
1948 	tx_cb->seg_count--;
1949 	if (tx_cb->seg_count) {
1950 		for (i = 1; i < tx_cb->seg_count; i++) {
1951 			pci_unmap_page(qdev->pdev,
1952 				       dma_unmap_addr(&tx_cb->map[i],
1953 						      mapaddr),
1954 				       dma_unmap_len(&tx_cb->map[i], maplen),
1955 				       PCI_DMA_TODEVICE);
1956 		}
1957 	}
1958 	qdev->ndev->stats.tx_packets++;
1959 	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
1960 
1961 frame_not_sent:
1962 	dev_kfree_skb_irq(tx_cb->skb);
1963 	tx_cb->skb = NULL;
1964 
1965 invalid_seg_count:
1966 	atomic_inc(&qdev->tx_count);
1967 }
1968 
1969 static void ql_get_sbuf(struct ql3_adapter *qdev)
1970 {
1971 	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1972 		qdev->small_buf_index = 0;
1973 	qdev->small_buf_release_cnt++;
1974 }
1975 
1976 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1977 {
1978 	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
1979 	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1980 	qdev->lrg_buf_release_cnt++;
1981 	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1982 		qdev->lrg_buf_index = 0;
1983 	return lrg_buf_cb;
1984 }
1985 
1986 /*
1987  * The difference between 3022 and 3032 for inbound completions:
1988  * 3022 uses two buffers per completion.  The first buffer contains
1989  * (some) header info, the second the remainder of the headers plus
1990  * the data.  For this chip we reserve some space at the top of the
1991  * receive buffer so that the header info in buffer one can be
1992  * prepended to the buffer two.  Buffer two is the sent up while
1993  * buffer one is returned to the hardware to be reused.
1994  * 3032 receives all of it's data and headers in one buffer for a
1995  * simpler process.  3032 also supports checksum verification as
1996  * can be seen in ql_process_macip_rx_intr().
1997  */
1998 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1999 				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
2000 {
2001 	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2002 	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2003 	struct sk_buff *skb;
2004 	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2005 
2006 	/*
2007 	 * Get the inbound address list (small buffer).
2008 	 */
2009 	ql_get_sbuf(qdev);
2010 
2011 	if (qdev->device_id == QL3022_DEVICE_ID)
2012 		lrg_buf_cb1 = ql_get_lbuf(qdev);
2013 
2014 	/* start of second buffer */
2015 	lrg_buf_cb2 = ql_get_lbuf(qdev);
2016 	skb = lrg_buf_cb2->skb;
2017 
2018 	qdev->ndev->stats.rx_packets++;
2019 	qdev->ndev->stats.rx_bytes += length;
2020 
2021 	skb_put(skb, length);
2022 	pci_unmap_single(qdev->pdev,
2023 			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
2024 			 dma_unmap_len(lrg_buf_cb2, maplen),
2025 			 PCI_DMA_FROMDEVICE);
2026 	prefetch(skb->data);
2027 	skb_checksum_none_assert(skb);
2028 	skb->protocol = eth_type_trans(skb, qdev->ndev);
2029 
2030 	netif_receive_skb(skb);
2031 	lrg_buf_cb2->skb = NULL;
2032 
2033 	if (qdev->device_id == QL3022_DEVICE_ID)
2034 		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2035 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2036 }
2037 
2038 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2039 				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2040 {
2041 	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2042 	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2043 	struct sk_buff *skb1 = NULL, *skb2;
2044 	struct net_device *ndev = qdev->ndev;
2045 	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2046 	u16 size = 0;
2047 
2048 	/*
2049 	 * Get the inbound address list (small buffer).
2050 	 */
2051 
2052 	ql_get_sbuf(qdev);
2053 
2054 	if (qdev->device_id == QL3022_DEVICE_ID) {
2055 		/* start of first buffer on 3022 */
2056 		lrg_buf_cb1 = ql_get_lbuf(qdev);
2057 		skb1 = lrg_buf_cb1->skb;
2058 		size = ETH_HLEN;
2059 		if (*((u16 *) skb1->data) != 0xFFFF)
2060 			size += VLAN_ETH_HLEN - ETH_HLEN;
2061 	}
2062 
2063 	/* start of second buffer */
2064 	lrg_buf_cb2 = ql_get_lbuf(qdev);
2065 	skb2 = lrg_buf_cb2->skb;
2066 
2067 	skb_put(skb2, length);	/* Just the second buffer length here. */
2068 	pci_unmap_single(qdev->pdev,
2069 			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
2070 			 dma_unmap_len(lrg_buf_cb2, maplen),
2071 			 PCI_DMA_FROMDEVICE);
2072 	prefetch(skb2->data);
2073 
2074 	skb_checksum_none_assert(skb2);
2075 	if (qdev->device_id == QL3022_DEVICE_ID) {
2076 		/*
2077 		 * Copy the ethhdr from first buffer to second. This
2078 		 * is necessary for 3022 IP completions.
2079 		 */
2080 		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2081 						 skb_push(skb2, size), size);
2082 	} else {
2083 		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2084 		if (checksum &
2085 			(IB_IP_IOCB_RSP_3032_ICE |
2086 			 IB_IP_IOCB_RSP_3032_CE)) {
2087 			netdev_err(ndev,
2088 				   "%s: Bad checksum for this %s packet, checksum = %x\n",
2089 				   __func__,
2090 				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
2091 				    "TCP" : "UDP"), checksum);
2092 		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2093 				(checksum & IB_IP_IOCB_RSP_3032_UDP &&
2094 				!(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
2095 			skb2->ip_summed = CHECKSUM_UNNECESSARY;
2096 		}
2097 	}
2098 	skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2099 
2100 	netif_receive_skb(skb2);
2101 	ndev->stats.rx_packets++;
2102 	ndev->stats.rx_bytes += length;
2103 	lrg_buf_cb2->skb = NULL;
2104 
2105 	if (qdev->device_id == QL3022_DEVICE_ID)
2106 		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2107 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2108 }
2109 
2110 static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2111 			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
2112 {
2113 	struct net_rsp_iocb *net_rsp;
2114 	struct net_device *ndev = qdev->ndev;
2115 	int work_done = 0;
2116 
2117 	/* While there are entries in the completion queue. */
2118 	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2119 		qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2120 
2121 		net_rsp = qdev->rsp_current;
2122 		rmb();
2123 		/*
2124 		 * Fix 4032 chip's undocumented "feature" where bit-8 is set
2125 		 * if the inbound completion is for a VLAN.
2126 		 */
2127 		if (qdev->device_id == QL3032_DEVICE_ID)
2128 			net_rsp->opcode &= 0x7f;
2129 		switch (net_rsp->opcode) {
2130 
2131 		case OPCODE_OB_MAC_IOCB_FN0:
2132 		case OPCODE_OB_MAC_IOCB_FN2:
2133 			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2134 					       net_rsp);
2135 			(*tx_cleaned)++;
2136 			break;
2137 
2138 		case OPCODE_IB_MAC_IOCB:
2139 		case OPCODE_IB_3032_MAC_IOCB:
2140 			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2141 					       net_rsp);
2142 			(*rx_cleaned)++;
2143 			break;
2144 
2145 		case OPCODE_IB_IP_IOCB:
2146 		case OPCODE_IB_3032_IP_IOCB:
2147 			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2148 						 net_rsp);
2149 			(*rx_cleaned)++;
2150 			break;
2151 		default: {
2152 			u32 *tmp = (u32 *)net_rsp;
2153 			netdev_err(ndev,
2154 				   "Hit default case, not handled!\n"
2155 				   "	dropping the packet, opcode = %x\n"
2156 				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2157 				   net_rsp->opcode,
2158 				   (unsigned long int)tmp[0],
2159 				   (unsigned long int)tmp[1],
2160 				   (unsigned long int)tmp[2],
2161 				   (unsigned long int)tmp[3]);
2162 		}
2163 		}
2164 
2165 		qdev->rsp_consumer_index++;
2166 
2167 		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2168 			qdev->rsp_consumer_index = 0;
2169 			qdev->rsp_current = qdev->rsp_q_virt_addr;
2170 		} else {
2171 			qdev->rsp_current++;
2172 		}
2173 
2174 		work_done = *tx_cleaned + *rx_cleaned;
2175 	}
2176 
2177 	return work_done;
2178 }
2179 
2180 static int ql_poll(struct napi_struct *napi, int budget)
2181 {
2182 	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2183 	int rx_cleaned = 0, tx_cleaned = 0;
2184 	unsigned long hw_flags;
2185 	struct ql3xxx_port_registers __iomem *port_regs =
2186 		qdev->mem_map_registers;
2187 
2188 	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2189 
2190 	if (tx_cleaned + rx_cleaned != budget) {
2191 		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2192 		__napi_complete(napi);
2193 		ql_update_small_bufq_prod_index(qdev);
2194 		ql_update_lrg_bufq_prod_index(qdev);
2195 		writel(qdev->rsp_consumer_index,
2196 			    &port_regs->CommonRegs.rspQConsumerIndex);
2197 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2198 
2199 		ql_enable_interrupts(qdev);
2200 	}
2201 	return tx_cleaned + rx_cleaned;
2202 }
2203 
2204 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2205 {
2206 
2207 	struct net_device *ndev = dev_id;
2208 	struct ql3_adapter *qdev = netdev_priv(ndev);
2209 	struct ql3xxx_port_registers __iomem *port_regs =
2210 		qdev->mem_map_registers;
2211 	u32 value;
2212 	int handled = 1;
2213 	u32 var;
2214 
2215 	value = ql_read_common_reg_l(qdev,
2216 				     &port_regs->CommonRegs.ispControlStatus);
2217 
2218 	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2219 		spin_lock(&qdev->adapter_lock);
2220 		netif_stop_queue(qdev->ndev);
2221 		netif_carrier_off(qdev->ndev);
2222 		ql_disable_interrupts(qdev);
2223 		qdev->port_link_state = LS_DOWN;
2224 		set_bit(QL_RESET_ACTIVE, &qdev->flags) ;
2225 
2226 		if (value & ISP_CONTROL_FE) {
2227 			/*
2228 			 * Chip Fatal Error.
2229 			 */
2230 			var =
2231 			    ql_read_page0_reg_l(qdev,
2232 					      &port_regs->PortFatalErrStatus);
2233 			netdev_warn(ndev,
2234 				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2235 				    var);
2236 			set_bit(QL_RESET_START, &qdev->flags) ;
2237 		} else {
2238 			/*
2239 			 * Soft Reset Requested.
2240 			 */
2241 			set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
2242 			netdev_err(ndev,
2243 				   "Another function issued a reset to the chip. ISR value = %x\n",
2244 				   value);
2245 		}
2246 		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2247 		spin_unlock(&qdev->adapter_lock);
2248 	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2249 		ql_disable_interrupts(qdev);
2250 		if (likely(napi_schedule_prep(&qdev->napi)))
2251 			__napi_schedule(&qdev->napi);
2252 	} else
2253 		return IRQ_NONE;
2254 
2255 	return IRQ_RETVAL(handled);
2256 }
2257 
2258 /*
2259  * Get the total number of segments needed for the given number of fragments.
2260  * This is necessary because outbound address lists (OAL) will be used when
2261  * more than two frags are given.  Each address list has 5 addr/len pairs.
2262  * The 5th pair in each OAL is used to  point to the next OAL if more frags
2263  * are coming.  That is why the frags:segment count ratio is not linear.
2264  */
2265 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2266 {
2267 	if (qdev->device_id == QL3022_DEVICE_ID)
2268 		return 1;
2269 
2270 	if (frags <= 2)
2271 		return frags + 1;
2272 	else if (frags <= 6)
2273 		return frags + 2;
2274 	else if (frags <= 10)
2275 		return frags + 3;
2276 	else if (frags <= 14)
2277 		return frags + 4;
2278 	else if (frags <= 18)
2279 		return frags + 5;
2280 	return -1;
2281 }
2282 
2283 static void ql_hw_csum_setup(const struct sk_buff *skb,
2284 			     struct ob_mac_iocb_req *mac_iocb_ptr)
2285 {
2286 	const struct iphdr *ip = ip_hdr(skb);
2287 
2288 	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2289 	mac_iocb_ptr->ip_hdr_len = ip->ihl;
2290 
2291 	if (ip->protocol == IPPROTO_TCP) {
2292 		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2293 			OB_3032MAC_IOCB_REQ_IC;
2294 	} else {
2295 		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2296 			OB_3032MAC_IOCB_REQ_IC;
2297 	}
2298 
2299 }
2300 
2301 /*
2302  * Map the buffers for this transmit.
2303  * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2304  */
2305 static int ql_send_map(struct ql3_adapter *qdev,
2306 				struct ob_mac_iocb_req *mac_iocb_ptr,
2307 				struct ql_tx_buf_cb *tx_cb,
2308 				struct sk_buff *skb)
2309 {
2310 	struct oal *oal;
2311 	struct oal_entry *oal_entry;
2312 	int len = skb_headlen(skb);
2313 	dma_addr_t map;
2314 	int err;
2315 	int completed_segs, i;
2316 	int seg_cnt, seg = 0;
2317 	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2318 
2319 	seg_cnt = tx_cb->seg_count;
2320 	/*
2321 	 * Map the skb buffer first.
2322 	 */
2323 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2324 
2325 	err = pci_dma_mapping_error(qdev->pdev, map);
2326 	if (err) {
2327 		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2328 			   err);
2329 
2330 		return NETDEV_TX_BUSY;
2331 	}
2332 
2333 	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2334 	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2335 	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2336 	oal_entry->len = cpu_to_le32(len);
2337 	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2338 	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
2339 	seg++;
2340 
2341 	if (seg_cnt == 1) {
2342 		/* Terminate the last segment. */
2343 		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2344 		return NETDEV_TX_OK;
2345 	}
2346 	oal = tx_cb->oal;
2347 	for (completed_segs = 0;
2348 	     completed_segs < frag_cnt;
2349 	     completed_segs++, seg++) {
2350 		skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2351 		oal_entry++;
2352 		/*
2353 		 * Check for continuation requirements.
2354 		 * It's strange but necessary.
2355 		 * Continuation entry points to outbound address list.
2356 		 */
2357 		if ((seg == 2 && seg_cnt > 3) ||
2358 		    (seg == 7 && seg_cnt > 8) ||
2359 		    (seg == 12 && seg_cnt > 13) ||
2360 		    (seg == 17 && seg_cnt > 18)) {
2361 			map = pci_map_single(qdev->pdev, oal,
2362 					     sizeof(struct oal),
2363 					     PCI_DMA_TODEVICE);
2364 
2365 			err = pci_dma_mapping_error(qdev->pdev, map);
2366 			if (err) {
2367 				netdev_err(qdev->ndev,
2368 					   "PCI mapping outbound address list with error: %d\n",
2369 					   err);
2370 				goto map_error;
2371 			}
2372 
2373 			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2374 			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2375 			oal_entry->len = cpu_to_le32(sizeof(struct oal) |
2376 						     OAL_CONT_ENTRY);
2377 			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2378 			dma_unmap_len_set(&tx_cb->map[seg], maplen,
2379 					  sizeof(struct oal));
2380 			oal_entry = (struct oal_entry *)oal;
2381 			oal++;
2382 			seg++;
2383 		}
2384 
2385 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
2386 				       DMA_TO_DEVICE);
2387 
2388 		err = dma_mapping_error(&qdev->pdev->dev, map);
2389 		if (err) {
2390 			netdev_err(qdev->ndev,
2391 				   "PCI mapping frags failed with error: %d\n",
2392 				   err);
2393 			goto map_error;
2394 		}
2395 
2396 		oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2397 		oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2398 		oal_entry->len = cpu_to_le32(skb_frag_size(frag));
2399 		dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2400 		dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
2401 		}
2402 	/* Terminate the last segment. */
2403 	oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2404 	return NETDEV_TX_OK;
2405 
2406 map_error:
2407 	/* A PCI mapping failed and now we will need to back out
2408 	 * We need to traverse through the oal's and associated pages which
2409 	 * have been mapped and now we must unmap them to clean up properly
2410 	 */
2411 
2412 	seg = 1;
2413 	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2414 	oal = tx_cb->oal;
2415 	for (i = 0; i < completed_segs; i++, seg++) {
2416 		oal_entry++;
2417 
2418 		/*
2419 		 * Check for continuation requirements.
2420 		 * It's strange but necessary.
2421 		 */
2422 
2423 		if ((seg == 2 && seg_cnt > 3) ||
2424 		    (seg == 7 && seg_cnt > 8) ||
2425 		    (seg == 12 && seg_cnt > 13) ||
2426 		    (seg == 17 && seg_cnt > 18)) {
2427 			pci_unmap_single(qdev->pdev,
2428 				dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2429 				dma_unmap_len(&tx_cb->map[seg], maplen),
2430 				 PCI_DMA_TODEVICE);
2431 			oal++;
2432 			seg++;
2433 		}
2434 
2435 		pci_unmap_page(qdev->pdev,
2436 			       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2437 			       dma_unmap_len(&tx_cb->map[seg], maplen),
2438 			       PCI_DMA_TODEVICE);
2439 	}
2440 
2441 	pci_unmap_single(qdev->pdev,
2442 			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
2443 			 dma_unmap_addr(&tx_cb->map[0], maplen),
2444 			 PCI_DMA_TODEVICE);
2445 
2446 	return NETDEV_TX_BUSY;
2447 
2448 }
2449 
2450 /*
2451  * The difference between 3022 and 3032 sends:
2452  * 3022 only supports a simple single segment transmission.
2453  * 3032 supports checksumming and scatter/gather lists (fragments).
2454  * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2455  * in the IOCB plus a chain of outbound address lists (OAL) that
2456  * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
2457  * will be used to point to an OAL when more ALP entries are required.
2458  * The IOCB is always the top of the chain followed by one or more
2459  * OALs (when necessary).
2460  */
2461 static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2462 			       struct net_device *ndev)
2463 {
2464 	struct ql3_adapter *qdev = netdev_priv(ndev);
2465 	struct ql3xxx_port_registers __iomem *port_regs =
2466 			qdev->mem_map_registers;
2467 	struct ql_tx_buf_cb *tx_cb;
2468 	u32 tot_len = skb->len;
2469 	struct ob_mac_iocb_req *mac_iocb_ptr;
2470 
2471 	if (unlikely(atomic_read(&qdev->tx_count) < 2))
2472 		return NETDEV_TX_BUSY;
2473 
2474 	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2475 	tx_cb->seg_count = ql_get_seg_count(qdev,
2476 					     skb_shinfo(skb)->nr_frags);
2477 	if (tx_cb->seg_count == -1) {
2478 		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
2479 		return NETDEV_TX_OK;
2480 	}
2481 
2482 	mac_iocb_ptr = tx_cb->queue_entry;
2483 	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2484 	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2485 	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2486 	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2487 	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2488 	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2489 	tx_cb->skb = skb;
2490 	if (qdev->device_id == QL3032_DEVICE_ID &&
2491 	    skb->ip_summed == CHECKSUM_PARTIAL)
2492 		ql_hw_csum_setup(skb, mac_iocb_ptr);
2493 
2494 	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2495 		netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2496 		return NETDEV_TX_BUSY;
2497 	}
2498 
2499 	wmb();
2500 	qdev->req_producer_index++;
2501 	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2502 		qdev->req_producer_index = 0;
2503 	wmb();
2504 	ql_write_common_reg_l(qdev,
2505 			    &port_regs->CommonRegs.reqQProducerIndex,
2506 			    qdev->req_producer_index);
2507 
2508 	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2509 		     "tx queued, slot %d, len %d\n",
2510 		     qdev->req_producer_index, skb->len);
2511 
2512 	atomic_dec(&qdev->tx_count);
2513 	return NETDEV_TX_OK;
2514 }
2515 
2516 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2517 {
2518 	qdev->req_q_size =
2519 	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2520 
2521 	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2522 
2523 	/* The barrier is required to ensure request and response queue
2524 	 * addr writes to the registers.
2525 	 */
2526 	wmb();
2527 
2528 	qdev->req_q_virt_addr =
2529 	    pci_alloc_consistent(qdev->pdev,
2530 				 (size_t) qdev->req_q_size,
2531 				 &qdev->req_q_phy_addr);
2532 
2533 	if ((qdev->req_q_virt_addr == NULL) ||
2534 	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2535 		netdev_err(qdev->ndev, "reqQ failed\n");
2536 		return -ENOMEM;
2537 	}
2538 
2539 	qdev->rsp_q_virt_addr =
2540 	    pci_alloc_consistent(qdev->pdev,
2541 				 (size_t) qdev->rsp_q_size,
2542 				 &qdev->rsp_q_phy_addr);
2543 
2544 	if ((qdev->rsp_q_virt_addr == NULL) ||
2545 	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2546 		netdev_err(qdev->ndev, "rspQ allocation failed\n");
2547 		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2548 				    qdev->req_q_virt_addr,
2549 				    qdev->req_q_phy_addr);
2550 		return -ENOMEM;
2551 	}
2552 
2553 	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2554 
2555 	return 0;
2556 }
2557 
2558 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2559 {
2560 	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2561 		netdev_info(qdev->ndev, "Already done\n");
2562 		return;
2563 	}
2564 
2565 	pci_free_consistent(qdev->pdev,
2566 			    qdev->req_q_size,
2567 			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2568 
2569 	qdev->req_q_virt_addr = NULL;
2570 
2571 	pci_free_consistent(qdev->pdev,
2572 			    qdev->rsp_q_size,
2573 			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2574 
2575 	qdev->rsp_q_virt_addr = NULL;
2576 
2577 	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2578 }
2579 
2580 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2581 {
2582 	/* Create Large Buffer Queue */
2583 	qdev->lrg_buf_q_size =
2584 		qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2585 	if (qdev->lrg_buf_q_size < PAGE_SIZE)
2586 		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2587 	else
2588 		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2589 
2590 	qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
2591 				      sizeof(struct ql_rcv_buf_cb),
2592 				      GFP_KERNEL);
2593 	if (qdev->lrg_buf == NULL)
2594 		return -ENOMEM;
2595 
2596 	qdev->lrg_buf_q_alloc_virt_addr =
2597 		pci_alloc_consistent(qdev->pdev,
2598 				     qdev->lrg_buf_q_alloc_size,
2599 				     &qdev->lrg_buf_q_alloc_phy_addr);
2600 
2601 	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2602 		netdev_err(qdev->ndev, "lBufQ failed\n");
2603 		return -ENOMEM;
2604 	}
2605 	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2606 	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2607 
2608 	/* Create Small Buffer Queue */
2609 	qdev->small_buf_q_size =
2610 		NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2611 	if (qdev->small_buf_q_size < PAGE_SIZE)
2612 		qdev->small_buf_q_alloc_size = PAGE_SIZE;
2613 	else
2614 		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2615 
2616 	qdev->small_buf_q_alloc_virt_addr =
2617 		pci_alloc_consistent(qdev->pdev,
2618 				     qdev->small_buf_q_alloc_size,
2619 				     &qdev->small_buf_q_alloc_phy_addr);
2620 
2621 	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2622 		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2623 		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2624 				    qdev->lrg_buf_q_alloc_virt_addr,
2625 				    qdev->lrg_buf_q_alloc_phy_addr);
2626 		return -ENOMEM;
2627 	}
2628 
2629 	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2630 	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2631 	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2632 	return 0;
2633 }
2634 
2635 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2636 {
2637 	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2638 		netdev_info(qdev->ndev, "Already done\n");
2639 		return;
2640 	}
2641 	kfree(qdev->lrg_buf);
2642 	pci_free_consistent(qdev->pdev,
2643 			    qdev->lrg_buf_q_alloc_size,
2644 			    qdev->lrg_buf_q_alloc_virt_addr,
2645 			    qdev->lrg_buf_q_alloc_phy_addr);
2646 
2647 	qdev->lrg_buf_q_virt_addr = NULL;
2648 
2649 	pci_free_consistent(qdev->pdev,
2650 			    qdev->small_buf_q_alloc_size,
2651 			    qdev->small_buf_q_alloc_virt_addr,
2652 			    qdev->small_buf_q_alloc_phy_addr);
2653 
2654 	qdev->small_buf_q_virt_addr = NULL;
2655 
2656 	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2657 }
2658 
2659 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2660 {
2661 	int i;
2662 	struct bufq_addr_element *small_buf_q_entry;
2663 
2664 	/* Currently we allocate on one of memory and use it for smallbuffers */
2665 	qdev->small_buf_total_size =
2666 		(QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2667 		 QL_SMALL_BUFFER_SIZE);
2668 
2669 	qdev->small_buf_virt_addr =
2670 		pci_alloc_consistent(qdev->pdev,
2671 				     qdev->small_buf_total_size,
2672 				     &qdev->small_buf_phy_addr);
2673 
2674 	if (qdev->small_buf_virt_addr == NULL) {
2675 		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2676 		return -ENOMEM;
2677 	}
2678 
2679 	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2680 	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2681 
2682 	small_buf_q_entry = qdev->small_buf_q_virt_addr;
2683 
2684 	/* Initialize the small buffer queue. */
2685 	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2686 		small_buf_q_entry->addr_high =
2687 		    cpu_to_le32(qdev->small_buf_phy_addr_high);
2688 		small_buf_q_entry->addr_low =
2689 		    cpu_to_le32(qdev->small_buf_phy_addr_low +
2690 				(i * QL_SMALL_BUFFER_SIZE));
2691 		small_buf_q_entry++;
2692 	}
2693 	qdev->small_buf_index = 0;
2694 	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2695 	return 0;
2696 }
2697 
2698 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2699 {
2700 	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2701 		netdev_info(qdev->ndev, "Already done\n");
2702 		return;
2703 	}
2704 	if (qdev->small_buf_virt_addr != NULL) {
2705 		pci_free_consistent(qdev->pdev,
2706 				    qdev->small_buf_total_size,
2707 				    qdev->small_buf_virt_addr,
2708 				    qdev->small_buf_phy_addr);
2709 
2710 		qdev->small_buf_virt_addr = NULL;
2711 	}
2712 }
2713 
2714 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2715 {
2716 	int i = 0;
2717 	struct ql_rcv_buf_cb *lrg_buf_cb;
2718 
2719 	for (i = 0; i < qdev->num_large_buffers; i++) {
2720 		lrg_buf_cb = &qdev->lrg_buf[i];
2721 		if (lrg_buf_cb->skb) {
2722 			dev_kfree_skb(lrg_buf_cb->skb);
2723 			pci_unmap_single(qdev->pdev,
2724 					 dma_unmap_addr(lrg_buf_cb, mapaddr),
2725 					 dma_unmap_len(lrg_buf_cb, maplen),
2726 					 PCI_DMA_FROMDEVICE);
2727 			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2728 		} else {
2729 			break;
2730 		}
2731 	}
2732 }
2733 
2734 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2735 {
2736 	int i;
2737 	struct ql_rcv_buf_cb *lrg_buf_cb;
2738 	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2739 
2740 	for (i = 0; i < qdev->num_large_buffers; i++) {
2741 		lrg_buf_cb = &qdev->lrg_buf[i];
2742 		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2743 		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2744 		buf_addr_ele++;
2745 	}
2746 	qdev->lrg_buf_index = 0;
2747 	qdev->lrg_buf_skb_check = 0;
2748 }
2749 
2750 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2751 {
2752 	int i;
2753 	struct ql_rcv_buf_cb *lrg_buf_cb;
2754 	struct sk_buff *skb;
2755 	dma_addr_t map;
2756 	int err;
2757 
2758 	for (i = 0; i < qdev->num_large_buffers; i++) {
2759 		skb = netdev_alloc_skb(qdev->ndev,
2760 				       qdev->lrg_buffer_len);
2761 		if (unlikely(!skb)) {
2762 			/* Better luck next round */
2763 			netdev_err(qdev->ndev,
2764 				   "large buff alloc failed for %d bytes at index %d\n",
2765 				   qdev->lrg_buffer_len * 2, i);
2766 			ql_free_large_buffers(qdev);
2767 			return -ENOMEM;
2768 		} else {
2769 
2770 			lrg_buf_cb = &qdev->lrg_buf[i];
2771 			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2772 			lrg_buf_cb->index = i;
2773 			lrg_buf_cb->skb = skb;
2774 			/*
2775 			 * We save some space to copy the ethhdr from first
2776 			 * buffer
2777 			 */
2778 			skb_reserve(skb, QL_HEADER_SPACE);
2779 			map = pci_map_single(qdev->pdev,
2780 					     skb->data,
2781 					     qdev->lrg_buffer_len -
2782 					     QL_HEADER_SPACE,
2783 					     PCI_DMA_FROMDEVICE);
2784 
2785 			err = pci_dma_mapping_error(qdev->pdev, map);
2786 			if (err) {
2787 				netdev_err(qdev->ndev,
2788 					   "PCI mapping failed with error: %d\n",
2789 					   err);
2790 				ql_free_large_buffers(qdev);
2791 				return -ENOMEM;
2792 			}
2793 
2794 			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2795 			dma_unmap_len_set(lrg_buf_cb, maplen,
2796 					  qdev->lrg_buffer_len -
2797 					  QL_HEADER_SPACE);
2798 			lrg_buf_cb->buf_phy_addr_low =
2799 			    cpu_to_le32(LS_64BITS(map));
2800 			lrg_buf_cb->buf_phy_addr_high =
2801 			    cpu_to_le32(MS_64BITS(map));
2802 		}
2803 	}
2804 	return 0;
2805 }
2806 
2807 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2808 {
2809 	struct ql_tx_buf_cb *tx_cb;
2810 	int i;
2811 
2812 	tx_cb = &qdev->tx_buf[0];
2813 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2814 		kfree(tx_cb->oal);
2815 		tx_cb->oal = NULL;
2816 		tx_cb++;
2817 	}
2818 }
2819 
2820 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2821 {
2822 	struct ql_tx_buf_cb *tx_cb;
2823 	int i;
2824 	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2825 
2826 	/* Create free list of transmit buffers */
2827 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2828 
2829 		tx_cb = &qdev->tx_buf[i];
2830 		tx_cb->skb = NULL;
2831 		tx_cb->queue_entry = req_q_curr;
2832 		req_q_curr++;
2833 		tx_cb->oal = kmalloc(512, GFP_KERNEL);
2834 		if (tx_cb->oal == NULL)
2835 			return -ENOMEM;
2836 	}
2837 	return 0;
2838 }
2839 
2840 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2841 {
2842 	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2843 		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2844 		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2845 	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2846 		/*
2847 		 * Bigger buffers, so less of them.
2848 		 */
2849 		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2850 		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2851 	} else {
2852 		netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
2853 			   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2854 		return -ENOMEM;
2855 	}
2856 	qdev->num_large_buffers =
2857 		qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2858 	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2859 	qdev->max_frame_size =
2860 		(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2861 
2862 	/*
2863 	 * First allocate a page of shared memory and use it for shadow
2864 	 * locations of Network Request Queue Consumer Address Register and
2865 	 * Network Completion Queue Producer Index Register
2866 	 */
2867 	qdev->shadow_reg_virt_addr =
2868 		pci_alloc_consistent(qdev->pdev,
2869 				     PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2870 
2871 	if (qdev->shadow_reg_virt_addr != NULL) {
2872 		qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
2873 		qdev->req_consumer_index_phy_addr_high =
2874 			MS_64BITS(qdev->shadow_reg_phy_addr);
2875 		qdev->req_consumer_index_phy_addr_low =
2876 			LS_64BITS(qdev->shadow_reg_phy_addr);
2877 
2878 		qdev->prsp_producer_index =
2879 			(__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2880 		qdev->rsp_producer_index_phy_addr_high =
2881 			qdev->req_consumer_index_phy_addr_high;
2882 		qdev->rsp_producer_index_phy_addr_low =
2883 			qdev->req_consumer_index_phy_addr_low + 8;
2884 	} else {
2885 		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2886 		return -ENOMEM;
2887 	}
2888 
2889 	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2890 		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2891 		goto err_req_rsp;
2892 	}
2893 
2894 	if (ql_alloc_buffer_queues(qdev) != 0) {
2895 		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2896 		goto err_buffer_queues;
2897 	}
2898 
2899 	if (ql_alloc_small_buffers(qdev) != 0) {
2900 		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2901 		goto err_small_buffers;
2902 	}
2903 
2904 	if (ql_alloc_large_buffers(qdev) != 0) {
2905 		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2906 		goto err_small_buffers;
2907 	}
2908 
2909 	/* Initialize the large buffer queue. */
2910 	ql_init_large_buffers(qdev);
2911 	if (ql_create_send_free_list(qdev))
2912 		goto err_free_list;
2913 
2914 	qdev->rsp_current = qdev->rsp_q_virt_addr;
2915 
2916 	return 0;
2917 err_free_list:
2918 	ql_free_send_free_list(qdev);
2919 err_small_buffers:
2920 	ql_free_buffer_queues(qdev);
2921 err_buffer_queues:
2922 	ql_free_net_req_rsp_queues(qdev);
2923 err_req_rsp:
2924 	pci_free_consistent(qdev->pdev,
2925 			    PAGE_SIZE,
2926 			    qdev->shadow_reg_virt_addr,
2927 			    qdev->shadow_reg_phy_addr);
2928 
2929 	return -ENOMEM;
2930 }
2931 
2932 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2933 {
2934 	ql_free_send_free_list(qdev);
2935 	ql_free_large_buffers(qdev);
2936 	ql_free_small_buffers(qdev);
2937 	ql_free_buffer_queues(qdev);
2938 	ql_free_net_req_rsp_queues(qdev);
2939 	if (qdev->shadow_reg_virt_addr != NULL) {
2940 		pci_free_consistent(qdev->pdev,
2941 				    PAGE_SIZE,
2942 				    qdev->shadow_reg_virt_addr,
2943 				    qdev->shadow_reg_phy_addr);
2944 		qdev->shadow_reg_virt_addr = NULL;
2945 	}
2946 }
2947 
2948 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2949 {
2950 	struct ql3xxx_local_ram_registers __iomem *local_ram =
2951 	    (void __iomem *)qdev->mem_map_registers;
2952 
2953 	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2954 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2955 			 2) << 4))
2956 		return -1;
2957 
2958 	ql_write_page2_reg(qdev,
2959 			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2960 
2961 	ql_write_page2_reg(qdev,
2962 			   &local_ram->maxBufletCount,
2963 			   qdev->nvram_data.bufletCount);
2964 
2965 	ql_write_page2_reg(qdev,
2966 			   &local_ram->freeBufletThresholdLow,
2967 			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2968 			   (qdev->nvram_data.tcpWindowThreshold0));
2969 
2970 	ql_write_page2_reg(qdev,
2971 			   &local_ram->freeBufletThresholdHigh,
2972 			   qdev->nvram_data.tcpWindowThreshold50);
2973 
2974 	ql_write_page2_reg(qdev,
2975 			   &local_ram->ipHashTableBase,
2976 			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
2977 			   qdev->nvram_data.ipHashTableBaseLo);
2978 	ql_write_page2_reg(qdev,
2979 			   &local_ram->ipHashTableCount,
2980 			   qdev->nvram_data.ipHashTableSize);
2981 	ql_write_page2_reg(qdev,
2982 			   &local_ram->tcpHashTableBase,
2983 			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2984 			   qdev->nvram_data.tcpHashTableBaseLo);
2985 	ql_write_page2_reg(qdev,
2986 			   &local_ram->tcpHashTableCount,
2987 			   qdev->nvram_data.tcpHashTableSize);
2988 	ql_write_page2_reg(qdev,
2989 			   &local_ram->ncbBase,
2990 			   (qdev->nvram_data.ncbTableBaseHi << 16) |
2991 			   qdev->nvram_data.ncbTableBaseLo);
2992 	ql_write_page2_reg(qdev,
2993 			   &local_ram->maxNcbCount,
2994 			   qdev->nvram_data.ncbTableSize);
2995 	ql_write_page2_reg(qdev,
2996 			   &local_ram->drbBase,
2997 			   (qdev->nvram_data.drbTableBaseHi << 16) |
2998 			   qdev->nvram_data.drbTableBaseLo);
2999 	ql_write_page2_reg(qdev,
3000 			   &local_ram->maxDrbCount,
3001 			   qdev->nvram_data.drbTableSize);
3002 	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3003 	return 0;
3004 }
3005 
3006 static int ql_adapter_initialize(struct ql3_adapter *qdev)
3007 {
3008 	u32 value;
3009 	struct ql3xxx_port_registers __iomem *port_regs =
3010 		qdev->mem_map_registers;
3011 	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3012 	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3013 		(void __iomem *)port_regs;
3014 	u32 delay = 10;
3015 	int status = 0;
3016 
3017 	if (ql_mii_setup(qdev))
3018 		return -1;
3019 
3020 	/* Bring out PHY out of reset */
3021 	ql_write_common_reg(qdev, spir,
3022 			    (ISP_SERIAL_PORT_IF_WE |
3023 			     (ISP_SERIAL_PORT_IF_WE << 16)));
3024 	/* Give the PHY time to come out of reset. */
3025 	mdelay(100);
3026 	qdev->port_link_state = LS_DOWN;
3027 	netif_carrier_off(qdev->ndev);
3028 
3029 	/* V2 chip fix for ARS-39168. */
3030 	ql_write_common_reg(qdev, spir,
3031 			    (ISP_SERIAL_PORT_IF_SDE |
3032 			     (ISP_SERIAL_PORT_IF_SDE << 16)));
3033 
3034 	/* Request Queue Registers */
3035 	*((u32 *)(qdev->preq_consumer_index)) = 0;
3036 	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3037 	qdev->req_producer_index = 0;
3038 
3039 	ql_write_page1_reg(qdev,
3040 			   &hmem_regs->reqConsumerIndexAddrHigh,
3041 			   qdev->req_consumer_index_phy_addr_high);
3042 	ql_write_page1_reg(qdev,
3043 			   &hmem_regs->reqConsumerIndexAddrLow,
3044 			   qdev->req_consumer_index_phy_addr_low);
3045 
3046 	ql_write_page1_reg(qdev,
3047 			   &hmem_regs->reqBaseAddrHigh,
3048 			   MS_64BITS(qdev->req_q_phy_addr));
3049 	ql_write_page1_reg(qdev,
3050 			   &hmem_regs->reqBaseAddrLow,
3051 			   LS_64BITS(qdev->req_q_phy_addr));
3052 	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3053 
3054 	/* Response Queue Registers */
3055 	*((__le16 *) (qdev->prsp_producer_index)) = 0;
3056 	qdev->rsp_consumer_index = 0;
3057 	qdev->rsp_current = qdev->rsp_q_virt_addr;
3058 
3059 	ql_write_page1_reg(qdev,
3060 			   &hmem_regs->rspProducerIndexAddrHigh,
3061 			   qdev->rsp_producer_index_phy_addr_high);
3062 
3063 	ql_write_page1_reg(qdev,
3064 			   &hmem_regs->rspProducerIndexAddrLow,
3065 			   qdev->rsp_producer_index_phy_addr_low);
3066 
3067 	ql_write_page1_reg(qdev,
3068 			   &hmem_regs->rspBaseAddrHigh,
3069 			   MS_64BITS(qdev->rsp_q_phy_addr));
3070 
3071 	ql_write_page1_reg(qdev,
3072 			   &hmem_regs->rspBaseAddrLow,
3073 			   LS_64BITS(qdev->rsp_q_phy_addr));
3074 
3075 	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3076 
3077 	/* Large Buffer Queue */
3078 	ql_write_page1_reg(qdev,
3079 			   &hmem_regs->rxLargeQBaseAddrHigh,
3080 			   MS_64BITS(qdev->lrg_buf_q_phy_addr));
3081 
3082 	ql_write_page1_reg(qdev,
3083 			   &hmem_regs->rxLargeQBaseAddrLow,
3084 			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
3085 
3086 	ql_write_page1_reg(qdev,
3087 			   &hmem_regs->rxLargeQLength,
3088 			   qdev->num_lbufq_entries);
3089 
3090 	ql_write_page1_reg(qdev,
3091 			   &hmem_regs->rxLargeBufferLength,
3092 			   qdev->lrg_buffer_len);
3093 
3094 	/* Small Buffer Queue */
3095 	ql_write_page1_reg(qdev,
3096 			   &hmem_regs->rxSmallQBaseAddrHigh,
3097 			   MS_64BITS(qdev->small_buf_q_phy_addr));
3098 
3099 	ql_write_page1_reg(qdev,
3100 			   &hmem_regs->rxSmallQBaseAddrLow,
3101 			   LS_64BITS(qdev->small_buf_q_phy_addr));
3102 
3103 	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3104 	ql_write_page1_reg(qdev,
3105 			   &hmem_regs->rxSmallBufferLength,
3106 			   QL_SMALL_BUFFER_SIZE);
3107 
3108 	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3109 	qdev->small_buf_release_cnt = 8;
3110 	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3111 	qdev->lrg_buf_release_cnt = 8;
3112 	qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
3113 	qdev->small_buf_index = 0;
3114 	qdev->lrg_buf_index = 0;
3115 	qdev->lrg_buf_free_count = 0;
3116 	qdev->lrg_buf_free_head = NULL;
3117 	qdev->lrg_buf_free_tail = NULL;
3118 
3119 	ql_write_common_reg(qdev,
3120 			    &port_regs->CommonRegs.
3121 			    rxSmallQProducerIndex,
3122 			    qdev->small_buf_q_producer_index);
3123 	ql_write_common_reg(qdev,
3124 			    &port_regs->CommonRegs.
3125 			    rxLargeQProducerIndex,
3126 			    qdev->lrg_buf_q_producer_index);
3127 
3128 	/*
3129 	 * Find out if the chip has already been initialized.  If it has, then
3130 	 * we skip some of the initialization.
3131 	 */
3132 	clear_bit(QL_LINK_MASTER, &qdev->flags);
3133 	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3134 	if ((value & PORT_STATUS_IC) == 0) {
3135 
3136 		/* Chip has not been configured yet, so let it rip. */
3137 		if (ql_init_misc_registers(qdev)) {
3138 			status = -1;
3139 			goto out;
3140 		}
3141 
3142 		value = qdev->nvram_data.tcpMaxWindowSize;
3143 		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3144 
3145 		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3146 
3147 		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3148 				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3149 				 * 2) << 13)) {
3150 			status = -1;
3151 			goto out;
3152 		}
3153 		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3154 		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3155 				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3156 				     16) | (INTERNAL_CHIP_SD |
3157 					    INTERNAL_CHIP_WE)));
3158 		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3159 	}
3160 
3161 	if (qdev->mac_index)
3162 		ql_write_page0_reg(qdev,
3163 				   &port_regs->mac1MaxFrameLengthReg,
3164 				   qdev->max_frame_size);
3165 	else
3166 		ql_write_page0_reg(qdev,
3167 					   &port_regs->mac0MaxFrameLengthReg,
3168 					   qdev->max_frame_size);
3169 
3170 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3171 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3172 			 2) << 7)) {
3173 		status = -1;
3174 		goto out;
3175 	}
3176 
3177 	PHY_Setup(qdev);
3178 	ql_init_scan_mode(qdev);
3179 	ql_get_phy_owner(qdev);
3180 
3181 	/* Load the MAC Configuration */
3182 
3183 	/* Program lower 32 bits of the MAC address */
3184 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3185 			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3186 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3187 			   ((qdev->ndev->dev_addr[2] << 24)
3188 			    | (qdev->ndev->dev_addr[3] << 16)
3189 			    | (qdev->ndev->dev_addr[4] << 8)
3190 			    | qdev->ndev->dev_addr[5]));
3191 
3192 	/* Program top 16 bits of the MAC address */
3193 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3194 			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3195 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3196 			   ((qdev->ndev->dev_addr[0] << 8)
3197 			    | qdev->ndev->dev_addr[1]));
3198 
3199 	/* Enable Primary MAC */
3200 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3201 			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3202 			    MAC_ADDR_INDIRECT_PTR_REG_PE));
3203 
3204 	/* Clear Primary and Secondary IP addresses */
3205 	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3206 			   ((IP_ADDR_INDEX_REG_MASK << 16) |
3207 			    (qdev->mac_index << 2)));
3208 	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3209 
3210 	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3211 			   ((IP_ADDR_INDEX_REG_MASK << 16) |
3212 			    ((qdev->mac_index << 2) + 1)));
3213 	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3214 
3215 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3216 
3217 	/* Indicate Configuration Complete */
3218 	ql_write_page0_reg(qdev,
3219 			   &port_regs->portControl,
3220 			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3221 
3222 	do {
3223 		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3224 		if (value & PORT_STATUS_IC)
3225 			break;
3226 		spin_unlock_irq(&qdev->hw_lock);
3227 		msleep(500);
3228 		spin_lock_irq(&qdev->hw_lock);
3229 	} while (--delay);
3230 
3231 	if (delay == 0) {
3232 		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3233 		status = -1;
3234 		goto out;
3235 	}
3236 
3237 	/* Enable Ethernet Function */
3238 	if (qdev->device_id == QL3032_DEVICE_ID) {
3239 		value =
3240 		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3241 		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3242 			QL3032_PORT_CONTROL_ET);
3243 		ql_write_page0_reg(qdev, &port_regs->functionControl,
3244 				   ((value << 16) | value));
3245 	} else {
3246 		value =
3247 		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3248 		     PORT_CONTROL_HH);
3249 		ql_write_page0_reg(qdev, &port_regs->portControl,
3250 				   ((value << 16) | value));
3251 	}
3252 
3253 
3254 out:
3255 	return status;
3256 }
3257 
3258 /*
3259  * Caller holds hw_lock.
3260  */
3261 static int ql_adapter_reset(struct ql3_adapter *qdev)
3262 {
3263 	struct ql3xxx_port_registers __iomem *port_regs =
3264 		qdev->mem_map_registers;
3265 	int status = 0;
3266 	u16 value;
3267 	int max_wait_time;
3268 
3269 	set_bit(QL_RESET_ACTIVE, &qdev->flags);
3270 	clear_bit(QL_RESET_DONE, &qdev->flags);
3271 
3272 	/*
3273 	 * Issue soft reset to chip.
3274 	 */
3275 	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3276 	ql_write_common_reg(qdev,
3277 			    &port_regs->CommonRegs.ispControlStatus,
3278 			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3279 
3280 	/* Wait 3 seconds for reset to complete. */
3281 	netdev_printk(KERN_DEBUG, qdev->ndev,
3282 		      "Wait 10 milliseconds for reset to complete\n");
3283 
3284 	/* Wait until the firmware tells us the Soft Reset is done */
3285 	max_wait_time = 5;
3286 	do {
3287 		value =
3288 		    ql_read_common_reg(qdev,
3289 				       &port_regs->CommonRegs.ispControlStatus);
3290 		if ((value & ISP_CONTROL_SR) == 0)
3291 			break;
3292 
3293 		ssleep(1);
3294 	} while ((--max_wait_time));
3295 
3296 	/*
3297 	 * Also, make sure that the Network Reset Interrupt bit has been
3298 	 * cleared after the soft reset has taken place.
3299 	 */
3300 	value =
3301 	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3302 	if (value & ISP_CONTROL_RI) {
3303 		netdev_printk(KERN_DEBUG, qdev->ndev,
3304 			      "clearing RI after reset\n");
3305 		ql_write_common_reg(qdev,
3306 				    &port_regs->CommonRegs.
3307 				    ispControlStatus,
3308 				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3309 	}
3310 
3311 	if (max_wait_time == 0) {
3312 		/* Issue Force Soft Reset */
3313 		ql_write_common_reg(qdev,
3314 				    &port_regs->CommonRegs.
3315 				    ispControlStatus,
3316 				    ((ISP_CONTROL_FSR << 16) |
3317 				     ISP_CONTROL_FSR));
3318 		/*
3319 		 * Wait until the firmware tells us the Force Soft Reset is
3320 		 * done
3321 		 */
3322 		max_wait_time = 5;
3323 		do {
3324 			value = ql_read_common_reg(qdev,
3325 						   &port_regs->CommonRegs.
3326 						   ispControlStatus);
3327 			if ((value & ISP_CONTROL_FSR) == 0)
3328 				break;
3329 			ssleep(1);
3330 		} while ((--max_wait_time));
3331 	}
3332 	if (max_wait_time == 0)
3333 		status = 1;
3334 
3335 	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3336 	set_bit(QL_RESET_DONE, &qdev->flags);
3337 	return status;
3338 }
3339 
3340 static void ql_set_mac_info(struct ql3_adapter *qdev)
3341 {
3342 	struct ql3xxx_port_registers __iomem *port_regs =
3343 		qdev->mem_map_registers;
3344 	u32 value, port_status;
3345 	u8 func_number;
3346 
3347 	/* Get the function number */
3348 	value =
3349 	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3350 	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3351 	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3352 	switch (value & ISP_CONTROL_FN_MASK) {
3353 	case ISP_CONTROL_FN0_NET:
3354 		qdev->mac_index = 0;
3355 		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3356 		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3357 		qdev->PHYAddr = PORT0_PHY_ADDRESS;
3358 		if (port_status & PORT_STATUS_SM0)
3359 			set_bit(QL_LINK_OPTICAL, &qdev->flags);
3360 		else
3361 			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3362 		break;
3363 
3364 	case ISP_CONTROL_FN1_NET:
3365 		qdev->mac_index = 1;
3366 		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3367 		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3368 		qdev->PHYAddr = PORT1_PHY_ADDRESS;
3369 		if (port_status & PORT_STATUS_SM1)
3370 			set_bit(QL_LINK_OPTICAL, &qdev->flags);
3371 		else
3372 			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3373 		break;
3374 
3375 	case ISP_CONTROL_FN0_SCSI:
3376 	case ISP_CONTROL_FN1_SCSI:
3377 	default:
3378 		netdev_printk(KERN_DEBUG, qdev->ndev,
3379 			      "Invalid function number, ispControlStatus = 0x%x\n",
3380 			      value);
3381 		break;
3382 	}
3383 	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3384 }
3385 
3386 static void ql_display_dev_info(struct net_device *ndev)
3387 {
3388 	struct ql3_adapter *qdev = netdev_priv(ndev);
3389 	struct pci_dev *pdev = qdev->pdev;
3390 
3391 	netdev_info(ndev,
3392 		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3393 		    DRV_NAME, qdev->index, qdev->chip_rev_id,
3394 		    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3395 		    qdev->pci_slot);
3396 	netdev_info(ndev, "%s Interface\n",
3397 		test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3398 
3399 	/*
3400 	 * Print PCI bus width/type.
3401 	 */
3402 	netdev_info(ndev, "Bus interface is %s %s\n",
3403 		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3404 		    ((qdev->pci_x) ? "PCI-X" : "PCI"));
3405 
3406 	netdev_info(ndev, "mem  IO base address adjusted = 0x%p\n",
3407 		    qdev->mem_map_registers);
3408 	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3409 
3410 	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3411 }
3412 
3413 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3414 {
3415 	struct net_device *ndev = qdev->ndev;
3416 	int retval = 0;
3417 
3418 	netif_stop_queue(ndev);
3419 	netif_carrier_off(ndev);
3420 
3421 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3422 	clear_bit(QL_LINK_MASTER, &qdev->flags);
3423 
3424 	ql_disable_interrupts(qdev);
3425 
3426 	free_irq(qdev->pdev->irq, ndev);
3427 
3428 	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3429 		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3430 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3431 		pci_disable_msi(qdev->pdev);
3432 	}
3433 
3434 	del_timer_sync(&qdev->adapter_timer);
3435 
3436 	napi_disable(&qdev->napi);
3437 
3438 	if (do_reset) {
3439 		int soft_reset;
3440 		unsigned long hw_flags;
3441 
3442 		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3443 		if (ql_wait_for_drvr_lock(qdev)) {
3444 			soft_reset = ql_adapter_reset(qdev);
3445 			if (soft_reset) {
3446 				netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3447 					   qdev->index);
3448 			}
3449 			netdev_err(ndev,
3450 				   "Releasing driver lock via chip reset\n");
3451 		} else {
3452 			netdev_err(ndev,
3453 				   "Could not acquire driver lock to do reset!\n");
3454 			retval = -1;
3455 		}
3456 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3457 	}
3458 	ql_free_mem_resources(qdev);
3459 	return retval;
3460 }
3461 
3462 static int ql_adapter_up(struct ql3_adapter *qdev)
3463 {
3464 	struct net_device *ndev = qdev->ndev;
3465 	int err;
3466 	unsigned long irq_flags = IRQF_SHARED;
3467 	unsigned long hw_flags;
3468 
3469 	if (ql_alloc_mem_resources(qdev)) {
3470 		netdev_err(ndev, "Unable to  allocate buffers\n");
3471 		return -ENOMEM;
3472 	}
3473 
3474 	if (qdev->msi) {
3475 		if (pci_enable_msi(qdev->pdev)) {
3476 			netdev_err(ndev,
3477 				   "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
3478 			qdev->msi = 0;
3479 		} else {
3480 			netdev_info(ndev, "MSI Enabled...\n");
3481 			set_bit(QL_MSI_ENABLED, &qdev->flags);
3482 			irq_flags &= ~IRQF_SHARED;
3483 		}
3484 	}
3485 
3486 	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3487 			  irq_flags, ndev->name, ndev);
3488 	if (err) {
3489 		netdev_err(ndev,
3490 			   "Failed to reserve interrupt %d - already in use\n",
3491 			   qdev->pdev->irq);
3492 		goto err_irq;
3493 	}
3494 
3495 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3496 
3497 	err = ql_wait_for_drvr_lock(qdev);
3498 	if (err) {
3499 		err = ql_adapter_initialize(qdev);
3500 		if (err) {
3501 			netdev_err(ndev, "Unable to initialize adapter\n");
3502 			goto err_init;
3503 		}
3504 		netdev_err(ndev, "Releasing driver lock\n");
3505 		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3506 	} else {
3507 		netdev_err(ndev, "Could not acquire driver lock\n");
3508 		goto err_lock;
3509 	}
3510 
3511 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3512 
3513 	set_bit(QL_ADAPTER_UP, &qdev->flags);
3514 
3515 	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3516 
3517 	napi_enable(&qdev->napi);
3518 	ql_enable_interrupts(qdev);
3519 	return 0;
3520 
3521 err_init:
3522 	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3523 err_lock:
3524 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3525 	free_irq(qdev->pdev->irq, ndev);
3526 err_irq:
3527 	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3528 		netdev_info(ndev, "calling pci_disable_msi()\n");
3529 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3530 		pci_disable_msi(qdev->pdev);
3531 	}
3532 	return err;
3533 }
3534 
3535 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3536 {
3537 	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3538 		netdev_err(qdev->ndev,
3539 			   "Driver up/down cycle failed, closing device\n");
3540 		rtnl_lock();
3541 		dev_close(qdev->ndev);
3542 		rtnl_unlock();
3543 		return -1;
3544 	}
3545 	return 0;
3546 }
3547 
3548 static int ql3xxx_close(struct net_device *ndev)
3549 {
3550 	struct ql3_adapter *qdev = netdev_priv(ndev);
3551 
3552 	/*
3553 	 * Wait for device to recover from a reset.
3554 	 * (Rarely happens, but possible.)
3555 	 */
3556 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3557 		msleep(50);
3558 
3559 	ql_adapter_down(qdev, QL_DO_RESET);
3560 	return 0;
3561 }
3562 
3563 static int ql3xxx_open(struct net_device *ndev)
3564 {
3565 	struct ql3_adapter *qdev = netdev_priv(ndev);
3566 	return ql_adapter_up(qdev);
3567 }
3568 
3569 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3570 {
3571 	struct ql3_adapter *qdev = netdev_priv(ndev);
3572 	struct ql3xxx_port_registers __iomem *port_regs =
3573 			qdev->mem_map_registers;
3574 	struct sockaddr *addr = p;
3575 	unsigned long hw_flags;
3576 
3577 	if (netif_running(ndev))
3578 		return -EBUSY;
3579 
3580 	if (!is_valid_ether_addr(addr->sa_data))
3581 		return -EADDRNOTAVAIL;
3582 
3583 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3584 
3585 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3586 	/* Program lower 32 bits of the MAC address */
3587 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3588 			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3589 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3590 			   ((ndev->dev_addr[2] << 24) | (ndev->
3591 							 dev_addr[3] << 16) |
3592 			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3593 
3594 	/* Program top 16 bits of the MAC address */
3595 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3596 			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3597 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3598 			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3599 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3600 
3601 	return 0;
3602 }
3603 
3604 static void ql3xxx_tx_timeout(struct net_device *ndev)
3605 {
3606 	struct ql3_adapter *qdev = netdev_priv(ndev);
3607 
3608 	netdev_err(ndev, "Resetting...\n");
3609 	/*
3610 	 * Stop the queues, we've got a problem.
3611 	 */
3612 	netif_stop_queue(ndev);
3613 
3614 	/*
3615 	 * Wake up the worker to process this event.
3616 	 */
3617 	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3618 }
3619 
3620 static void ql_reset_work(struct work_struct *work)
3621 {
3622 	struct ql3_adapter *qdev =
3623 		container_of(work, struct ql3_adapter, reset_work.work);
3624 	struct net_device *ndev = qdev->ndev;
3625 	u32 value;
3626 	struct ql_tx_buf_cb *tx_cb;
3627 	int max_wait_time, i;
3628 	struct ql3xxx_port_registers __iomem *port_regs =
3629 		qdev->mem_map_registers;
3630 	unsigned long hw_flags;
3631 
3632 	if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
3633 		clear_bit(QL_LINK_MASTER, &qdev->flags);
3634 
3635 		/*
3636 		 * Loop through the active list and return the skb.
3637 		 */
3638 		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3639 			int j;
3640 			tx_cb = &qdev->tx_buf[i];
3641 			if (tx_cb->skb) {
3642 				netdev_printk(KERN_DEBUG, ndev,
3643 					      "Freeing lost SKB\n");
3644 				pci_unmap_single(qdev->pdev,
3645 					 dma_unmap_addr(&tx_cb->map[0],
3646 							mapaddr),
3647 					 dma_unmap_len(&tx_cb->map[0], maplen),
3648 					 PCI_DMA_TODEVICE);
3649 				for (j = 1; j < tx_cb->seg_count; j++) {
3650 					pci_unmap_page(qdev->pdev,
3651 					       dma_unmap_addr(&tx_cb->map[j],
3652 							      mapaddr),
3653 					       dma_unmap_len(&tx_cb->map[j],
3654 							     maplen),
3655 					       PCI_DMA_TODEVICE);
3656 				}
3657 				dev_kfree_skb(tx_cb->skb);
3658 				tx_cb->skb = NULL;
3659 			}
3660 		}
3661 
3662 		netdev_err(ndev, "Clearing NRI after reset\n");
3663 		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3664 		ql_write_common_reg(qdev,
3665 				    &port_regs->CommonRegs.
3666 				    ispControlStatus,
3667 				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3668 		/*
3669 		 * Wait the for Soft Reset to Complete.
3670 		 */
3671 		max_wait_time = 10;
3672 		do {
3673 			value = ql_read_common_reg(qdev,
3674 						   &port_regs->CommonRegs.
3675 
3676 						   ispControlStatus);
3677 			if ((value & ISP_CONTROL_SR) == 0) {
3678 				netdev_printk(KERN_DEBUG, ndev,
3679 					      "reset completed\n");
3680 				break;
3681 			}
3682 
3683 			if (value & ISP_CONTROL_RI) {
3684 				netdev_printk(KERN_DEBUG, ndev,
3685 					      "clearing NRI after reset\n");
3686 				ql_write_common_reg(qdev,
3687 						    &port_regs->
3688 						    CommonRegs.
3689 						    ispControlStatus,
3690 						    ((ISP_CONTROL_RI <<
3691 						      16) | ISP_CONTROL_RI));
3692 			}
3693 
3694 			spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3695 			ssleep(1);
3696 			spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3697 		} while (--max_wait_time);
3698 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3699 
3700 		if (value & ISP_CONTROL_SR) {
3701 
3702 			/*
3703 			 * Set the reset flags and clear the board again.
3704 			 * Nothing else to do...
3705 			 */
3706 			netdev_err(ndev,
3707 				   "Timed out waiting for reset to complete\n");
3708 			netdev_err(ndev, "Do a reset\n");
3709 			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3710 			clear_bit(QL_RESET_START, &qdev->flags);
3711 			ql_cycle_adapter(qdev, QL_DO_RESET);
3712 			return;
3713 		}
3714 
3715 		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3716 		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3717 		clear_bit(QL_RESET_START, &qdev->flags);
3718 		ql_cycle_adapter(qdev, QL_NO_RESET);
3719 	}
3720 }
3721 
3722 static void ql_tx_timeout_work(struct work_struct *work)
3723 {
3724 	struct ql3_adapter *qdev =
3725 		container_of(work, struct ql3_adapter, tx_timeout_work.work);
3726 
3727 	ql_cycle_adapter(qdev, QL_DO_RESET);
3728 }
3729 
3730 static void ql_get_board_info(struct ql3_adapter *qdev)
3731 {
3732 	struct ql3xxx_port_registers __iomem *port_regs =
3733 		qdev->mem_map_registers;
3734 	u32 value;
3735 
3736 	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3737 
3738 	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3739 	if (value & PORT_STATUS_64)
3740 		qdev->pci_width = 64;
3741 	else
3742 		qdev->pci_width = 32;
3743 	if (value & PORT_STATUS_X)
3744 		qdev->pci_x = 1;
3745 	else
3746 		qdev->pci_x = 0;
3747 	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3748 }
3749 
3750 static void ql3xxx_timer(unsigned long ptr)
3751 {
3752 	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3753 	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3754 }
3755 
3756 static const struct net_device_ops ql3xxx_netdev_ops = {
3757 	.ndo_open		= ql3xxx_open,
3758 	.ndo_start_xmit		= ql3xxx_send,
3759 	.ndo_stop		= ql3xxx_close,
3760 	.ndo_change_mtu		= eth_change_mtu,
3761 	.ndo_validate_addr	= eth_validate_addr,
3762 	.ndo_set_mac_address	= ql3xxx_set_mac_address,
3763 	.ndo_tx_timeout		= ql3xxx_tx_timeout,
3764 };
3765 
3766 static int ql3xxx_probe(struct pci_dev *pdev,
3767 			const struct pci_device_id *pci_entry)
3768 {
3769 	struct net_device *ndev = NULL;
3770 	struct ql3_adapter *qdev = NULL;
3771 	static int cards_found;
3772 	int uninitialized_var(pci_using_dac), err;
3773 
3774 	err = pci_enable_device(pdev);
3775 	if (err) {
3776 		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
3777 		goto err_out;
3778 	}
3779 
3780 	err = pci_request_regions(pdev, DRV_NAME);
3781 	if (err) {
3782 		pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
3783 		goto err_out_disable_pdev;
3784 	}
3785 
3786 	pci_set_master(pdev);
3787 
3788 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3789 		pci_using_dac = 1;
3790 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3791 	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
3792 		pci_using_dac = 0;
3793 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3794 	}
3795 
3796 	if (err) {
3797 		pr_err("%s no usable DMA configuration\n", pci_name(pdev));
3798 		goto err_out_free_regions;
3799 	}
3800 
3801 	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3802 	if (!ndev) {
3803 		err = -ENOMEM;
3804 		goto err_out_free_regions;
3805 	}
3806 
3807 	SET_NETDEV_DEV(ndev, &pdev->dev);
3808 
3809 	pci_set_drvdata(pdev, ndev);
3810 
3811 	qdev = netdev_priv(ndev);
3812 	qdev->index = cards_found;
3813 	qdev->ndev = ndev;
3814 	qdev->pdev = pdev;
3815 	qdev->device_id = pci_entry->device;
3816 	qdev->port_link_state = LS_DOWN;
3817 	if (msi)
3818 		qdev->msi = 1;
3819 
3820 	qdev->msg_enable = netif_msg_init(debug, default_msg);
3821 
3822 	if (pci_using_dac)
3823 		ndev->features |= NETIF_F_HIGHDMA;
3824 	if (qdev->device_id == QL3032_DEVICE_ID)
3825 		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3826 
3827 	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3828 	if (!qdev->mem_map_registers) {
3829 		pr_err("%s: cannot map device registers\n", pci_name(pdev));
3830 		err = -EIO;
3831 		goto err_out_free_ndev;
3832 	}
3833 
3834 	spin_lock_init(&qdev->adapter_lock);
3835 	spin_lock_init(&qdev->hw_lock);
3836 
3837 	/* Set driver entry points */
3838 	ndev->netdev_ops = &ql3xxx_netdev_ops;
3839 	ndev->ethtool_ops = &ql3xxx_ethtool_ops;
3840 	ndev->watchdog_timeo = 5 * HZ;
3841 
3842 	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
3843 
3844 	ndev->irq = pdev->irq;
3845 
3846 	/* make sure the EEPROM is good */
3847 	if (ql_get_nvram_params(qdev)) {
3848 		pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
3849 			 __func__, qdev->index);
3850 		err = -EIO;
3851 		goto err_out_iounmap;
3852 	}
3853 
3854 	ql_set_mac_info(qdev);
3855 
3856 	/* Validate and set parameters */
3857 	if (qdev->mac_index) {
3858 		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
3859 		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
3860 	} else {
3861 		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
3862 		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
3863 	}
3864 
3865 	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3866 
3867 	/* Record PCI bus information. */
3868 	ql_get_board_info(qdev);
3869 
3870 	/*
3871 	 * Set the Maximum Memory Read Byte Count value. We do this to handle
3872 	 * jumbo frames.
3873 	 */
3874 	if (qdev->pci_x)
3875 		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
3876 
3877 	err = register_netdev(ndev);
3878 	if (err) {
3879 		pr_err("%s: cannot register net device\n", pci_name(pdev));
3880 		goto err_out_iounmap;
3881 	}
3882 
3883 	/* we're going to reset, so assume we have no link for now */
3884 
3885 	netif_carrier_off(ndev);
3886 	netif_stop_queue(ndev);
3887 
3888 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
3889 	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3890 	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3891 	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
3892 
3893 	init_timer(&qdev->adapter_timer);
3894 	qdev->adapter_timer.function = ql3xxx_timer;
3895 	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
3896 	qdev->adapter_timer.data = (unsigned long)qdev;
3897 
3898 	if (!cards_found) {
3899 		pr_alert("%s\n", DRV_STRING);
3900 		pr_alert("Driver name: %s, Version: %s\n",
3901 			 DRV_NAME, DRV_VERSION);
3902 	}
3903 	ql_display_dev_info(ndev);
3904 
3905 	cards_found++;
3906 	return 0;
3907 
3908 err_out_iounmap:
3909 	iounmap(qdev->mem_map_registers);
3910 err_out_free_ndev:
3911 	free_netdev(ndev);
3912 err_out_free_regions:
3913 	pci_release_regions(pdev);
3914 err_out_disable_pdev:
3915 	pci_disable_device(pdev);
3916 err_out:
3917 	return err;
3918 }
3919 
3920 static void ql3xxx_remove(struct pci_dev *pdev)
3921 {
3922 	struct net_device *ndev = pci_get_drvdata(pdev);
3923 	struct ql3_adapter *qdev = netdev_priv(ndev);
3924 
3925 	unregister_netdev(ndev);
3926 
3927 	ql_disable_interrupts(qdev);
3928 
3929 	if (qdev->workqueue) {
3930 		cancel_delayed_work(&qdev->reset_work);
3931 		cancel_delayed_work(&qdev->tx_timeout_work);
3932 		destroy_workqueue(qdev->workqueue);
3933 		qdev->workqueue = NULL;
3934 	}
3935 
3936 	iounmap(qdev->mem_map_registers);
3937 	pci_release_regions(pdev);
3938 	free_netdev(ndev);
3939 }
3940 
3941 static struct pci_driver ql3xxx_driver = {
3942 
3943 	.name = DRV_NAME,
3944 	.id_table = ql3xxx_pci_tbl,
3945 	.probe = ql3xxx_probe,
3946 	.remove = ql3xxx_remove,
3947 };
3948 
3949 module_pci_driver(ql3xxx_driver);
3950